diff --git a/.github/workflows/build-test-publish.yaml b/.github/workflows/build-test-publish.yaml index 309ca9ce..307317d8 100644 --- a/.github/workflows/build-test-publish.yaml +++ b/.github/workflows/build-test-publish.yaml @@ -55,7 +55,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: 1.22.0 + go-version: 1.23.2 # Dependencies are cached by default: https://github.com/actions/setup-go#v4 # This can be explicitly disabled if it ever causes problems. @@ -271,8 +271,8 @@ jobs: strategy: fail-fast: true matrix: - k8s-version: [1.25.16, 1.26.14, 1.27.11, 1.28.7] - beegfs-version: [7.3.4, 7.4.2] + k8s-version: [1.27.11, 1.28.7, 1.29.10, 1.30.6, 1.31.2] + beegfs-version: [7.3.4, 7.4.5] permissions: packages: read contents: read @@ -376,8 +376,8 @@ jobs: strategy: fail-fast: true matrix: - k8s-version: [1.25.16, 1.26.14, 1.27.11, 1.28.7] - beegfs-version: [7.3.4, 7.4.2] + k8s-version: [1.27.11, 1.28.7, 1.29.10, 1.30.6, 1.31.2] + beegfs-version: [7.3.4, 7.4.5] permissions: packages: read contents: read diff --git a/.github/workflows/codeql.yaml b/.github/workflows/codeql.yaml index 40f3df00..89727c46 100644 --- a/.github/workflows/codeql.yaml +++ b/.github/workflows/codeql.yaml @@ -54,7 +54,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v4 with: - go-version: 1.22.0 + go-version: 1.23.2 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL diff --git a/CHANGELOG.md b/CHANGELOG.md index 99d73c4f..d069321a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,26 @@ # Changelog Notable changes to the BeeGFS CSI driver will be documented in this file. +[1.7.0] - 2024-11-04 +-------------------- + +### Added +- Support for BeeGFS v7.4.5, Kubernetes v1.29, v1.30, and v1.31. +- Support for volume resizing. + - Note that volume capacity still has no effect when using the driver. However, support for + resizing can be helpful for applications that rely on the size of the Persistent Volume (PV) or + Persistent Volume Claim (PVC) as indicated in the Kubernetes API. + +### Deprecated +- Kubernetes v1.27 and v1.28 support will be dropped in the next driver release according to our + [support + policy](docs/compatibility.md#dropping-compatibility-support-for-old-kubernetes-releases). +- BeeGFS 7.3 support will be dropped in the next driver release. Users are advised to upgrade to + BeeGFS 7.4. + +### Removed +- Support/testing for Kubernetes v1.25 and v1.26. + [1.6.0] - 2024-02-28 -------------------- diff --git a/Makefile b/Makefile index 681c847d..c3e23b16 100644 --- a/Makefile +++ b/Makefile @@ -24,8 +24,21 @@ CMDS ?= beegfs-csi-driver TEST_GO_FILTER_CMD = -e '/test/e2e' -e '/operator' all: build build-chwrap bin/chwrap.tar +# This check is primarily meant to ensure when the Go version in go.mod changes, CI workflows are +# also updated to use this version of Go. +.PHONY: check-go-version check-go-version: - ./hack/check-go-version.sh + @echo "Checking Go version..." 
+ @{ \ + GO_MOD_VERSION=go$$(grep '^go ' go.mod | awk '{print $$2}'); \ + INSTALLED_VERSION=$$(go version | { read _ _ ver _; echo $${ver}; }); \ + if [ "$$INSTALLED_VERSION" = "$$GO_MOD_VERSION" ]; then \ + echo "INFO: go.mod requests ($$GO_MOD_VERSION) and the build environment has $$INSTALLED_VERSION."; \ + else \ + echo "ERROR: go.mod requests $$GO_MOD_VERSION but the build environment has $$INSTALLED_VERSION."; \ + exit 1; \ + fi \ + } || { echo >&2 "ERROR: determining version of Go failed"; exit 1; } .PHONY: generate-notices generate-notices: diff --git a/NOTICE.md b/NOTICE.md index a40b6036..677ecaad 100644 --- a/NOTICE.md +++ b/NOTICE.md @@ -226,6 +226,237 @@ This software includes code and libraries from: ``` +## github.com/coreos/go-systemd/v22/dbus + +* License: Apache-2.0 +* Project URL: [github.com/coreos/go-systemd/v22/dbus](https://github.com/coreos/go-systemd/v22/dbus) + +### Original License +``` +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +``` + +## github.com/fxamacker/cbor/v2 + +* License: MIT +* Project URL: [github.com/fxamacker/cbor/v2](https://github.com/fxamacker/cbor/v2) + +### Original License +``` +MIT License + +Copyright (c) 2019-present Faye Amacker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +``` + ## github.com/go-logr/logr * License: Apache-2.0 @@ -437,6 +668,41 @@ This software includes code and libraries from: ``` +## github.com/godbus/dbus/v5 + +* License: BSD-2-Clause +* Project URL: [github.com/godbus/dbus/v5](https://github.com/godbus/dbus/v5) + +### Original License +``` +Copyright (c) 2013, Georg Reinke (), Google +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +``` + ## github.com/gogo/protobuf * License: BSD-3-Clause @@ -1819,13 +2085,14 @@ SOFTWARE. ``` -## github.com/opencontainers/selinux +## github.com/opencontainers/runc/libcontainer * License: Apache-2.0 -* Project URL: [github.com/opencontainers/selinux](https://github.com/opencontainers/selinux) +* Project URL: [github.com/opencontainers/runc/libcontainer](https://github.com/opencontainers/runc/libcontainer) ### Original License ``` + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -2003,18 +2270,7 @@ SOFTWARE. END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} + Copyright 2014 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -2030,47 +2286,15 @@ SOFTWARE. ``` -## github.com/pkg/errors - -* License: BSD-2-Clause -* Project URL: [github.com/pkg/errors](https://github.com/pkg/errors) - -### Original License -``` -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -``` - -## github.com/spf13/afero +## github.com/opencontainers/runtime-spec/specs-go * License: Apache-2.0 -* Project URL: [github.com/spf13/afero](https://github.com/spf13/afero) +* Project URL: [github.com/opencontainers/runtime-spec/specs-go](https://github.com/opencontainers/runtime-spec/specs-go) ### Original License ``` - Apache License + + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -2245,127 +2469,31 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. -``` - -## golang.org/x/net - -* License: BSD-3-Clause -* Project URL: [golang.org/x/net](https://golang.org/x/net) - -### Original License -``` -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -``` - -## golang.org/x/sys/unix - -* License: BSD-3-Clause -* Project URL: [golang.org/x/sys/unix](https://golang.org/x/sys/unix) - -### Original License -``` -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -``` - -## golang.org/x/text - -* License: BSD-3-Clause -* Project URL: [golang.org/x/text](https://golang.org/x/text) + END OF TERMS AND CONDITIONS -### Original License -``` -Copyright (c) 2009 The Go Authors. All rights reserved. + Copyright 2015 The Linux Foundation. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + http://www.apache.org/licenses/LICENSE-2.0 -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. ``` -## google.golang.org/genproto/googleapis/rpc/status +## github.com/opencontainers/selinux * License: Apache-2.0 -* Project URL: [google.golang.org/genproto/googleapis/rpc/status](https://google.golang.org/genproto/googleapis/rpc/status) +* Project URL: [github.com/opencontainers/selinux](https://github.com/opencontainers/selinux) ### Original License ``` - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -2546,7 +2674,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -2554,7 +2682,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -2570,15 +2698,78 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ``` -## google.golang.org/grpc +## github.com/pkg/errors -* License: Apache-2.0 -* Project URL: [google.golang.org/grpc](https://google.golang.org/grpc) +* License: BSD-2-Clause +* Project URL: [github.com/pkg/errors](https://github.com/pkg/errors) ### Original License ``` +Copyright (c) 2015, Dave Cheney +All rights reserved. - Apache License +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +``` + +## github.com/sirupsen/logrus + +* License: MIT +* Project URL: [github.com/sirupsen/logrus](https://github.com/sirupsen/logrus) + +### Original License +``` +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +``` + +## github.com/spf13/afero + +* License: Apache-2.0 +* Project URL: [github.com/spf13/afero](https://github.com/spf13/afero) + +### Original License +``` + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -2753,43 +2944,48 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - END OF TERMS AND CONDITIONS +``` - APPENDIX: How to apply the Apache License to your work. +## github.com/x448/float16 - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +* License: MIT +* Project URL: [github.com/x448/float16](https://github.com/x448/float16) - Copyright [yyyy] [name of copyright owner] +### Original License +``` +MIT License - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker - http://www.apache.org/licenses/LICENSE-2.0 +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. ``` -## google.golang.org/protobuf +## golang.org/x/net * License: BSD-3-Clause -* Project URL: [google.golang.org/protobuf](https://google.golang.org/protobuf) +* Project URL: [golang.org/x/net](https://golang.org/x/net) ### Original License ``` -Copyright (c) 2018 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2801,7 +2997,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -2819,15 +3015,14 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ``` -## gopkg.in/inf.v0 +## golang.org/x/sys/unix * License: BSD-3-Clause -* Project URL: [gopkg.in/inf.v0](https://gopkg.in/inf.v0) +* Project URL: [golang.org/x/sys/unix](https://golang.org/x/sys/unix) ### Original License ``` -Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go -Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -2839,7 +3034,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -2857,214 +3052,51 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ``` -## gopkg.in/ini.v1 +## golang.org/x/text -* License: Apache-2.0 -* Project URL: [gopkg.in/ini.v1](https://gopkg.in/ini.v1) +* License: BSD-3-Clause +* Project URL: [golang.org/x/text](https://golang.org/x/text) ### Original License ``` -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. 
- -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. +Copyright 2009 The Go Authors. - Copyright 2014 Unknwon - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - http://www.apache.org/licenses/LICENSE-2.0 + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ``` -## gopkg.in/yaml.v2 +## google.golang.org/genproto/googleapis/rpc/status * License: Apache-2.0 -* Project URL: [gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) +* Project URL: [google.golang.org/genproto/googleapis/rpc/status](https://google.golang.org/genproto/googleapis/rpc/status) ### Original License ``` + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -3245,7 +3277,7 @@ third-party archives. APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -3253,7 +3285,7 @@ third-party archives. same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -3269,10 +3301,10 @@ third-party archives. ``` -## k8s.io/api/core/v1 +## google.golang.org/grpc * License: Apache-2.0 -* Project URL: [k8s.io/api/core/v1](https://k8s.io/api/core/v1) +* Project URL: [google.golang.org/grpc](https://google.golang.org/grpc) ### Original License ``` @@ -3481,226 +3513,52 @@ third-party archives. ``` -## k8s.io/apimachinery/pkg +## google.golang.org/protobuf -* License: Apache-2.0 -* Project URL: [k8s.io/apimachinery/pkg](https://k8s.io/apimachinery/pkg) +* License: BSD-3-Clause +* Project URL: [google.golang.org/protobuf](https://google.golang.org/protobuf) ### Original License ``` +Copyright (c) 2018 The Go Authors. All rights reserved. - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - http://www.apache.org/licenses/LICENSE-2.0 + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ``` -## k8s.io/apimachinery/third_party/forked/golang/reflect +## gopkg.in/inf.v0 * License: BSD-3-Clause -* Project URL: [k8s.io/apimachinery/third_party/forked/golang/reflect](https://k8s.io/apimachinery/third_party/forked/golang/reflect) +* Project URL: [gopkg.in/inf.v0](https://gopkg.in/inf.v0) ### Original License ``` -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -3730,10 +3588,10 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ``` -## k8s.io/klog/v2 +## gopkg.in/ini.v1 * License: Apache-2.0 -* Project URL: [k8s.io/klog/v2](https://k8s.io/klog/v2) +* Project URL: [gopkg.in/ini.v1](https://gopkg.in/ini.v1) ### Original License ``` @@ -3915,13 +3773,1309 @@ recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +``` + +## gopkg.in/yaml.v2 + +* License: Apache-2.0 +* Project URL: [gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +### Original License +``` + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +``` + +## k8s.io/api/core/v1 + +* License: Apache-2.0 +* Project URL: [k8s.io/api/core/v1](https://k8s.io/api/core/v1) + +### Original License +``` + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +``` + +## k8s.io/apimachinery/pkg + +* License: Apache-2.0 +* Project URL: [k8s.io/apimachinery/pkg](https://k8s.io/apimachinery/pkg) + +### Original License +``` + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +``` + +## k8s.io/apimachinery/third_party/forked/golang/reflect + +* License: BSD-3-Clause +* Project URL: [k8s.io/apimachinery/third_party/forked/golang/reflect](https://k8s.io/apimachinery/third_party/forked/golang/reflect) + +### Original License +``` +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +``` + +## k8s.io/klog/v2 + +* License: Apache-2.0 +* Project URL: [k8s.io/klog/v2](https://k8s.io/klog/v2) + +### Original License +``` +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. 
+ +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +``` + +## k8s.io/mount-utils + +* License: Apache-2.0 +* Project URL: [k8s.io/mount-utils](https://k8s.io/mount-utils) + +### Original License +``` + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +``` + +## k8s.io/utils + +* License: Apache-2.0 +* Project URL: [k8s.io/utils](https://k8s.io/utils) + +### Original License +``` + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -3931,10 +5085,47 @@ third-party archives. ``` -## k8s.io/mount-utils +## k8s.io/utils/internal/third_party/forked/golang/net + +* License: BSD-3-Clause +* Project URL: [k8s.io/utils/internal/third_party/forked/golang/net](https://k8s.io/utils/internal/third_party/forked/golang/net) + +### Original License +``` +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +``` + +## sigs.k8s.io/controller-runtime/pkg/scheme * License: Apache-2.0 -* Project URL: [k8s.io/mount-utils](https://k8s.io/mount-utils) +* Project URL: [sigs.k8s.io/controller-runtime/pkg/scheme](https://sigs.k8s.io/controller-runtime/pkg/scheme) ### Original License ``` @@ -4142,13 +5333,15 @@ third-party archives. ``` -## k8s.io/utils +## sigs.k8s.io/json * License: Apache-2.0 -* Project URL: [k8s.io/utils](https://k8s.io/utils) +* Project URL: [sigs.k8s.io/json](https://sigs.k8s.io/json) ### Original License ``` +Files other than internal/golang/* licensed under: + Apache License Version 2.0, January 2004 @@ -4330,7 +5523,7 @@ third-party archives. 
APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -4338,7 +5531,7 @@ third-party archives. same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -4352,16 +5545,13 @@ third-party archives. See the License for the specific language governing permissions and limitations under the License. -``` -## k8s.io/utils/internal/third_party/forked/golang/net +------------------ -* License: BSD-3-Clause -* Project URL: [k8s.io/utils/internal/third_party/forked/golang/net](https://k8s.io/utils/internal/third_party/forked/golang/net) +internal/golang/* files licensed under: -### Original License -``` -Copyright (c) 2012 The Go Authors. All rights reserved. + +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -4391,10 +5581,10 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ``` -## sigs.k8s.io/controller-runtime/pkg/scheme +## sigs.k8s.io/structured-merge-diff/v4/value * License: Apache-2.0 -* Project URL: [sigs.k8s.io/controller-runtime/pkg/scheme](https://sigs.k8s.io/controller-runtime/pkg/scheme) +* Project URL: [sigs.k8s.io/structured-merge-diff/v4/value](https://sigs.k8s.io/structured-merge-diff/v4/value) ### Original License ``` @@ -4602,15 +5792,117 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ``` -## sigs.k8s.io/json +## sigs.k8s.io/yaml * License: Apache-2.0 -* Project URL: [sigs.k8s.io/json](https://sigs.k8s.io/json) +* Project URL: [sigs.k8s.io/yaml](https://sigs.k8s.io/yaml) ### Original License ``` -Files other than internal/golang/* licensed under: +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# The forked go-yaml.v3 library under this project is covered by two +different licenses (MIT and Apache): + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +# The forked go-yaml.v2 library under the project is covered by an +Apache license: Apache License Version 2.0, January 2004 @@ -4814,46 +6106,12 @@ Files other than internal/golang/* licensed under: See the License for the specific language governing permissions and limitations under the License. - ------------------- - -internal/golang/* files licensed under: - - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ``` -## sigs.k8s.io/structured-merge-diff/v4/value +## sigs.k8s.io/yaml/goyaml.v2 * License: Apache-2.0 -* Project URL: [sigs.k8s.io/structured-merge-diff/v4/value](https://sigs.k8s.io/structured-merge-diff/v4/value) +* Project URL: [sigs.k8s.io/yaml/goyaml.v2](https://sigs.k8s.io/yaml/goyaml.v2) ### Original License ``` @@ -5061,66 +6319,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ``` -## sigs.k8s.io/yaml - -* License: MIT -* Project URL: [sigs.k8s.io/yaml](https://sigs.k8s.io/yaml) - -### Original License -``` -The MIT License (MIT) - -Copyright (c) 2014 Sam Ghods - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -``` - --- diff --git a/README.md b/README.md index 7288eda2..4b62a41e 100644 --- a/README.md +++ b/README.md @@ -80,6 +80,7 @@ supported. 
| BeeGFS CSI Driver | K8s Versions | BeeGFS Client Versions | CSI Version | | ----------------- | ----------------------------------------- | ---------------------- | ----------- | +| v1.7.0 | 1.27.11, 1.28.7, 1.29.10, 1.30.6, 1.31.2 | 7.3.4, 7.4.5 | v1.10.0 | | v1.6.0 | 1.25.16, 1.26.14, 1.27.11, 1.28.7 | 7.3.4, 7.4.2 | v1.8.0 | | v1.5.0 | 1.23.17, 1.24.15, 1.25.11, 1.26.3, 1.27.3 | 7.3.4, 7.4.0 | v1.7.0 | | v1.4.0 | 1.22.6, 1.23.5, 1.24.1, 1.25.2 | 7.3.2, 7.2.8 | v1.7.0 | diff --git a/deploy/k8s/bases/csi-beegfs-controller.yaml b/deploy/k8s/bases/csi-beegfs-controller.yaml index 3a8063b7..4234cc58 100644 --- a/deploy/k8s/bases/csi-beegfs-controller.yaml +++ b/deploy/k8s/bases/csi-beegfs-controller.yaml @@ -70,7 +70,7 @@ spec: cpu: 10m memory: 20Mi - name: beegfs - image: ghcr.io/thinkparq/beegfs-csi-driver:v1.6.0 + image: ghcr.io/thinkparq/beegfs-csi-driver:v1.7.0 args: - --driver-name=beegfs.csi.netapp.com - --node-id=$(KUBE_NODE_NAME) diff --git a/deploy/k8s/bases/csi-beegfs-node.yaml b/deploy/k8s/bases/csi-beegfs-node.yaml index 0b0450be..d48af286 100644 --- a/deploy/k8s/bases/csi-beegfs-node.yaml +++ b/deploy/k8s/bases/csi-beegfs-node.yaml @@ -46,7 +46,7 @@ spec: cpu: 80m memory: 10Mi - name: beegfs - image: ghcr.io/thinkparq/beegfs-csi-driver:v1.6.0 + image: ghcr.io/thinkparq/beegfs-csi-driver:v1.7.0 args: - --driver-name=beegfs.csi.netapp.com - --node-id=$(KUBE_NODE_NAME) diff --git a/deploy/nomad/controller.nomad b/deploy/nomad/controller.nomad index 683d71d1..74ab9dd0 100644 --- a/deploy/nomad/controller.nomad +++ b/deploy/nomad/controller.nomad @@ -22,7 +22,7 @@ job "beegfs-csi-plugin-controller" { driver = "docker" config { - image = "ghcr.io/thinkparq/beegfs-csi-driver:v1.6.0" + image = "ghcr.io/thinkparq/beegfs-csi-driver:v1.7.0" # chwrap is used to execute the beegfs-ctl binary already installed on the host. We also read the # beegfs-client.conf template already installed on the host. diff --git a/deploy/nomad/node.nomad b/deploy/nomad/node.nomad index 21fa51a7..4f100ca1 100644 --- a/deploy/nomad/node.nomad +++ b/deploy/nomad/node.nomad @@ -20,7 +20,7 @@ job "beegfs-csi-plugin-node" { driver = "docker" config { - image = "ghcr.io/thinkparq/beegfs-csi-driver:v1.6.0" + image = "ghcr.io/thinkparq/beegfs-csi-driver:v1.7.0" # chwrap is used to execute the beegfs-ctl binary already installed on the host. We also read the # beegfs-client.conf template already installed on the host. 
diff --git a/docs/deployment.md b/docs/deployment.md index 11018559..234e96ad 100644 --- a/docs/deployment.md +++ b/docs/deployment.md @@ -79,8 +79,8 @@ cosign verify --key ghcr.io/thinkparq/beegfs-csi-driver: ``` Examples: ``` -cosign verify --key cosign.pub ghcr.io/thinkparq/beegfs-csi-driver:v1.6.0 -cosign verify --key https://raw.githubusercontent.com/ThinkParQ/beegfs-csi-driver/master/release/cosign.pub ghcr.io/thinkparq/beegfs-csi-driver:v1.6.0 +cosign verify --key cosign.pub ghcr.io/thinkparq/beegfs-csi-driver:v1.7.0 +cosign verify --key https://raw.githubusercontent.com/ThinkParQ/beegfs-csi-driver/master/release/cosign.pub ghcr.io/thinkparq/beegfs-csi-driver:v1.7.0 ``` OPTION 2: Validate the image using the version tag and digest: diff --git a/go.mod b/go.mod index df7f29aa..962b13f4 100644 --- a/go.mod +++ b/go.mod @@ -1,166 +1,170 @@ module github.com/netapp/beegfs-csi-driver -go 1.22 +go 1.23.2 require ( - github.com/container-storage-interface/spec v1.8.0 - github.com/go-logr/logr v1.3.0 - github.com/kubernetes-csi/csi-lib-utils v0.11.0 + github.com/container-storage-interface/spec v1.10.0 + github.com/go-logr/logr v1.4.2 + github.com/kubernetes-csi/csi-lib-utils v0.19.0 github.com/kubernetes-csi/csi-test/v4 v4.4.0 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/ginkgo/v2 v2.13.0 - github.com/onsi/gomega v1.29.0 + github.com/onsi/ginkgo/v2 v2.19.0 + github.com/onsi/gomega v1.33.1 github.com/opencontainers/selinux v1.11.0 github.com/pkg/errors v0.9.1 github.com/spf13/afero v1.9.2 - golang.org/x/net v0.19.0 - golang.org/x/sys v0.15.0 - google.golang.org/grpc v1.58.3 + golang.org/x/net v0.30.0 + golang.org/x/sys v0.26.0 + google.golang.org/grpc v1.65.0 gopkg.in/ini.v1 v1.67.0 - k8s.io/api v0.29.2 - k8s.io/apimachinery v0.29.2 - k8s.io/client-go v1.5.2 - k8s.io/klog/v2 v2.110.1 - k8s.io/kubernetes v1.29.2 + k8s.io/api v0.31.2 + k8s.io/apimachinery v0.31.2 + k8s.io/client-go v0.31.2 + k8s.io/klog/v2 v2.130.1 + k8s.io/kubernetes v1.31.2 k8s.io/mount-utils v0.28.2 k8s.io/pod-security-admission v0.28.2 - sigs.k8s.io/controller-runtime v0.15.3 - sigs.k8s.io/yaml v1.3.0 + sigs.k8s.io/controller-runtime v0.19.1 + sigs.k8s.io/yaml v1.4.0 ) require ( github.com/NYTimes/gziphandler v1.1.1 // indirect - github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.2.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.5.0 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/fxamacker/cbor/v2 
v2.7.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-logr/zapr v1.2.4 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/cel-go v0.17.7 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/cel-go v0.20.1 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/moby/spdystream v0.2.0 // indirect - github.com/moby/sys/mountinfo v0.6.2 // indirect + github.com/moby/spdystream v0.4.0 // indirect + github.com/moby/sys/mountinfo v0.7.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect - github.com/spf13/cobra v1.7.0 // indirect + github.com/opencontainers/runc v1.1.13 // indirect + github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect - go.etcd.io/etcd/api/v3 v3.5.10 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.10 // indirect - go.etcd.io/etcd/client/v3 v3.5.10 
// indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect - go.opentelemetry.io/otel v1.19.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect - go.opentelemetry.io/otel/metric v1.19.0 // indirect - go.opentelemetry.io/otel/sdk v1.19.0 // indirect - go.opentelemetry.io/otel/trace v1.19.0 // indirect - go.opentelemetry.io/proto/otlp v1.0.0 // indirect - go.uber.org/atomic v1.10.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.etcd.io/etcd/api/v3 v3.5.14 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.14 // indirect + go.etcd.io/etcd/client/v3 v3.5.14 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.17.0 // indirect - golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 // indirect - golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sync v0.5.0 // indirect - golang.org/x/term v0.15.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.16.1 // indirect - gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/protobuf v1.31.0 // indirect + go.uber.org/zap v1.26.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/term v0.25.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.7.0 // indirect + golang.org/x/tools v0.26.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/protobuf v1.35.1 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.27.7 // indirect - k8s.io/apiserver v0.29.2 // indirect + k8s.io/apiextensions-apiserver v0.31.0 // indirect + k8s.io/apiserver v0.31.2 // indirect k8s.io/cloud-provider v0.25.3 // indirect - k8s.io/component-base v0.29.2 // indirect - k8s.io/component-helpers v0.29.2 // indirect - 
k8s.io/controller-manager v0.29.2 // indirect + k8s.io/component-base v0.31.2 // indirect + k8s.io/component-helpers v0.31.2 // indirect + k8s.io/controller-manager v0.31.2 // indirect k8s.io/csi-translation-lib v0.25.3 // indirect - k8s.io/kms v0.29.2 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/kms v0.31.2 // indirect + k8s.io/kube-openapi v0.0.0-20241009091222-67ed5848f094 // indirect k8s.io/kubectl v0.0.0 // indirect k8s.io/kubelet v0.0.0 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) replace ( - k8s.io/api => k8s.io/api v0.29.2 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.2 - k8s.io/apimachinery => k8s.io/apimachinery v0.29.2 - k8s.io/apiserver => k8s.io/apiserver v0.29.2 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.2 - k8s.io/client-go => k8s.io/client-go v0.29.2 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.2 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.2 - k8s.io/code-generator => k8s.io/code-generator v0.29.2 - k8s.io/component-base => k8s.io/component-base v0.29.2 - k8s.io/component-helpers => k8s.io/component-helpers v0.29.2 - k8s.io/controller-manager => k8s.io/controller-manager v0.29.2 - k8s.io/cri-api => k8s.io/cri-api v0.29.2 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.2 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.2 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.2 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.2 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.2 - k8s.io/kubectl => k8s.io/kubectl v0.29.2 - k8s.io/kubelet => k8s.io/kubelet v0.29.2 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.2 - k8s.io/metrics => k8s.io/metrics v0.29.2 - k8s.io/mount-utils => k8s.io/mount-utils v0.29.2 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.29.2 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.2 - k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.29.2 - k8s.io/sample-controller => k8s.io/sample-controller v0.29.2 + k8s.io/api => k8s.io/api v0.31.2 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.31.2 + k8s.io/apimachinery => k8s.io/apimachinery v0.31.2 + k8s.io/apiserver => k8s.io/apiserver v0.31.2 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.31.2 + k8s.io/client-go => k8s.io/client-go v0.31.2 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.31.2 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.31.2 + k8s.io/code-generator => k8s.io/code-generator v0.31.2 + k8s.io/component-base => k8s.io/component-base v0.31.2 + k8s.io/component-helpers => k8s.io/component-helpers v0.31.2 + k8s.io/controller-manager => k8s.io/controller-manager v0.31.2 + k8s.io/cri-api => k8s.io/cri-api v0.31.2 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.31.2 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.31.2 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.31.2 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.31.2 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.31.2 + k8s.io/kubectl 
=> k8s.io/kubectl v0.31.2 + k8s.io/kubelet => k8s.io/kubelet v0.31.2 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.31.2 + k8s.io/metrics => k8s.io/metrics v0.31.2 + k8s.io/mount-utils => k8s.io/mount-utils v0.31.2 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.31.2 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.31.2 + k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.31.2 + k8s.io/sample-controller => k8s.io/sample-controller v0.31.2 ) diff --git a/go.sum b/go.sum index 6c7e4428..75515927 100644 --- a/go.sum +++ b/go.sum @@ -17,797 +17,48 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= -cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= -cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q= -cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= -cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= -cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= -cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= -cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= -cloud.google.com/go/accesscontextmanager v1.6.0/go.mod 
h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= -cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= -cloud.google.com/go/accesscontextmanager v1.8.0/go.mod h1:uI+AI/r1oyWK99NN8cQ3UK76AMelMzgZCvJfsi2c+ps= -cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= -cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= -cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= -cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= -cloud.google.com/go/aiplatform v1.45.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= -cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= -cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= -cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= -cloud.google.com/go/analytics v0.21.2/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= -cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= -cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= -cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= -cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= -cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= -cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= -cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= -cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= -cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= -cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= -cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= -cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= -cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= -cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= -cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= -cloud.google.com/go/appengine v1.7.0/go.mod 
h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= -cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= -cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= -cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= -cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= -cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= -cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= -cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= -cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= -cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= -cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= -cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= -cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= -cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= -cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= -cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= -cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= -cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= -cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= -cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= -cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= -cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= 
-cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= -cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= -cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= -cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= -cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= -cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= -cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= -cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= -cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= -cloud.google.com/go/beyondcorp v0.6.1/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= -cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= -cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= -cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= -cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= -cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= -cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= -cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= -cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= -cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= -cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= -cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= -cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= -cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= 
-cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= -cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= -cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= -cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= -cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= -cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= -cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= -cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= -cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= -cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= -cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= -cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= -cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= -cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= -cloud.google.com/go/cloudbuild v1.10.1/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= -cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= -cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= -cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= -cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= -cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= -cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= -cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= -cloud.google.com/go/cloudtasks v1.11.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= -cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= 
-cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= -cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= -cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= -cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= -cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= -cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= -cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= -cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= -cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/contactcenterinsights v1.9.1/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= -cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= -cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= -cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= -cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= -cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= -cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= -cloud.google.com/go/container v1.22.1/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= -cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= -cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= -cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= 
-cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= -cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= -cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= -cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= -cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= -cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/datacatalog v1.14.0/go.mod h1:h0PrGtlihoutNMp/uvwhawLQ9+c63Kz65UFqh49Yo+E= -cloud.google.com/go/datacatalog v1.14.1/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= -cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= -cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= -cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= -cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= -cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= -cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= -cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= -cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= -cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= -cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= -cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= -cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= -cloud.google.com/go/dataplex v1.8.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= -cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= -cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= -cloud.google.com/go/dataproc v1.8.0/go.mod 
h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= -cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= -cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= -cloud.google.com/go/datastore v1.12.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= -cloud.google.com/go/datastore v1.12.1/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= -cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= -cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= -cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= -cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= -cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= -cloud.google.com/go/datastream v1.9.1/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= -cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= -cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= -cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= -cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= -cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= -cloud.google.com/go/deploy v1.11.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= -cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= -cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= -cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= -cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= -cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= -cloud.google.com/go/dialogflow v1.38.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= -cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= -cloud.google.com/go/dlp v1.6.0/go.mod 
h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= -cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= -cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= -cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= -cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= -cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= -cloud.google.com/go/documentai v1.20.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= -cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= -cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= -cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= -cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= -cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= -cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= -cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= -cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= -cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= -cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= -cloud.google.com/go/eventarc v1.12.1/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= -cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= -cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= -cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= -cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= -cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= -cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= 
-cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= -cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= -cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= -cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= -cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= -cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= -cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gaming v1.10.1/go.mod h1:XQQvtfP8Rb9Rxnxm5wFVpAp9zCQkJi2bLIb7iHGwB3s= -cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= -cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= -cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= -cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= -cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= -cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= -cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/gkemulticloud v0.6.1/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= -cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= -cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8= -cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= -cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= 
-cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= -cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= -cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= -cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= -cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8= -cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= -cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= -cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= -cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= -cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= -cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= -cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= -cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= -cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= -cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= -cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= -cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= -cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= -cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= -cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= -cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= -cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= -cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= -cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= -cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/kms v1.11.0/go.mod h1:hwdiYC0xjnWsKQQCQQmIQnS9asjYVSK6jtXm+zFqXLM= -cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= -cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= -cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/language v1.7.0/go.mod 
h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= -cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= -cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= -cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= -cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= -cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ= -cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc= -cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= -cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= -cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= -cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= -cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= -cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= -cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= -cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= -cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= -cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= -cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= -cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= 
-cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/metastore v1.11.1/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= -cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= -cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= -cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= -cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= -cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= -cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= -cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= -cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= -cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= -cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= -cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= -cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= -cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= -cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= -cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= -cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= -cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= -cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= -cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= -cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= -cloud.google.com/go/orchestration 
v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= -cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= -cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/orgpolicy v1.11.0/go.mod h1:2RK748+FtVvnfuynxBzdnyu7sygtoZa1za/0ZfpOs1M= -cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= -cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/osconfig v1.12.0/go.mod h1:8f/PaYzoS3JMVfdfTubkowZYGmAhUCjjwnjqWI7NVBc= -cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= -cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= -cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= -cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= -cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= -cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= -cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= -cloud.google.com/go/policytroubleshooter v1.7.1/go.mod h1:0NaT5v3Ag1M7U5r0GfDCpUFkWd9YqpubBWsQlhanRv0= -cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= -cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= -cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= -cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= -cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= 
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= -cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= -cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= -cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= -cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= -cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= -cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= -cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= -cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= -cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= -cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= -cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= -cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= -cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= -cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= -cloud.google.com/go/redis v1.11.0/go.mod 
h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= -cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= -cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= -cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= -cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= -cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= -cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= -cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= -cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= -cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= -cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= -cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= -cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= -cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= -cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= -cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= -cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= -cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= -cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= -cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= -cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= -cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= -cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= -cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= -cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= -cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= -cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= -cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= -cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= -cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= -cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= -cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= -cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= 
-cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= -cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= -cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= -cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= -cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= -cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= -cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= -cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= -cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= -cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= -cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= -cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= -cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= -cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= -cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= -cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= -cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= -cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= -cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= -cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= -cloud.google.com/go/servicedirectory v1.10.1/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= -cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= -cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= -cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= -cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= -cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= -cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= -cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= -cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= -cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= -cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= -cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= -cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= -cloud.google.com/go/shell v1.7.1/go.mod 
h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= -cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= -cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= -cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= -cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= -cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= -cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= -cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= -cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= -cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= -cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= -cloud.google.com/go/speech v1.17.1/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= -cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= -cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= -cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= -cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= -cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= -cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= -cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= -cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= -cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= -cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= -cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= -cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= -cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= -cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= -cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= -cloud.google.com/go/texttospeech v1.6.0/go.mod 
h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= -cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= -cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= -cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= -cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= -cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= -cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= -cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= -cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= -cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= -cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= -cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= -cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= -cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= -cloud.google.com/go/translate v1.8.1/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= -cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= -cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= -cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= -cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= -cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= -cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= -cloud.google.com/go/video v1.17.1/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= -cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= -cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= -cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= -cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= -cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= -cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= -cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= -cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= -cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= -cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= -cloud.google.com/go/vision/v2 v2.7.0/go.mod 
h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= -cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= -cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= -cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= -cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= -cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= -cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= -cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= -cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= -cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= -cloud.google.com/go/vmwareengine v0.4.1/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= -cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= -cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= -cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= -cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= -cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= -cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= -cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= -cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= -cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= -cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= -cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= -cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= -cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= -cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= -cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= -cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= -cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= -cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= -git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb 
v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= -github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= -github.com/alecthomas/kingpin/v2 v2.3.1/go.mod h1:oYL5vtsvEHZGHxU7DMp32Dvx+qL+ptGn6lWaot2vCNE= -github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= -github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= -github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= -github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= -github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator 
v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -816,133 +67,75 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230428030218-4003588d1b74/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s= github.com/container-storage-interface/spec v1.6.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s= -github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI= -github.com/container-storage-interface/spec v1.8.0/go.mod h1:ROLik+GhPslwwWRNFF1KasPzroNARibH2rfz1rkg4H0= +github.com/container-storage-interface/spec v1.10.0 h1:YkzWPV39x+ZMTa6Ax2czJLLwpryrQ+dPesB34mrRMXA= +github.com/container-storage-interface/spec v1.10.0/go.mod h1:DtUvaQszPml1YJfIK7c00mlv6/g4wNMLanLgiUbKFRI= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 
v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= -github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= -github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= -github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= -github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= -github.com/envoyproxy/protoc-gen-validate v1.0.1/go.mod h1:0vj8bNkYbSTNS2PIyH87KZaeN4x9zpL9Qt8fQC7d+vs= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod 
h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= -github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= -github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= -github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= -github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= -github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -955,7 +148,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -972,19 +164,15 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cel-go v0.17.7 h1:6ebJFzu1xO2n7TLtN+UBqShGBhlD85bhvglh5DpcfqQ= -github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY= -github.com/google/flatbuffers v2.0.8+incompatible/go.mod 
h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= +github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -995,24 +183,18 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -1023,140 +205,75 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= -github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= -github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= -github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= -github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= -github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw= -github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= 
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= 
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubernetes-csi/csi-lib-utils v0.11.0 h1:FHWOBtAZBA/hVk7v/qaXgG9Sxv0/n06DebPFuDwumqg= -github.com/kubernetes-csi/csi-lib-utils v0.11.0/go.mod h1:BmGZZB16L18+9+Lgg9YWwBKfNEHIDdgGfAyuW6p2NV0= +github.com/kubernetes-csi/csi-lib-utils v0.19.0 h1:3sT8mL9+St2acyrEtuR7CQ5L78GR4lgsb+sfon9tGfA= +github.com/kubernetes-csi/csi-lib-utils v0.19.0/go.mod h1:lBuMKvoyd8c3EG+itmnVWApLDHnLkU7ibxxZSPuOw0M= github.com/kubernetes-csi/csi-test/v4 v4.4.0 
h1:r0mnAwDURI24Vw3a/LyA/ga11yD5ZGuU7+REO35Na9s= github.com/kubernetes-csi/csi-test/v4 v4.4.0/go.mod h1:t1RzseMZJKy313nezI/d7TolbbiKpUZM3SXQvXxOX0w= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= -github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= -github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= -github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= +github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -1168,142 +285,65 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= -github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= -github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw= -github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= -github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc= -github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk= -github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= -github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= -github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= -github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= -github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= -github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= -github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= -github.com/onsi/gomega v1.26.0/go.mod 
h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= -github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw= -github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw= -github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= -github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4= -github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/runc v1.1.13 h1:98S2srgG9vw0zWcDpFMn5TRrh8kLxa/5OFUstuUhmRs= +github.com/opencontainers/runc v1.1.13/go.mod h1:R016aXacfp/gwQBYw2FDGa9m+n6atbLWrYY8hNMT/sA= +github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78 h1:R5M2qXZiK/mWPMT4VldCOiSL9HIAMuxQZWdG0CSM5+4= +github.com/opencontainers/runtime-spec v1.0.3-0.20220909204839-494a5a6aca78/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= -github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod 
h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/prometheus/client_model v0.6.1 
h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= +github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= -github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod 
h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= -github.com/xhit/go-str2duration v1.2.0/go.mod h1:3cPSlfZlUHVlneIVfePFWcJZsuwf+P1v2SRTV4cUmp4= -github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1311,122 +351,76 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= -go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= 
-go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= -go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= -go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= -go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= -go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= -go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= -go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= -go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= -go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= -go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= -go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= -go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= -go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg= -go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= +go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI= +go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= +go.etcd.io/etcd/api/v3 v3.5.14 h1:vHObSCxyB9zlF60w7qzAdTcGaglbJOpSj1Xj9+WGxq0= +go.etcd.io/etcd/api/v3 v3.5.14/go.mod h1:BmtWcRlQvwa1h3G2jvKYwIQy4PkHlDej5t7uLMUdJUU= +go.etcd.io/etcd/client/pkg/v3 v3.5.14 h1:SaNH6Y+rVEdxfpA2Jr5wkEvN6Zykme5+YnbCkxvuWxQ= +go.etcd.io/etcd/client/pkg/v3 v3.5.14/go.mod h1:8uMgAokyG1czCtIdsq+AGyYQMvpIKnSvPjFMunkgeZI= +go.etcd.io/etcd/client/v2 v2.305.13 h1:RWfV1SX5jTU0lbCvpVQe3iPQeAHETWdOTb6pxhd77C8= +go.etcd.io/etcd/client/v2 v2.305.13/go.mod h1:iQnL7fepbiomdXMb3om1rHq96htNNGv2sJkEcZGDRRg= +go.etcd.io/etcd/client/v3 v3.5.14 h1:CWfRs4FDaDoSz81giL7zPpZH2Z35tbOrAJkkjMqOupg= +go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk= +go.etcd.io/etcd/pkg/v3 v3.5.13 h1:st9bDWNsKkBNpP4PR1MvM/9NqUPfvYZx/YXegsYEH8M= +go.etcd.io/etcd/pkg/v3 v3.5.13/go.mod h1:N+4PLrp7agI/Viy+dUYpX7iRtSPvKq+w8Y14d1vX+m0= +go.etcd.io/etcd/raft/v3 v3.5.13 h1:7r/NKAOups1YnKcfro2RvGGo2PTuizF/xh26Z2CTAzA= +go.etcd.io/etcd/raft/v3 v3.5.13/go.mod h1:uUFibGLn2Ksm2URMxN1fICGhk8Wu96EfDQyuLhAcAmw= +go.etcd.io/etcd/server/v3 v3.5.13 h1:V6KG+yMfMSqWt+lGnhFpP5z5dRUj1BDRJ5k1fQ9DFok= +go.etcd.io/etcd/server/v3 v3.5.13/go.mod h1:K/8nbsGupHqmr5MkgaZpLlH1QdX1pcNQLAkODy44XcQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 h1:ZOLJc06r4CB42laIXg/7udr0pbZyuAihN10A/XuiQRY= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 
h1:KfYpVmrjI7JuToy5k8XV3nkapjWx48k4E4JOtVstzQI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0/go.mod h1:SeQhzAEccGVZVEy7aH87Nh0km+utSpo1pTv6eMMop48= -go.opentelemetry.io/otel v1.18.0/go.mod h1:9lWqYO0Db579XzVuCKFNPDl4s73Voa+zEck3wHaAYQI= -go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= -go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 h1:3d+S281UTjM+AbF31XSOYn1qXn3BgIdWl8HNEpx08Jk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0/go.mod h1:0+KuTDyKL4gjKCF75pHOX4wuzYDUZYfAQdSu43o+Z2I= -go.opentelemetry.io/otel/metric v1.18.0/go.mod h1:nNSpsVDjWGfb7chbRLUNW+PBNdcSTHD4Uu5pfFMOI0k= -go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= -go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= -go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= -go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= -go.opentelemetry.io/otel/trace v1.18.0/go.mod h1:T2+SGJGuYZY3bjj5rgh/hN7KIrlpWC5nS8Mjvzckz+0= -go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= -go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= 
-go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= -go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= -golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto 
v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 h1:tnebWN09GYg9OLPss1KXj8txwZc6X6uMr6VFdcGNbHw= -golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod 
h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= -golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1438,7 +432,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1450,22 +443,11 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1473,7 +455,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1494,49 +475,14 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net 
v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= -golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1546,29 +492,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= -golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1580,23 +505,13 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= 
-golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1608,9 +523,7 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1622,8 +535,6 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1632,115 +543,38 @@ 
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.4.0/go.mod 
h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.1.0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1753,9 +587,7 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1775,7 +607,6 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1785,7 +616,6 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools 
v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1794,44 +624,14 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= -golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= -golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= -golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= -golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= -golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= -golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gomodules.xyz/jsonpatch/v2 v2.3.0 
h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc= -gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= -gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= -gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1851,56 +651,12 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= 
-google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= -google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= -google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= -google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E= -google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= -google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= -google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1939,132 +695,13 @@ google.golang.org/genproto v0.0.0-20201209185603-f92720507ed4/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod 
h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= -google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod 
h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= -google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= -google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= -google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= -google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20230629202037-9506855d4529/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= -google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e h1:z3vDksarJxsAKM5dmEGv0GHwE2hKJ096wZra71Vs4sw= -google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod 
h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:8mL13HKkDa+IuJ8yruA3ci0q+0vsUz4m//+ottjwS5o= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= +google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= +google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -2083,36 +720,9 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= 
-google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= -google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= -google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= -google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2126,19 +736,15 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= 
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -2148,17 +754,14 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2168,116 +771,55 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= -k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= -k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= -k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= -k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= -k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/apiserver v0.29.2 h1:+Z9S0dSNr+CjnVXQePG8TcBWHr3Q7BmAr7NraHvsMiQ= -k8s.io/apiserver v0.29.2/go.mod h1:B0LieKVoyU7ykQvPFm7XSdIHaCHSzCzQWPFa5bqbeMQ= -k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= -k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= -k8s.io/cloud-provider v0.29.2 h1:ghKNXoQmeP8Fj/YTJNR6xQOzNrKXt6YZyy6mOEEa3yg= -k8s.io/cloud-provider v0.29.2/go.mod h1:KAp+07AUGmxcLnoLY5FndU4hj6158KMbiviNgctNRUk= -k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= -k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= -k8s.io/component-helpers v0.29.2 
h1:1kTIanIdqUVG2nW3e2ENVEaYbZKphqPgEdCmJvk71aw= -k8s.io/component-helpers v0.29.2/go.mod h1:gFc/p60rYtpD8UCcNfPCmbokHT2uy0yDpmr/KKUMNAw= -k8s.io/controller-manager v0.29.2 h1:S99UKzjvyFWG4WZWaWQ+iu64X9axwzbi4152tFd73+4= -k8s.io/controller-manager v0.29.2/go.mod h1:xghbiyv5l/SVA5yVvRuGDmNVJEGl7MQqPAD0hvjZLhM= -k8s.io/csi-translation-lib v0.29.2 h1:TJVZTzR7gj6+HSb+jJxLUxnAuwrEy71IxhJ4nmTzyjE= -k8s.io/csi-translation-lib v0.29.2/go.mod h1:vbSYY4c6mVPwTHAvb5V3CHlq/dmQFIZC1SJOsaFiY3I= -k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0= +k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/apiserver v0.31.2 h1:VUzOEUGRCDi6kX1OyQ801m4A7AUPglpsmGvdsekmcI4= +k8s.io/apiserver v0.31.2/go.mod h1:o3nKZR7lPlJqkU5I3Ove+Zx3JuoFjQobGX1Gctw6XuE= +k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= +k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= +k8s.io/cloud-provider v0.31.2 h1:ZdstxJOcHzP45j8giE79VJmgNckBbw1ldtIqnhZvKIA= +k8s.io/cloud-provider v0.31.2/go.mod h1:+4L8YpYCEx1Ko5EKS/PzXyBY5YREwDKChsJjAxHRf08= +k8s.io/component-base v0.31.2 h1:Z1J1LIaC0AV+nzcPRFqfK09af6bZ4D1nAOpWsy9owlA= +k8s.io/component-base v0.31.2/go.mod h1:9PeyyFN/drHjtJZMCTkSpQJS3U9OXORnHQqMLDz0sUQ= +k8s.io/component-helpers v0.31.2 h1:V2yjoNeyg8WfvwrJwzfYz+RUwjlbcAIaDaHEStBbaZM= +k8s.io/component-helpers v0.31.2/go.mod h1:cNz+1ck38R0qWrjcw/rhQgGP6+Gwgw8ngr2ziDNmSXM= +k8s.io/controller-manager v0.31.2 h1:S5FPrZo6wpcTwQHfwcklOkr3MKGvKYtroOtCqri67Ns= +k8s.io/controller-manager v0.31.2/go.mod h1:+MymA0Dg2pC+hYYNsca9shRZbrkK3HKIg2B11p6eC+Q= +k8s.io/csi-translation-lib v0.31.2 h1:nCOE28rTUVOjwUKs4wl8wIZBHEnbQU5MkI0P6YM/Ylg= +k8s.io/csi-translation-lib v0.31.2/go.mod h1:X0paKG/WAqDyrfpiTyj8ga4srG0guOm2Sj2pNi4kP10= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kms v0.29.2 h1:MDsbp98gSlEQs7K7dqLKNNTwKFQRYYvO4UOlBOjNy6Y= -k8s.io/kms v0.29.2/go.mod h1:s/9RC4sYRZ/6Tn6yhNjbfJuZdb8LzlXhdlBnKizeFDo= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/kubectl v0.29.2 h1:uaDYaBhumvkwz0S2XHt36fK0v5IdNgL7HyUniwb2IUo= -k8s.io/kubectl v0.29.2/go.mod h1:BhizuYBGcKaHWyq+G7txGw2fXg576QbPrrnQdQDZgqI= -k8s.io/kubelet v0.29.2 h1:bQ2StqkUqPCFNLtGLsb3v3O2LKQHXNMju537zOGboRg= -k8s.io/kubelet v0.29.2/go.mod h1:i5orNPqW/fAMrqptbCXFW/vLBBP12TZZc41IrrvF7SY= -k8s.io/kubernetes v1.29.2 h1:8hh1cntqdulanjQt7wSSSsJfBgOyx6fUdFWslvGL5m0= -k8s.io/kubernetes v1.29.2/go.mod 
h1:xZPKU0yO0CBbLTnbd+XGyRmmtmaVuJykDb8gNCkeeUE= -k8s.io/mount-utils v0.29.2 h1:FrUfgvOo63nqJRPXKoqN/DW1lMnR/y0pzpFErKh6p2o= -k8s.io/mount-utils v0.29.2/go.mod h1:9IWJTMe8tG0MYMLEp60xK9GYVeCdA3g4LowmnVi+t9Y= -k8s.io/pod-security-admission v0.29.2 h1:0/hhZzk1rPmUG2D02OHPWoZgo/3D5Wi/TXfwByD3xLs= -k8s.io/pod-security-admission v0.29.2/go.mod h1:HBi3TJjRgPJmzdkbqtTxZshMf74ppA7Hth4dxGmUZj0= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= -modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= -modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= -modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= -modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI= -modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= -modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= -modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g= -modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= -modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= -modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= -modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= -modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= -modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= -modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= -modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= -modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA= -modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0= -modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= -modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= -modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= -modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.5.0/go.mod 
h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= -modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0= -modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= -modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= -modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= -modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0= -modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kms v0.31.2 h1:pyx7l2qVOkClzFMIWMVF/FxsSkgd+OIGH7DecpbscJI= +k8s.io/kms v0.31.2/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94= +k8s.io/kube-openapi v0.0.0-20241009091222-67ed5848f094 h1:MErs8YA0abvOqJ8gIupA1Tz6PKXYUw34XsGlA7uSL1k= +k8s.io/kube-openapi v0.0.0-20241009091222-67ed5848f094/go.mod h1:7ioBJr1A6igWjsR2fxq2EZ0mlMwYLejazSIc2bzMp2U= +k8s.io/kubectl v0.31.2 h1:gTxbvRkMBwvTSAlobiTVqsH6S8Aa1aGyBcu5xYLsn8M= +k8s.io/kubectl v0.31.2/go.mod h1:EyASYVU6PY+032RrTh5ahtSOMgoDRIux9V1JLKtG5xM= +k8s.io/kubelet v0.31.2 h1:6Hytyw4LqWqhgzoi7sPfpDGClu2UfxmPmaiXPC4FRgI= +k8s.io/kubelet v0.31.2/go.mod h1:0E4++3cMWi2cJxOwuaQP3eMBa7PSOvAFgkTPlVc/2FA= +k8s.io/kubernetes v1.31.2 h1:VNSu4O7Xn5FFRsh9ePXyEPg6ucR21fOftarSdi053Gs= +k8s.io/kubernetes v1.31.2/go.mod h1:9xmT2buyTYj8TRKwRae7FcuY8k5+xlxv7VivvO0KKfs= +k8s.io/mount-utils v0.31.2 h1:Q0ygX92Lj9d1wcObAzj+JZ4oE7CNKZrqSOn1XcIS+y4= +k8s.io/mount-utils v0.31.2/go.mod h1:HV/VYBUGqYUj4vt82YltzpWvgv8FPg0G9ItyInT3NPU= +k8s.io/pod-security-admission v0.31.2 h1:GQWWShWxYPJSRcMyN3xA1xyYa3ROrAaBBiP9BdIf5Z4= +k8s.io/pod-security-admission v0.31.2/go.mod h1:LXpZT9KdccDH/We169EycuFC79iXmiU4mgpUEJDrf9Q= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod 
h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= -sigs.k8s.io/controller-runtime v0.15.3 h1:L+t5heIaI3zeejoIyyvLQs5vTVu/67IU2FfisVzFlBc= -sigs.k8s.io/controller-runtime v0.15.3/go.mod h1:kp4jckA4vTx281S/0Yk2LFEEQe67mjg+ev/yknv47Ds= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.19.1 h1:Son+Q40+Be3QWb+niBXAg2vFiYWolDjjRfO8hn/cxOk= +sigs.k8s.io/controller-runtime v0.19.1/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/check-go-version.sh b/hack/check-go-version.sh deleted file mode 100755 index e27eb5dd..00000000 --- a/hack/check-go-version.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2022 NetApp, Inc. All Rights Reserved. -# Licensed under the Apache License, Version 2.0. - -# The GO_BUILD_VERSION is the official go version we are building the BeeGFS CSI driver with. -GO_BUILD_VERSION="go1.22.0" -INSTALLED_VERSION=$(go version | { read _ _ ver _; echo ${ver}; } ) || { echo >&2 "ERROR: determining version of go failed"; exit 1; } - -if [ "$INSTALLED_VERSION" == "$GO_BUILD_VERSION" ] -then - echo "The installed go version (${INSTALLED_VERSION}) matches the expected build go version (${GO_BUILD_VERSION})." - -else - echo "WARNING - we expect to build with go version ${GO_BUILD_VERSION} but found go version ${INSTALLED_VERSION}" -fi \ No newline at end of file diff --git a/operator/Makefile b/operator/Makefile index 734688e7..54f5c6de 100644 --- a/operator/Makefile +++ b/operator/Makefile @@ -3,7 +3,7 @@ # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -VERSION ?= 1.6.0 +VERSION ?= 1.7.0 # BUILD_PLATFORMS contains a set of tuples [os arch buildx_platform suffix base_image addon_image] # separated by semicolon. 
All supported architectures are listed here so other targets used for @@ -181,7 +181,7 @@ ENVTEST ?= $(LOCALBIN)/setup-envtest ## Tool Versions KUSTOMIZE_VERSION ?= v3.8.7 -CONTROLLER_TOOLS_VERSION ?= v0.14.0 +CONTROLLER_TOOLS_VERSION ?= v0.16.0 KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" .PHONY: kustomize diff --git a/operator/bundle/manifests/beegfs-csi-driver-operator.clusterserviceversion.yaml b/operator/bundle/manifests/beegfs-csi-driver-operator.clusterserviceversion.yaml index 75b1b66e..6f4716c9 100644 --- a/operator/bundle/manifests/beegfs-csi-driver-operator.clusterserviceversion.yaml +++ b/operator/bundle/manifests/beegfs-csi-driver-operator.clusterserviceversion.yaml @@ -63,7 +63,7 @@ metadata: ] capabilities: Seamless Upgrades categories: Storage - containerImage: ghcr.io/thinkparq/beegfs-csi-driver-operator:v1.6.0 + containerImage: ghcr.io/thinkparq/beegfs-csi-driver-operator:v1.7.0 createdAt: 2021-06-30:03:11:00 description: Install and manage the BeeGFS CSI driver operatorframework.io/suggested-namespace: beegfs-csi @@ -71,7 +71,7 @@ metadata: operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 repository: https://github.com/ThinkParQ/beegfs-csi-driver support: ThinkParQ - name: beegfs-csi-driver-operator.v1.6.0 + name: beegfs-csi-driver-operator.v1.7.0 namespace: placeholder spec: apiservicedefinitions: {} @@ -407,15 +407,6 @@ spec: - apps resources: - daemonsets - verbs: - - create - - get - - list - - update - - watch - - apiGroups: - - apps - resources: - statefulsets verbs: - create @@ -453,6 +444,8 @@ spec: - "" resources: - configmaps + - secrets + - serviceaccounts verbs: - create - get @@ -472,6 +465,7 @@ spec: - "" resources: - nodes + - pods verbs: - get - list @@ -503,56 +497,12 @@ spec: - list - patch - watch - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch - - apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create - - get - - list - - watch - apiGroups: - rbac.authorization.k8s.io resources: - clusterrolebindings - verbs: - - create - - delete - - get - - list - - watch - - apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterroles - verbs: - - create - - delete - - get - - list - - watch - - apiGroups: - - rbac.authorization.k8s.io - resources: - rolebindings + - roles verbs: - create - delete @@ -562,12 +512,13 @@ spec: - apiGroups: - rbac.authorization.k8s.io resources: - - roles + - clusterroles verbs: - create - delete - get - list + - update - watch - apiGroups: - security.openshift.io @@ -591,13 +542,6 @@ spec: - storage.k8s.io resources: - csinodes - verbs: - - get - - list - - watch - - apiGroups: - - storage.k8s.io - resources: - storageclasses verbs: - get @@ -654,7 +598,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: ghcr.io/thinkparq/beegfs-csi-driver-operator:v1.6.0 + image: ghcr.io/thinkparq/beegfs-csi-driver-operator:v1.7.0 livenessProbe: httpGet: path: /healthz @@ -749,4 +693,4 @@ spec: provider: name: ThinkParQ url: https://www.thinkparq.com - version: 1.6.0 + version: 1.7.0 diff --git a/operator/bundle/manifests/beegfs.csi.netapp.com_beegfsdrivers.yaml b/operator/bundle/manifests/beegfs.csi.netapp.com_beegfsdrivers.yaml index d521f7b4..47b26a45 100644 --- a/operator/bundle/manifests/beegfs.csi.netapp.com_beegfsdrivers.yaml +++ 
b/operator/bundle/manifests/beegfs.csi.netapp.com_beegfsdrivers.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.0 creationTimestamp: null name: beegfsdrivers.beegfs.csi.netapp.com spec: @@ -132,11 +132,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -147,6 +145,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -190,11 +194,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -205,6 +207,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -248,11 +256,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -263,6 +269,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -306,11 +318,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -321,6 +331,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -364,11 +380,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. 
It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -379,6 +393,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -473,11 +493,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -505,11 +527,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic weight: @@ -522,6 +546,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -566,11 +591,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -598,14 +625,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object @@ -664,11 +694,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -696,11 +728,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic weight: @@ -713,6 +747,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -757,11 +792,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -789,14 +826,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object @@ -1053,16 +1093,8 @@ spec: properties: conditions: items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. 
For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -1103,12 +1135,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/operator/config/crd/bases/beegfs.csi.netapp.com_beegfsdrivers.yaml b/operator/config/crd/bases/beegfs.csi.netapp.com_beegfsdrivers.yaml index ffa38287..9087206b 100644 --- a/operator/config/crd/bases/beegfs.csi.netapp.com_beegfsdrivers.yaml +++ b/operator/config/crd/bases/beegfs.csi.netapp.com_beegfsdrivers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.0 name: beegfsdrivers.beegfs.csi.netapp.com spec: group: beegfs.csi.netapp.com @@ -126,11 +126,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -141,6 +139,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -184,11 +188,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -199,6 +201,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -242,11 +250,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. 
- This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -257,6 +263,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -300,11 +312,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -315,6 +325,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -358,11 +374,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -373,6 +387,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -467,11 +487,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -499,11 +521,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic weight: @@ -516,6 +540,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -560,11 +585,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. 
@@ -592,14 +619,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object @@ -658,11 +688,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -690,11 +722,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic weight: @@ -707,6 +741,7 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: description: |- If the affinity requirements specified by this field are not met at @@ -751,11 +786,13 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: description: A list of node selector requirements by node's fields. @@ -783,14 +820,17 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object @@ -1047,16 +1087,8 @@ spec: properties: conditions: items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -1097,12 +1129,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/operator/config/manager/kustomization.yaml b/operator/config/manager/kustomization.yaml index feaed797..57c909ec 100644 --- a/operator/config/manager/kustomization.yaml +++ b/operator/config/manager/kustomization.yaml @@ -13,4 +13,4 @@ kind: Kustomization images: - name: controller newName: ghcr.io/thinkparq/beegfs-csi-driver-operator - newTag: v1.6.0 + newTag: v1.7.0 diff --git a/operator/config/manifests/bases/beegfs-csi-driver-operator.clusterserviceversion.yaml b/operator/config/manifests/bases/beegfs-csi-driver-operator.clusterserviceversion.yaml index d6b1a2c8..e8e29186 100644 --- a/operator/config/manifests/bases/beegfs-csi-driver-operator.clusterserviceversion.yaml +++ b/operator/config/manifests/bases/beegfs-csi-driver-operator.clusterserviceversion.yaml @@ -5,7 +5,7 @@ metadata: alm-examples: '[]' capabilities: Seamless Upgrades categories: Storage - containerImage: ghcr.io/thinkparq/beegfs-csi-driver-operator:v1.6.0 + containerImage: ghcr.io/thinkparq/beegfs-csi-driver-operator:v1.7.0 createdAt: 2021-06-30:03:11:00 description: Install and manage the BeeGFS CSI driver operatorframework.io/suggested-namespace: beegfs-csi diff --git a/operator/config/rbac/role.yaml b/operator/config/rbac/role.yaml index 4fb66858..3b047e27 100644 --- a/operator/config/rbac/role.yaml +++ b/operator/config/rbac/role.yaml @@ -8,15 +8,6 @@ rules: - apps resources: - daemonsets - verbs: - - create - - get - - list - - update - - watch -- apiGroups: - - apps - resources: - statefulsets verbs: - create @@ -54,6 +45,8 @@ rules: - "" resources: - configmaps + - secrets + - serviceaccounts verbs: - create - get @@ -73,6 +66,7 @@ rules: - "" resources: - nodes + - pods verbs: - get - list @@ -104,56 +98,12 @@ rules: - list - patch - watch -- apiGroups: - - "" - resources: - - pods - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch -- apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create - - get - - list - - watch - apiGroups: - rbac.authorization.k8s.io resources: - clusterrolebindings - verbs: - - create - - delete - - get - - list - - watch -- apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterroles - verbs: - - create - - delete - - get - - list - - watch -- apiGroups: - - rbac.authorization.k8s.io - resources: - rolebindings + - roles verbs: - create - delete @@ -163,12 +113,13 @@ rules: - apiGroups: - rbac.authorization.k8s.io resources: - - roles + - clusterroles verbs: - create - delete - get - list + - update - watch - apiGroups: - security.openshift.io @@ -192,13 +143,6 @@ rules: - storage.k8s.io resources: - csinodes - verbs: - - get - - list - - watch -- apiGroups: - - storage.k8s.io - resources: - storageclasses verbs: - get diff --git a/operator/controllers/beegfsdriver_controller.go b/operator/controllers/beegfsdriver_controller.go index 580266d2..e3ecd5a3 100644 --- a/operator/controllers/beegfsdriver_controller.go +++ b/operator/controllers/beegfsdriver_controller.go @@ -59,7 +59,7 @@ type BeegfsDriverReconciler struct { // The operator must have the following permissions to deploy the driver. 
//+kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create -//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=get;list;watch;create;delete +//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=get;list;watch;create;delete;update //+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,verbs=get;list;watch;create;delete //+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch;create;delete //+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;delete diff --git a/operator/hack/minikube_deploy_all_examples.sh b/operator/hack/minikube_deploy_all_examples.sh index 2dcc5e8d..9e4f2132 100755 --- a/operator/hack/minikube_deploy_all_examples.sh +++ b/operator/hack/minikube_deploy_all_examples.sh @@ -8,7 +8,7 @@ set -euo pipefail # IMPORTANT: This script is not idempotent, notably the step to create directories in BeeGFS. -export BEEGFS_VERSION=7.4.2 +export BEEGFS_VERSION=7.4.5 export BEEGFS_SECRET=mysecret export BEEGFS_REGISTRY=ghcr.io/thinkparq/ diff --git a/operator/main.go b/operator/main.go index 75f49326..55ff2c1a 100644 --- a/operator/main.go +++ b/operator/main.go @@ -28,8 +28,11 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" beegfsv1 "github.com/netapp/beegfs-csi-driver/operator/api/v1" "github.com/netapp/beegfs-csi-driver/operator/controllers" @@ -63,12 +66,35 @@ func main() { ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + // Watch all namespaces, or only the namespace provided by the Downward API: + var defaultNamespaces map[string]cache.Config + if driverNamespace := os.Getenv("BEEGFS_CSI_DRIVER_NAMESPACE"); driverNamespace != "" { + defaultNamespaces = map[string]cache.Config{ + driverNamespace: {}, + } + } + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, - // Watch the namespace provided by the Downward API or all namespaces. - Namespace: os.Getenv("BEEGFS_CSI_DRIVER_NAMESPACE"), - MetricsBindAddress: metricsAddr, - Port: 9443, + // The old Namespace field was deprecated in: + // https://github.com/kubernetes-sigs/controller-runtime/commit/0377e0f7a41bd9d88d43811192309b193c9cd793#diff-d500fbd6a2aa620607ca5e2a7c3ac4f1a4c82309d1a549561e92abfcb18f2f0e. + // More details are in the commit that added the DefaultNamespaces field to cache.Options: + // https://github.com/kubernetes-sigs/controller-runtime/commit/3e35cab1ea29145d8699259b079633a2b8cfc116#diff-964e351ee2375d359c78d69e514c4edc42577219761c4475f391ed2daf715e51. + Cache: cache.Options{ + DefaultNamespaces: defaultNamespaces, + }, + // The old MetricsBindAddress field was deprecated in: + // https://github.com/kubernetes-sigs/controller-runtime/commit/e59161ee8f41b2f24953419533dca84d30262c21#diff-d500fbd6a2aa620607ca5e2a7c3ac4f1a4c82309d1a549561e92abfcb18f2f0eR36. + Metrics: metricsserver.Options{ + BindAddress: metricsAddr, + }, + // The old Port field was deprecated in: + // https://github.com/kubernetes-sigs/controller-runtime/commit/f8c6ee427c500fb6ff8f8ee595110661c81da4e7#diff-d500fbd6a2aa620607ca5e2a7c3ac4f1a4c82309d1a549561e92abfcb18f2f0e. 
+ WebhookServer: &webhook.DefaultServer{ + Options: webhook.Options{ + Port: 9443, + }, + }, HealthProbeBindAddress: probeAddr, LeaderElection: enableLeaderElection, LeaderElectionID: "0d697945.csi.netapp.com", diff --git a/pkg/beegfs/controllerserver.go b/pkg/beegfs/controllerserver.go index cdd591bb..6ea0df9f 100644 --- a/pkg/beegfs/controllerserver.go +++ b/pkg/beegfs/controllerserver.go @@ -57,6 +57,7 @@ type controllerServer struct { volumeIDsInFlight *threadSafeStringLock volumeStatusMap *threadSafeStatusMap nodeUnstageTimeout uint64 + csi.UnimplementedControllerServer } func newControllerServer(nodeID string, pluginConfig beegfsv1.PluginConfig, clientConfTemplatePath, csDataDir string, @@ -396,6 +397,10 @@ func (cs *controllerServer) ControllerGetVolume(ctx context.Context, in *csi.Con return nil, status.Error(codes.Unimplemented, "") } +func (cs *controllerServer) ControllerModifyVolume(ctx context.Context, in *csi.ControllerModifyVolumeRequest) (*csi.ControllerModifyVolumeResponse, error) { + return nil, status.Error(codes.Unimplemented, "") +} + // getControllerServiceCapabilities will convert a slice of ControllerServiceCapability_RPC_Type entries to a slice // of ControllerServiceCapability structs. This makes it easier to define a set of ControllerServiceCapabilities by Type // and then pass them on with the more complicated ControllerServiceCapability structure. diff --git a/pkg/beegfs/identityserver.go b/pkg/beegfs/identityserver.go index 2e0d1a67..68b5b170 100644 --- a/pkg/beegfs/identityserver.go +++ b/pkg/beegfs/identityserver.go @@ -31,6 +31,7 @@ import ( type identityServer struct { name string version string + csi.UnimplementedIdentityServer } func newIdentityServer(name, version string) *identityServer { diff --git a/pkg/beegfs/nodeserver.go b/pkg/beegfs/nodeserver.go index 3bf4a892..501b9479 100644 --- a/pkg/beegfs/nodeserver.go +++ b/pkg/beegfs/nodeserver.go @@ -46,6 +46,7 @@ type nodeServer struct { pluginConfig beegfsv1.PluginConfig clientConfTemplatePath string mounter mount.Interface + csi.UnimplementedNodeServer } func newNodeServer(nodeID string, pluginConfig beegfsv1.PluginConfig, clientConfTemplatePath string) (*nodeServer, error) { diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 38cd7824..89e999a3 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -117,7 +117,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { var pluginConfig beegfsv1.PluginConfig err := yaml.UnmarshalStrict(driverConfigBytes, &pluginConfig) e2eframework.ExpectNoError(err, "expected to successfully unmarshal PluginConfig") - e2eframework.ExpectNotEqual(len(pluginConfig.FileSystemSpecificConfigs), 0, + gomega.Expect(len(pluginConfig.FileSystemSpecificConfigs)).NotTo(gomega.Equal(0), "expected PluginConfig to specifically reference at least one file system") beegfsDriver.SetPerFSConfigs(pluginConfig.FileSystemSpecificConfigs) beegfsDynamicDriver.SetPerFSConfigs(pluginConfig.FileSystemSpecificConfigs) diff --git a/test/e2e/testsuites/beegfs.go b/test/e2e/testsuites/beegfs.go index 507c3d07..243beba8 100644 --- a/test/e2e/testsuites/beegfs.go +++ b/test/e2e/testsuites/beegfs.go @@ -210,7 +210,7 @@ func (b *beegfsTestSuite) DefineTests(tDriver storageframework.TestDriver, patte } unusedPool := utils.GetUnusedPoolId(validPools) // Ensure that we actually found an unused pool - e2eframework.ExpectNotEqual(unusedPool, "") + gomega.Expect(unusedPool).NotTo(gomega.Equal("")) // Now set the storageclass params to use the invalid storage pool id 
d.SetStorageClassParams(map[string]string{ @@ -319,7 +319,7 @@ func (b *beegfsTestSuite) DefineTests(tDriver storageframework.TestDriver, patte // Based on this commit https://github.com/kubernetes/kubernetes/commit/dfdf88d4faafa6fd39988832ea0ef6d668f490e9 // this is the new way to call ExecWithOptions. _, stdErr, err := e2epod.ExecWithOptions(f, execOptions) - e2eframework.ExpectError(err) // The touch should not be successful. + gomega.Expect(err).To(gomega.HaveOccurred()) // The touch should not be successful. // Added logging so we can see why this test is failing. // TODO(kcole, A472): Remove when the root cause of failures has been fixed. diff --git a/test/e2e/utils/utils.go b/test/e2e/utils/utils.go index 7baa935b..b452d195 100644 --- a/test/e2e/utils/utils.go +++ b/test/e2e/utils/utils.go @@ -107,13 +107,13 @@ func ArchiveServiceLogs(cs clientset.Interface, reportPath string) error { // Dump logs of each node pod's beegfs container. nodePods, err := GetRunningNodePods(cs) if err != nil { - e2eframework.Logf("Failed to get node pods due to error: %w", err) + e2eframework.Logf("Failed to get node pods due to error: %s", err) returnError = err } else { // Only proceed if we have node pods to get get logs from. for _, nodePod := range nodePods { logs, err := e2epod.GetPodLogs(context.TODO(), cs, nodePod.Namespace, nodePod.Name, "beegfs") if err != nil { - e2eframework.Logf("Failed to get logs for node container beegfs due to error: %w", err) + e2eframework.Logf("Failed to get logs for node container beegfs due to error: %s", err) if returnError == nil { returnError = err } @@ -126,13 +126,13 @@ func ArchiveServiceLogs(cs clientset.Interface, reportPath string) error { // Dump logs of controller pod's beegfs and csi-provisioner containers controllerPod, err := GetRunningControllerPod(cs) if err != nil { - e2eframework.Logf("Failed to get controller pod due to error: %w", err) + e2eframework.Logf("Failed to get controller pod due to error: %s", err) returnError = err } else { // Only proceed if we have a controller pod to get get logs from. for _, containerName := range []string{"beegfs", "csi-provisioner"} { logs, err := e2epod.GetPodLogs(context.TODO(), cs, controllerPod.Namespace, controllerPod.Name, containerName) if err != nil { - e2eframework.Logf("Failed to get logs for controller container %s due to error: %w", containerName, err) + e2eframework.Logf("Failed to get logs for controller container %s due to error: %s", containerName, err) if returnError == nil { returnError = err } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE deleted file mode 100644 index 52cf18e4..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright 2021 The ANTLR Project - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - 3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. 
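A note on the test/e2e/utils/utils.go hunk above: e2eframework.Logf formats its arguments with fmt.Sprintf-style verbs, and %w is only interpreted by fmt.Errorf when wrapping an error, so logging with %w produces a %!w(...) artifact instead of the error text. A minimal, self-contained sketch of the distinction (standard library only; the message strings are illustrative):

package main

import (
	"errors"
	"fmt"
)

func main() {
	base := errors.New("connection refused")

	// fmt.Errorf understands %w and keeps base in the unwrap chain.
	wrapped := fmt.Errorf("failed to get logs: %w", base)
	fmt.Println(errors.Is(wrapped, base)) // true

	// Sprintf-style formatting (what Logf-like helpers do) has no %w verb;
	// use %s or %v to print the error text.
	fmt.Println(fmt.Sprintf("failed to get logs: %s", base))
}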
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go deleted file mode 100644 index ab512126..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Package antlr implements the Go version of the ANTLR 4 runtime. - -# The ANTLR Tool - -ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, -or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. -From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface -(or visitor) that makes it easy to respond to the recognition of phrases of interest. - -# Code Generation - -ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a -runtime library, written specifically to support the generated code in the target language. This library is the -runtime for the Go target. - -To generate code for the go target, it is generally recommended to place the source grammar files in a package of -their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory -it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean -that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other -way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in -your IDE, or configuration in your CI system. - -Here is a general template for an ANTLR based recognizer in Go: - - . - ├── myproject - ├── parser - │ ├── mygrammar.g4 - │ ├── antlr-4.12.0-complete.jar - │ ├── error_listeners.go - │ ├── generate.go - │ ├── generate.sh - ├── go.mod - ├── go.sum - ├── main.go - └── main_test.go - -Make sure that the package statement in your grammar file(s) reflects the go package they exist in. -The generate.go file then looks like this: - - package parser - - //go:generate ./generate.sh - -And the generate.sh file will look similar to this: - - #!/bin/sh - - alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool' - antlr4 -Dlanguage=Go -no-visitor -package parser *.g4 - -depending on whether you want visitors or listeners or any other ANTLR options. - -From the command line at the root of your package “myproject” you can then simply issue the command: - - go generate ./... - -# Copyright Notice - -Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
- -Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root. - -[target languages]: https://github.com/antlr/antlr4/tree/master/runtime -[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt -*/ -package antlr diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go deleted file mode 100644 index 7619fa17..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic -// context). The syntactic context is a graph-structured stack node whose -// path(s) to the root is the rule invocation(s) chain used to arrive at the -// state. The semantic context is the tree of semantic predicates encountered -// before reaching an ATN state. -type ATNConfig interface { - Equals(o Collectable[ATNConfig]) bool - Hash() int - - GetState() ATNState - GetAlt() int - GetSemanticContext() SemanticContext - - GetContext() PredictionContext - SetContext(PredictionContext) - - GetReachesIntoOuterContext() int - SetReachesIntoOuterContext(int) - - String() string - - getPrecedenceFilterSuppressed() bool - setPrecedenceFilterSuppressed(bool) -} - -type BaseATNConfig struct { - precedenceFilterSuppressed bool - state ATNState - alt int - context PredictionContext - semanticContext SemanticContext - reachesIntoOuterContext int -} - -func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup - return &BaseATNConfig{ - state: old.state, - alt: old.alt, - context: old.context, - semanticContext: old.semanticContext, - reachesIntoOuterContext: old.reachesIntoOuterContext, - } -} - -func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig5(state, alt, context, SemanticContextNone) -} - -func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") // TODO: Necessary? 
- } - - return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext} -} - -func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) -} - -func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), semanticContext) -} - -func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext) -} - -func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, context, c.GetSemanticContext()) -} - -func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") - } - - return &BaseATNConfig{ - state: state, - alt: c.GetAlt(), - context: context, - semanticContext: semanticContext, - reachesIntoOuterContext: c.GetReachesIntoOuterContext(), - precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(), - } -} - -func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool { - return b.precedenceFilterSuppressed -} - -func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) { - b.precedenceFilterSuppressed = v -} - -func (b *BaseATNConfig) GetState() ATNState { - return b.state -} - -func (b *BaseATNConfig) GetAlt() int { - return b.alt -} - -func (b *BaseATNConfig) SetContext(v PredictionContext) { - b.context = v -} -func (b *BaseATNConfig) GetContext() PredictionContext { - return b.context -} - -func (b *BaseATNConfig) GetSemanticContext() SemanticContext { - return b.semanticContext -} - -func (b *BaseATNConfig) GetReachesIntoOuterContext() int { - return b.reachesIntoOuterContext -} - -func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) { - b.reachesIntoOuterContext = v -} - -// Equals is the default comparison function for an ATNConfig when no specialist implementation is required -// for a collection. -// -// An ATN configuration is equal to another if both have the same state, they -// predict the same alternative, and syntactic/semantic contexts are the same. 
-func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool { - if b == o { - return true - } else if o == nil { - return false - } - - var other, ok = o.(*BaseATNConfig) - - if !ok { - return false - } - - var equal bool - - if b.context == nil { - equal = other.context == nil - } else { - equal = b.context.Equals(other.context) - } - - var ( - nums = b.state.GetStateNumber() == other.state.GetStateNumber() - alts = b.alt == other.alt - cons = b.semanticContext.Equals(other.semanticContext) - sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed - ) - - return nums && alts && cons && sups && equal -} - -// Hash is the default hash function for BaseATNConfig, when no specialist hash function -// is required for a collection -func (b *BaseATNConfig) Hash() int { - var c int - if b.context != nil { - c = b.context.Hash() - } - - h := murmurInit(7) - h = murmurUpdate(h, b.state.GetStateNumber()) - h = murmurUpdate(h, b.alt) - h = murmurUpdate(h, c) - h = murmurUpdate(h, b.semanticContext.Hash()) - return murmurFinish(h, 4) -} - -func (b *BaseATNConfig) String() string { - var s1, s2, s3 string - - if b.context != nil { - s1 = ",[" + fmt.Sprint(b.context) + "]" - } - - if b.semanticContext != SemanticContextNone { - s2 = "," + fmt.Sprint(b.semanticContext) - } - - if b.reachesIntoOuterContext > 0 { - s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext) - } - - return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3) -} - -type LexerATNConfig struct { - *BaseATNConfig - lexerActionExecutor *LexerActionExecutor - passedThroughNonGreedyDecision bool -} - -func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone), - lexerActionExecutor: lexerActionExecutor, - } -} - -func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via -// the default comparator [ObjEqComparator]. 
-func (l *LexerATNConfig) Hash() int { - var f int - if l.passedThroughNonGreedyDecision { - f = 1 - } else { - f = 0 - } - h := murmurInit(7) - h = murmurUpdate(h, l.state.GetStateNumber()) - h = murmurUpdate(h, l.alt) - h = murmurUpdate(h, l.context.Hash()) - h = murmurUpdate(h, l.semanticContext.Hash()) - h = murmurUpdate(h, f) - h = murmurUpdate(h, l.lexerActionExecutor.Hash()) - h = murmurFinish(h, 6) - return h -} - -// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via -// the default comparator [ObjEqComparator]. -func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool { - if l == other { - return true - } - var othert, ok = other.(*LexerATNConfig) - - if l == other { - return true - } else if !ok { - return false - } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision { - return false - } - - var b bool - - if l.lexerActionExecutor != nil { - b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor) - } else { - b = othert.lexerActionExecutor != nil - } - - if b { - return false - } - - return l.BaseATNConfig.Equals(othert.BaseATNConfig) -} - -func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool { - var ds, ok = target.(DecisionState) - - return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go deleted file mode 100644 index 43e9b33f..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go +++ /dev/null @@ -1,441 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -type ATNConfigSet interface { - Hash() int - Equals(o Collectable[ATNConfig]) bool - Add(ATNConfig, *DoubleDict) bool - AddAll([]ATNConfig) bool - - GetStates() *JStore[ATNState, Comparator[ATNState]] - GetPredicates() []SemanticContext - GetItems() []ATNConfig - - OptimizeConfigs(interpreter *BaseATNSimulator) - - Length() int - IsEmpty() bool - Contains(ATNConfig) bool - ContainsFast(ATNConfig) bool - Clear() - String() string - - HasSemanticContext() bool - SetHasSemanticContext(v bool) - - ReadOnly() bool - SetReadOnly(bool) - - GetConflictingAlts() *BitSet - SetConflictingAlts(*BitSet) - - Alts() *BitSet - - FullContext() bool - - GetUniqueAlt() int - SetUniqueAlt(int) - - GetDipsIntoOuterContext() bool - SetDipsIntoOuterContext(bool) -} - -// BaseATNConfigSet is a specialized set of ATNConfig that tracks information -// about its elements and can combine similar configurations using a -// graph-structured stack. -type BaseATNConfigSet struct { - cachedHash int - - // configLookup is used to determine whether two BaseATNConfigSets are equal. We - // need all configurations with the same (s, i, _, semctx) to be equal. A key - // effectively doubles the number of objects associated with ATNConfigs. All - // keys are hashed by (s, i, _, pi), not including the context. Wiped out when - // read-only because a set becomes a DFA state. - configLookup *JStore[ATNConfig, Comparator[ATNConfig]] - - // configs is the added elements. - configs []ATNConfig - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. 
Can we track conflicts as they - // are added to save scanning configs later? - conflictingAlts *BitSet - - // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates - // we hit a pred while computing a closure operation. Do not make a DFA state - // from the BaseATNConfigSet in this case. TODO: How is this used by parsers? - dipsIntoOuterContext bool - - // fullCtx is whether it is part of a full context LL prediction. Used to - // determine how to merge $. It is a wildcard with SLL, but not for an LL - // context merge. - fullCtx bool - - // Used in parser and lexer. In lexer, it indicates we hit a pred - // while computing a closure operation. Don't make a DFA state from a. - hasSemanticContext bool - - // readOnly is whether it is read-only. Do not - // allow any code to manipulate the set if true because DFA states will point at - // sets and those must not change. It not, protect other fields; conflictingAlts - // in particular, which is assigned after readOnly. - readOnly bool - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. Can we track conflicts as they - // are added to save scanning configs later? - uniqueAlt int -} - -func (b *BaseATNConfigSet) Alts() *BitSet { - alts := NewBitSet() - for _, it := range b.configs { - alts.add(it.GetAlt()) - } - return alts -} - -func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet { - return &BaseATNConfigSet{ - cachedHash: -1, - configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst), - fullCtx: fullCtx, - } -} - -// Add merges contexts with existing configs for (s, i, pi, _), where s is the -// ATNConfig.state, i is the ATNConfig.alt, and pi is the -// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates -// dipsIntoOuterContext and hasSemanticContext when necessary. -func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool { - if b.readOnly { - panic("set is read-only") - } - - if config.GetSemanticContext() != SemanticContextNone { - b.hasSemanticContext = true - } - - if config.GetReachesIntoOuterContext() > 0 { - b.dipsIntoOuterContext = true - } - - existing, present := b.configLookup.Put(config) - - // The config was not already in the set - // - if !present { - b.cachedHash = -1 - b.configs = append(b.configs, config) // Track order here - return true - } - - // Merge a previous (s, i, pi, _) with it and save the result - rootIsWildcard := !b.fullCtx - merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache) - - // No need to check for existing.context because config.context is in the cache, - // since the only way to create new graphs is the "call rule" and here. We cache - // at both places. 
- existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) - - // Preserve the precedence filter suppression during the merge - if config.getPrecedenceFilterSuppressed() { - existing.setPrecedenceFilterSuppressed(true) - } - - // Replace the context because there is no need to do alt mapping - existing.SetContext(merged) - - return true -} - -func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] { - - // states uses the standard comparator provided by the ATNState instance - // - states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst) - - for i := 0; i < len(b.configs); i++ { - states.Put(b.configs[i].GetState()) - } - - return states -} - -func (b *BaseATNConfigSet) HasSemanticContext() bool { - return b.hasSemanticContext -} - -func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) { - b.hasSemanticContext = v -} - -func (b *BaseATNConfigSet) GetPredicates() []SemanticContext { - preds := make([]SemanticContext, 0) - - for i := 0; i < len(b.configs); i++ { - c := b.configs[i].GetSemanticContext() - - if c != SemanticContextNone { - preds = append(preds, c) - } - } - - return preds -} - -func (b *BaseATNConfigSet) GetItems() []ATNConfig { - return b.configs -} - -func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { - if b.readOnly { - panic("set is read-only") - } - - if b.configLookup.Len() == 0 { - return - } - - for i := 0; i < len(b.configs); i++ { - config := b.configs[i] - - config.SetContext(interpreter.getCachedContext(config.GetContext())) - } -} - -func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool { - for i := 0; i < len(coll); i++ { - b.Add(coll[i], nil) - } - - return false -} - -// Compare is a hack function just to verify that adding DFAstares to the known -// set works, so long as comparison of ATNConfigSet s works. For that to work, we -// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't -// know the order, so we do this inefficient hack. If this proves the point, then -// we can change the config set to a better structure. 
-func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool { - if len(b.configs) != len(bs.configs) { - return false - } - - for _, c := range b.configs { - found := false - for _, c2 := range bs.configs { - if c.Equals(c2) { - found = true - break - } - } - - if !found { - return false - } - - } - return true -} - -func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool { - if b == other { - return true - } else if _, ok := other.(*BaseATNConfigSet); !ok { - return false - } - - other2 := other.(*BaseATNConfigSet) - - return b.configs != nil && - b.fullCtx == other2.fullCtx && - b.uniqueAlt == other2.uniqueAlt && - b.conflictingAlts == other2.conflictingAlts && - b.hasSemanticContext == other2.hasSemanticContext && - b.dipsIntoOuterContext == other2.dipsIntoOuterContext && - b.Compare(other2) -} - -func (b *BaseATNConfigSet) Hash() int { - if b.readOnly { - if b.cachedHash == -1 { - b.cachedHash = b.hashCodeConfigs() - } - - return b.cachedHash - } - - return b.hashCodeConfigs() -} - -func (b *BaseATNConfigSet) hashCodeConfigs() int { - h := 1 - for _, config := range b.configs { - h = 31*h + config.Hash() - } - return h -} - -func (b *BaseATNConfigSet) Length() int { - return len(b.configs) -} - -func (b *BaseATNConfigSet) IsEmpty() bool { - return len(b.configs) == 0 -} - -func (b *BaseATNConfigSet) Contains(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.Contains(item) -} - -func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set -} - -func (b *BaseATNConfigSet) Clear() { - if b.readOnly { - panic("set is read-only") - } - - b.configs = make([]ATNConfig, 0) - b.cachedHash = -1 - b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) -} - -func (b *BaseATNConfigSet) FullContext() bool { - return b.fullCtx -} - -func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool { - return b.dipsIntoOuterContext -} - -func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) { - b.dipsIntoOuterContext = v -} - -func (b *BaseATNConfigSet) GetUniqueAlt() int { - return b.uniqueAlt -} - -func (b *BaseATNConfigSet) SetUniqueAlt(v int) { - b.uniqueAlt = v -} - -func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet { - return b.conflictingAlts -} - -func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) { - b.conflictingAlts = v -} - -func (b *BaseATNConfigSet) ReadOnly() bool { - return b.readOnly -} - -func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) { - b.readOnly = readOnly - - if readOnly { - b.configLookup = nil // Read only, so no need for the lookup cache - } -} - -func (b *BaseATNConfigSet) String() string { - s := "[" - - for i, c := range b.configs { - s += c.String() - - if i != len(b.configs)-1 { - s += ", " - } - } - - s += "]" - - if b.hasSemanticContext { - s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) - } - - if b.uniqueAlt != ATNInvalidAltNumber { - s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) - } - - if b.conflictingAlts != nil { - s += ",conflictingAlts=" + b.conflictingAlts.String() - } - - if b.dipsIntoOuterContext { - s += ",dipsIntoOuterContext" - } - - return s -} - -type OrderedATNConfigSet struct { - *BaseATNConfigSet -} - -func NewOrderedATNConfigSet() *OrderedATNConfigSet { - b := NewBaseATNConfigSet(false) - - // This set uses the standard Hash() and 
Equals() from ATNConfig - b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - - return &OrderedATNConfigSet{BaseATNConfigSet: b} -} - -func hashATNConfig(i interface{}) int { - o := i.(ATNConfig) - hash := 7 - hash = 31*hash + o.GetState().GetStateNumber() - hash = 31*hash + o.GetAlt() - hash = 31*hash + o.GetSemanticContext().Hash() - return hash -} - -func equalATNConfigs(a, b interface{}) bool { - if a == nil || b == nil { - return false - } - - if a == b { - return true - } - - var ai, ok = a.(ATNConfig) - var bi, ok1 = b.(ATNConfig) - - if !ok || !ok1 { - return false - } - - if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() { - return false - } - - if ai.GetAlt() != bi.GetAlt() { - return false - } - - return ai.GetSemanticContext().Equals(bi.GetSemanticContext()) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go deleted file mode 100644 index 41529115..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false)) - -type IATNSimulator interface { - SharedContextCache() *PredictionContextCache - ATN() *ATN - DecisionToDFA() []*DFA -} - -type BaseATNSimulator struct { - atn *ATN - sharedContextCache *PredictionContextCache - decisionToDFA []*DFA -} - -func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator { - b := new(BaseATNSimulator) - - b.atn = atn - b.sharedContextCache = sharedContextCache - - return b -} - -func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext { - if b.sharedContextCache == nil { - return context - } - - visited := make(map[PredictionContext]PredictionContext) - - return getCachedBasePredictionContext(context, b.sharedContextCache, visited) -} - -func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache { - return b.sharedContextCache -} - -func (b *BaseATNSimulator) ATN() *ATN { - return b.atn -} - -func (b *BaseATNSimulator) DecisionToDFA() []*DFA { - return b.decisionToDFA -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go deleted file mode 100644 index 1f2a56bc..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go +++ /dev/null @@ -1,393 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "strconv" - -// Constants for serialization. 
-const ( - ATNStateInvalidType = 0 - ATNStateBasic = 1 - ATNStateRuleStart = 2 - ATNStateBlockStart = 3 - ATNStatePlusBlockStart = 4 - ATNStateStarBlockStart = 5 - ATNStateTokenStart = 6 - ATNStateRuleStop = 7 - ATNStateBlockEnd = 8 - ATNStateStarLoopBack = 9 - ATNStateStarLoopEntry = 10 - ATNStatePlusLoopBack = 11 - ATNStateLoopEnd = 12 - - ATNStateInvalidStateNumber = -1 -) - -var ATNStateInitialNumTransitions = 4 - -type ATNState interface { - GetEpsilonOnlyTransitions() bool - - GetRuleIndex() int - SetRuleIndex(int) - - GetNextTokenWithinRule() *IntervalSet - SetNextTokenWithinRule(*IntervalSet) - - GetATN() *ATN - SetATN(*ATN) - - GetStateType() int - - GetStateNumber() int - SetStateNumber(int) - - GetTransitions() []Transition - SetTransitions([]Transition) - AddTransition(Transition, int) - - String() string - Hash() int - Equals(Collectable[ATNState]) bool -} - -type BaseATNState struct { - // NextTokenWithinRule caches lookahead during parsing. Not used during construction. - NextTokenWithinRule *IntervalSet - - // atn is the current ATN. - atn *ATN - - epsilonOnlyTransitions bool - - // ruleIndex tracks the Rule index because there are no Rule objects at runtime. - ruleIndex int - - stateNumber int - - stateType int - - // Track the transitions emanating from this ATN state. - transitions []Transition -} - -func NewBaseATNState() *BaseATNState { - return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} -} - -func (as *BaseATNState) GetRuleIndex() int { - return as.ruleIndex -} - -func (as *BaseATNState) SetRuleIndex(v int) { - as.ruleIndex = v -} -func (as *BaseATNState) GetEpsilonOnlyTransitions() bool { - return as.epsilonOnlyTransitions -} - -func (as *BaseATNState) GetATN() *ATN { - return as.atn -} - -func (as *BaseATNState) SetATN(atn *ATN) { - as.atn = atn -} - -func (as *BaseATNState) GetTransitions() []Transition { - return as.transitions -} - -func (as *BaseATNState) SetTransitions(t []Transition) { - as.transitions = t -} - -func (as *BaseATNState) GetStateType() int { - return as.stateType -} - -func (as *BaseATNState) GetStateNumber() int { - return as.stateNumber -} - -func (as *BaseATNState) SetStateNumber(stateNumber int) { - as.stateNumber = stateNumber -} - -func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet { - return as.NextTokenWithinRule -} - -func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) { - as.NextTokenWithinRule = v -} - -func (as *BaseATNState) Hash() int { - return as.stateNumber -} - -func (as *BaseATNState) String() string { - return strconv.Itoa(as.stateNumber) -} - -func (as *BaseATNState) Equals(other Collectable[ATNState]) bool { - if ot, ok := other.(ATNState); ok { - return as.stateNumber == ot.GetStateNumber() - } - - return false -} - -func (as *BaseATNState) isNonGreedyExitState() bool { - return false -} - -func (as *BaseATNState) AddTransition(trans Transition, index int) { - if len(as.transitions) == 0 { - as.epsilonOnlyTransitions = trans.getIsEpsilon() - } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() { - as.epsilonOnlyTransitions = false - } - - if index == -1 { - as.transitions = append(as.transitions, trans) - } else { - as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...) 
- // TODO: as.transitions.splice(index, 1, trans) - } -} - -type BasicState struct { - *BaseATNState -} - -func NewBasicState() *BasicState { - b := NewBaseATNState() - - b.stateType = ATNStateBasic - - return &BasicState{BaseATNState: b} -} - -type DecisionState interface { - ATNState - - getDecision() int - setDecision(int) - - getNonGreedy() bool - setNonGreedy(bool) -} - -type BaseDecisionState struct { - *BaseATNState - decision int - nonGreedy bool -} - -func NewBaseDecisionState() *BaseDecisionState { - return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1} -} - -func (s *BaseDecisionState) getDecision() int { - return s.decision -} - -func (s *BaseDecisionState) setDecision(b int) { - s.decision = b -} - -func (s *BaseDecisionState) getNonGreedy() bool { - return s.nonGreedy -} - -func (s *BaseDecisionState) setNonGreedy(b bool) { - s.nonGreedy = b -} - -type BlockStartState interface { - DecisionState - - getEndState() *BlockEndState - setEndState(*BlockEndState) -} - -// BaseBlockStartState is the start of a regular (...) block. -type BaseBlockStartState struct { - *BaseDecisionState - endState *BlockEndState -} - -func NewBlockStartState() *BaseBlockStartState { - return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()} -} - -func (s *BaseBlockStartState) getEndState() *BlockEndState { - return s.endState -} - -func (s *BaseBlockStartState) setEndState(b *BlockEndState) { - s.endState = b -} - -type BasicBlockStartState struct { - *BaseBlockStartState -} - -func NewBasicBlockStartState() *BasicBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateBlockStart - - return &BasicBlockStartState{BaseBlockStartState: b} -} - -var _ BlockStartState = &BasicBlockStartState{} - -// BlockEndState is a terminal node of a simple (a|b|c) block. -type BlockEndState struct { - *BaseATNState - startState ATNState -} - -func NewBlockEndState() *BlockEndState { - b := NewBaseATNState() - - b.stateType = ATNStateBlockEnd - - return &BlockEndState{BaseATNState: b} -} - -// RuleStopState is the last node in the ATN for a rule, unless that rule is the -// start symbol. In that case, there is one transition to EOF. Later, we might -// encode references to all calls to this rule to compute FOLLOW sets for error -// handling. -type RuleStopState struct { - *BaseATNState -} - -func NewRuleStopState() *RuleStopState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStop - - return &RuleStopState{BaseATNState: b} -} - -type RuleStartState struct { - *BaseATNState - stopState ATNState - isPrecedenceRule bool -} - -func NewRuleStartState() *RuleStartState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStart - - return &RuleStartState{BaseATNState: b} -} - -// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two -// transitions: one to the loop back to start of the block, and one to exit. -type PlusLoopbackState struct { - *BaseDecisionState -} - -func NewPlusLoopbackState() *PlusLoopbackState { - b := NewBaseDecisionState() - - b.stateType = ATNStatePlusLoopBack - - return &PlusLoopbackState{BaseDecisionState: b} -} - -// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a -// decision state; we don't use it for code generation. Somebody might need it, -// it is included for completeness. In reality, PlusLoopbackState is the real -// decision-making node for A+. 
-type PlusBlockStartState struct { - *BaseBlockStartState - loopBackState ATNState -} - -func NewPlusBlockStartState() *PlusBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStatePlusBlockStart - - return &PlusBlockStartState{BaseBlockStartState: b} -} - -var _ BlockStartState = &PlusBlockStartState{} - -// StarBlockStartState is the block that begins a closure loop. -type StarBlockStartState struct { - *BaseBlockStartState -} - -func NewStarBlockStartState() *StarBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateStarBlockStart - - return &StarBlockStartState{BaseBlockStartState: b} -} - -var _ BlockStartState = &StarBlockStartState{} - -type StarLoopbackState struct { - *BaseATNState -} - -func NewStarLoopbackState() *StarLoopbackState { - b := NewBaseATNState() - - b.stateType = ATNStateStarLoopBack - - return &StarLoopbackState{BaseATNState: b} -} - -type StarLoopEntryState struct { - *BaseDecisionState - loopBackState ATNState - precedenceRuleDecision bool -} - -func NewStarLoopEntryState() *StarLoopEntryState { - b := NewBaseDecisionState() - - b.stateType = ATNStateStarLoopEntry - - // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. - return &StarLoopEntryState{BaseDecisionState: b} -} - -// LoopEndState marks the end of a * or + loop. -type LoopEndState struct { - *BaseATNState - loopBackState ATNState -} - -func NewLoopEndState() *LoopEndState { - b := NewBaseATNState() - - b.stateType = ATNStateLoopEnd - - return &LoopEndState{BaseATNState: b} -} - -// TokensStartState is the Tokens rule start state linking to each lexer rule start state. -type TokensStartState struct { - *BaseDecisionState -} - -func NewTokensStartState() *TokensStartState { - b := NewBaseDecisionState() - - b.stateType = ATNStateTokenStart - - return &TokensStartState{BaseDecisionState: b} -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go deleted file mode 100644 index f679f0dc..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "os" - "strconv" -) - -// Provides an empty default implementation of {@link ANTLRErrorListener}. The -// default implementation of each method does nothing, but can be overridden as -// necessary. 
- -type ErrorListener interface { - SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) - ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) - ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) - ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) -} - -type DefaultErrorListener struct { -} - -func NewDefaultErrorListener() *DefaultErrorListener { - return new(DefaultErrorListener) -} - -func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { -} - -func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { -} - -func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { -} - -func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { -} - -type ConsoleErrorListener struct { - *DefaultErrorListener -} - -func NewConsoleErrorListener() *ConsoleErrorListener { - return new(ConsoleErrorListener) -} - -// Provides a default instance of {@link ConsoleErrorListener}. -var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() - -// {@inheritDoc} -// -//

-// This implementation prints messages to {@link System//err} containing the -// values of {@code line}, {@code charPositionInLine}, and {@code msg} using -// the following format.

-// -//
-// line line:charPositionInLine msg
-// 
-func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) -} - -type ProxyErrorListener struct { - *DefaultErrorListener - delegates []ErrorListener -} - -func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { - if delegates == nil { - panic("delegates is not provided") - } - l := new(ProxyErrorListener) - l.delegates = delegates - return l -} - -func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - for _, d := range p.delegates { - d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) - } -} - -func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go deleted file mode 100644 index 5c0a637b..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go +++ /dev/null @@ -1,734 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "reflect" - "strconv" - "strings" -) - -type ErrorStrategy interface { - reset(Parser) - RecoverInline(Parser) Token - Recover(Parser, RecognitionException) - Sync(Parser) - InErrorRecoveryMode(Parser) bool - ReportError(Parser, RecognitionException) - ReportMatch(Parser) -} - -// This is the default implementation of {@link ANTLRErrorStrategy} used for -// error Reporting and recovery in ANTLR parsers. -type DefaultErrorStrategy struct { - errorRecoveryMode bool - lastErrorIndex int - lastErrorStates *IntervalSet -} - -var _ ErrorStrategy = &DefaultErrorStrategy{} - -func NewDefaultErrorStrategy() *DefaultErrorStrategy { - - d := new(DefaultErrorStrategy) - - // Indicates whether the error strategy is currently "recovering from an - // error". This is used to suppress Reporting multiple error messages while - // attempting to recover from a detected syntax error. - // - // @see //InErrorRecoveryMode - // - d.errorRecoveryMode = false - - // The index into the input stream where the last error occurred. - // This is used to prevent infinite loops where an error is found - // but no token is consumed during recovery...another error is found, - // ad nauseum. This is a failsafe mechanism to guarantee that at least - // one token/tree node is consumed for two errors. 
- // - d.lastErrorIndex = -1 - d.lastErrorStates = nil - return d -} - -//

-// The default implementation simply calls {@link //endErrorCondition} to
-// ensure that the handler is not in error recovery mode.

-func (d *DefaultErrorStrategy) reset(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// This method is called to enter error recovery mode when a recognition -// exception is Reported. -// -// @param recognizer the parser instance -func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { - d.errorRecoveryMode = true -} - -func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool { - return d.errorRecoveryMode -} - -// This method is called to leave error recovery mode after recovering from -// a recognition exception. -// -// @param recognizer -func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { - d.errorRecoveryMode = false - d.lastErrorStates = nil - d.lastErrorIndex = -1 -} - -// {@inheritDoc} -// -//

-// The default implementation simply calls {@link //endErrorCondition}.

-func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// {@inheritDoc} -// -//

-// The default implementation returns immediately if the handler is already
-// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
-// and dispatches the Reporting task based on the runtime type of {@code e}
-// according to the following table.

-// -//
    -//
-//   • {@link NoViableAltException}: Dispatches the call to {@link //ReportNoViableAlternative}
-//   • {@link InputMisMatchException}: Dispatches the call to {@link //ReportInputMisMatch}
-//   • {@link FailedPredicateException}: Dispatches the call to {@link //ReportFailedPredicate}
-//   • All other types: calls {@link Parser//NotifyErrorListeners} to Report the exception
-func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { - // if we've already Reported an error and have not Matched a token - // yet successfully, don't Report any errors. - if d.InErrorRecoveryMode(recognizer) { - return // don't Report spurious errors - } - d.beginErrorCondition(recognizer) - - switch t := e.(type) { - default: - fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) - // fmt.Println(e.stack) - recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) - case *NoViableAltException: - d.ReportNoViableAlternative(recognizer, t) - case *InputMisMatchException: - d.ReportInputMisMatch(recognizer, t) - case *FailedPredicateException: - d.ReportFailedPredicate(recognizer, t) - } -} - -// {@inheritDoc} -// -//

-// The default implementation reSynchronizes the parser by consuming tokens
-// until we find one in the reSynchronization set--loosely the set of tokens
-// that can follow the current rule.

-func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - - if d.lastErrorIndex == recognizer.GetInputStream().Index() && - d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { - // uh oh, another error at same token index and previously-Visited - // state in ATN must be a case where LT(1) is in the recovery - // token set so nothing got consumed. Consume a single token - // at least to prevent an infinite loop d is a failsafe. - recognizer.Consume() - } - d.lastErrorIndex = recognizer.GetInputStream().Index() - if d.lastErrorStates == nil { - d.lastErrorStates = NewIntervalSet() - } - d.lastErrorStates.addOne(recognizer.GetState()) - followSet := d.getErrorRecoverySet(recognizer) - d.consumeUntil(recognizer, followSet) -} - -// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure -// that the current lookahead symbol is consistent with what were expecting -// at d point in the ATN. You can call d anytime but ANTLR only -// generates code to check before subrules/loops and each iteration. -// -//

-// Implements Jim Idle's magic Sync mechanism in closures and optional
-// subrules. E.g.,
-//
-//   a : Sync ( stuff Sync )*
-//   Sync : {consume to what can follow Sync}
-//
-// At the start of a sub rule upon error, {@link //Sync} performs single
-// token deletion, if possible. If it can't do that, it bails on the current
-// rule and uses the default error recovery, which consumes until the
-// reSynchronization set of the current rule.
-//
-// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
-// with an empty alternative), then the expected set includes what follows
-// the subrule.
-//
-// During loop iteration, it consumes until it sees a token that can start a
-// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
-// stay in the loop as long as possible.
-//
-// ORIGINS
-//
-// Previous versions of ANTLR did a poor job of their recovery within loops.
-// A single mismatch token or missing token would force the parser to bail
-// out of the entire rules surrounding the loop. So, for rule
-//
-//   classfunc : 'class' ID '{' member* '}'
-//
-// input with an extra token between members would force the parser to
-// consume until it found the next class definition rather than the next
-// member definition of the current class.
-//
-// This functionality cost a little bit of effort because the parser has to
-// compare token set at the start of the loop and at each iteration. If for
-// some reason speed is suffering for you, you can turn off d
-// functionality by simply overriding d method as a blank { }.
-func (d *DefaultErrorStrategy) Sync(recognizer Parser) { - // If already recovering, don't try to Sync - if d.InErrorRecoveryMode(recognizer) { - return - } - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - la := recognizer.GetTokenStream().LA(1) - - // try cheaper subset first might get lucky. seems to shave a wee bit off - nextTokens := recognizer.GetATN().NextTokens(s, nil) - if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) { - return - } - - switch s.GetStateType() { - case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry: - // Report error and recover if possible - if d.SingleTokenDeletion(recognizer) != nil { - return - } - panic(NewInputMisMatchException(recognizer)) - case ATNStatePlusLoopBack, ATNStateStarLoopBack: - d.ReportUnwantedToken(recognizer) - expecting := NewIntervalSet() - expecting.addSet(recognizer.GetExpectedTokens()) - whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer)) - d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) - default: - // do nothing if we can't identify the exact kind of ATN state - } -} - -// This is called by {@link //ReportError} when the exception is a -// {@link NoViableAltException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { - tokens := recognizer.GetTokenStream() - var input string - if tokens != nil { - if e.startToken.GetTokenType() == TokenEOF { - input = "" - } else { - input = tokens.GetTextFromTokens(e.startToken, e.offendingToken) - } - } else { - input = "" - } - msg := "no viable alternative at input " + d.escapeWSAndQuote(input) - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// This is called by {@link //ReportError} when the exception is an -// {@link InputMisMatchException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { - msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) + - " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// This is called by {@link //ReportError} when the exception is a -// {@link FailedPredicateException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { - ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] - msg := "rule " + ruleName + " " + e.message - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// This method is called to Report a syntax error which requires the removal -// of a token from the input stream. At the time d method is called, the -// erroneous symbol is current {@code LT(1)} symbol and has not yet been -// removed from the input stream. When d method returns, -// {@code recognizer} is in error recovery mode. -// -//

This method is called when {@link //singleTokenDeletion} identifies -// single-token deletion as a viable recovery strategy for a mismatched -// input error.

-// -//

The default implementation simply returns if the handler is already in -// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to -// enter error recovery mode, followed by calling -// {@link Parser//NotifyErrorListeners}.

-// -// @param recognizer the parser instance -func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { - if d.InErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - tokenName := d.GetTokenErrorDisplay(t) - expecting := d.GetExpectedTokens(recognizer) - msg := "extraneous input " + tokenName + " expecting " + - expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -// This method is called to Report a syntax error which requires the -// insertion of a missing token into the input stream. At the time d -// method is called, the missing token has not yet been inserted. When d -// method returns, {@code recognizer} is in error recovery mode. -// -//

This method is called when {@link //singleTokenInsertion} identifies -// single-token insertion as a viable recovery strategy for a mismatched -// input error.

-// -//

The default implementation simply returns if the handler is already in -// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to -// enter error recovery mode, followed by calling -// {@link Parser//NotifyErrorListeners}.

-// -// @param recognizer the parser instance -func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { - if d.InErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + - " at " + d.GetTokenErrorDisplay(t) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -//

The default implementation attempts to recover from the mismatched input -// by using single token insertion and deletion as described below. If the -// recovery attempt fails, this method panics with an -// {@link InputMisMatchException}.

-// -//

EXTRA TOKEN (single token deletion)

-// -//

{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the -// right token, however, then assume {@code LA(1)} is some extra spurious -// token and delete it. Then consume and return the next token (which was -// the {@code LA(2)} token) as the successful result of the Match operation.

-// -//

This recovery strategy is implemented by {@link -// //singleTokenDeletion}.

-// -//

MISSING TOKEN (single token insertion)

-// -//

If current token (at {@code LA(1)}) is consistent with what could come -// after the expected {@code LA(1)} token, then assume the token is missing -// and use the parser's {@link TokenFactory} to create it on the fly. The -// "insertion" is performed by returning the created token as the successful -// result of the Match operation.

-// -//

This recovery strategy is implemented by {@link -// //singleTokenInsertion}.

-// -//

EXAMPLE

-// -//

For example, Input {@code i=(3;} is clearly missing the {@code ')'}. When -// the parser returns from the nested call to {@code expr}, it will have the -// call chain:

-// -//
-// stat → expr → atom
-// 
-// -// and it will be trying to Match the {@code ')'} at this point in the -// derivation: -// -//
-// => ID '=' '(' INT ')' ('+' atom)* ';'
-// ^
-// 
-// -// The attempt to Match {@code ')'} will fail when it sees {@code ';'} and -// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'} -// is in the set of tokens that can follow the {@code ')'} token reference -// in rule {@code atom}. It can assume that you forgot the {@code ')'}. -func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { - // SINGLE TOKEN DELETION - MatchedSymbol := d.SingleTokenDeletion(recognizer) - if MatchedSymbol != nil { - // we have deleted the extra token. - // now, move past ttype token as if all were ok - recognizer.Consume() - return MatchedSymbol - } - // SINGLE TOKEN INSERTION - if d.SingleTokenInsertion(recognizer) { - return d.GetMissingSymbol(recognizer) - } - // even that didn't work must panic the exception - panic(NewInputMisMatchException(recognizer)) -} - -// This method implements the single-token insertion inline error recovery -// strategy. It is called by {@link //recoverInline} if the single-token -// deletion strategy fails to recover from the mismatched input. If this -// method returns {@code true}, {@code recognizer} will be in error recovery -// mode. -// -//

This method determines whether or not single-token insertion is viable by -// checking if the {@code LA(1)} input symbol could be successfully Matched -// if it were instead the {@code LA(2)} symbol. If this method returns -// {@code true}, the caller is responsible for creating and inserting a -// token with the correct type to produce this behavior.

-// -// @param recognizer the parser instance -// @return {@code true} if single-token insertion is a viable recovery -// strategy for the current mismatched input, otherwise {@code false} -func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { - currentSymbolType := recognizer.GetTokenStream().LA(1) - // if current token is consistent with what could come after current - // ATN state, then we know we're missing a token error recovery - // is free to conjure up and insert the missing token - atn := recognizer.GetInterpreter().atn - currentState := atn.states[recognizer.GetState()] - next := currentState.GetTransitions()[0].getTarget() - expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) - if expectingAtLL2.contains(currentSymbolType) { - d.ReportMissingToken(recognizer) - return true - } - - return false -} - -// This method implements the single-token deletion inline error recovery -// strategy. It is called by {@link //recoverInline} to attempt to recover -// from mismatched input. If this method returns nil, the parser and error -// handler state will not have changed. If this method returns non-nil, -// {@code recognizer} will not be in error recovery mode since the -// returned token was a successful Match. -// -//

If the single-token deletion is successful, this method calls -// {@link //ReportUnwantedToken} to Report the error, followed by -// {@link Parser//consume} to actually "delete" the extraneous token. Then, -// before returning, {@link //ReportMatch} is called to signal a successful -// Match.

-// -// @param recognizer the parser instance -// @return the successfully Matched {@link Token} instance if single-token -// deletion successfully recovers from the mismatched input, otherwise -// {@code nil} -func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { - NextTokenType := recognizer.GetTokenStream().LA(2) - expecting := d.GetExpectedTokens(recognizer) - if expecting.contains(NextTokenType) { - d.ReportUnwantedToken(recognizer) - // print("recoverFromMisMatchedToken deleting " \ - // + str(recognizer.GetTokenStream().LT(1)) \ - // + " since " + str(recognizer.GetTokenStream().LT(2)) \ - // + " is what we want", file=sys.stderr) - recognizer.Consume() // simply delete extra token - // we want to return the token we're actually Matching - MatchedSymbol := recognizer.GetCurrentToken() - d.ReportMatch(recognizer) // we know current token is correct - return MatchedSymbol - } - - return nil -} - -// Conjure up a missing token during error recovery. -// -// The recognizer attempts to recover from single missing -// symbols. But, actions might refer to that missing symbol. -// For example, x=ID {f($x)}. The action clearly assumes -// that there has been an identifier Matched previously and that -// $x points at that token. If that token is missing, but -// the next token in the stream is what we want we assume that -// d token is missing and we keep going. Because we -// have to return some token to replace the missing token, -// we have to conjure one up. This method gives the user control -// over the tokens returned for missing tokens. Mostly, -// you will want to create something special for identifier -// tokens. For literals such as '{' and ',', the default -// action in the parser or tree parser works. It simply creates -// a CommonToken of the appropriate type. The text will be the token. -// If you change what tokens must be created by the lexer, -// override d method to create the appropriate tokens. -func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { - currentSymbol := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - expectedTokenType := expecting.first() - var tokenText string - - if expectedTokenType == TokenEOF { - tokenText = "" - } else { - ln := recognizer.GetLiteralNames() - if expectedTokenType > 0 && expectedTokenType < len(ln) { - tokenText = "" - } else { - tokenText = "" // TODO matches the JS impl - } - } - current := currentSymbol - lookback := recognizer.GetTokenStream().LT(-1) - if current.GetTokenType() == TokenEOF && lookback != nil { - current = lookback - } - - tf := recognizer.GetTokenFactory() - - return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn()) -} - -func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet { - return recognizer.GetExpectedTokens() -} - -// How should a token be displayed in an error message? The default -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. 
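The comment above invites dumping far more token detail during development. Because Go struct embedding does not give the dynamic dispatch this Java-flavoured comment assumes, a pragmatic alternative is a small standalone formatter built from the Token accessors already used in this file. The sketch below is illustrative only; the helper name is hypothetical and the import path is the vendored path from this diff:

```go
package errhandling // hypothetical, as above

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

// verboseTokenDisplay renders a token with its type, position, and text,
// using only accessors that this file already relies on (GetTokenType,
// GetLine, GetColumn, GetText).
func verboseTokenDisplay(t antlr.Token) string {
	if t == nil {
		return "<no token>"
	}
	return fmt.Sprintf("<type=%d line=%d col=%d text=%q>",
		t.GetTokenType(), t.GetLine(), t.GetColumn(), t.GetText())
}
```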
-func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string { - if t == nil { - return "" - } - s := t.GetText() - if s == "" { - if t.GetTokenType() == TokenEOF { - s = "" - } else { - s = "<" + strconv.Itoa(t.GetTokenType()) + ">" - } - } - return d.escapeWSAndQuote(s) -} - -func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - return "'" + s + "'" -} - -// Compute the error recovery set for the current rule. During -// rule invocation, the parser pushes the set of tokens that can -// follow that rule reference on the stack d amounts to -// computing FIRST of what follows the rule reference in the -// enclosing rule. See LinearApproximator.FIRST(). -// This local follow set only includes tokens -// from within the rule i.e., the FIRST computation done by -// ANTLR stops at the end of a rule. -// -// # EXAMPLE -// -// When you find a "no viable alt exception", the input is not -// consistent with any of the alternatives for rule r. The best -// thing to do is to consume tokens until you see something that -// can legally follow a call to r//or* any rule that called r. -// You don't want the exact set of viable next tokens because the -// input might just be missing a token--you might consume the -// rest of the input looking for one of the missing tokens. -// -// Consider grammar: -// -// a : '[' b ']' -// | '(' b ')' -// -// b : c '^' INT -// c : ID -// | INT -// -// At each rule invocation, the set of tokens that could follow -// that rule is pushed on a stack. Here are the various -// context-sensitive follow sets: -// -// FOLLOW(b1_in_a) = FIRST(']') = ']' -// FOLLOW(b2_in_a) = FIRST(')') = ')' -// FOLLOW(c_in_b) = FIRST('^') = '^' -// -// Upon erroneous input "[]", the call chain is -// -// a -> b -> c -// -// and, hence, the follow context stack is: -// -// depth follow set start of rule execution -// 0 a (from main()) -// 1 ']' b -// 2 '^' c -// -// Notice that ')' is not included, because b would have to have -// been called from a different context in rule a for ')' to be -// included. -// -// For error recovery, we cannot consider FOLLOW(c) -// (context-sensitive or otherwise). We need the combined set of -// all context-sensitive FOLLOW sets--the set of all tokens that -// could follow any reference in the call chain. We need to -// reSync to one of those tokens. Note that FOLLOW(c)='^' and if -// we reSync'd to that token, we'd consume until EOF. We need to -// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. -// In this case, for input "[]", LA(1) is ']' and in the set, so we would -// not consume anything. After printing an error, rule c would -// return normally. Rule b would not find the required '^' though. -// At this point, it gets a mismatched token error and panics an -// exception (since LA(1) is not in the viable following token -// set). The rule exception handler tries to recover, but finds -// the same recovery set and doesn't consume anything. Rule b -// exits normally returning to rule a. Now it finds the ']' (and -// with the successful Match exits errorRecovery mode). -// -// So, you can see that the parser walks up the call chain looking -// for the token that was a member of the recovery set. -// -// Errors are not generated in errorRecovery mode. 
-// -// ANTLR's error recovery mechanism is based upon original ideas: -// -// "Algorithms + Data Structures = Programs" by Niklaus Wirth -// -// and -// -// "A note on error recovery in recursive descent parsers": -// http://portal.acm.org/citation.cfm?id=947902.947905 -// -// Later, Josef Grosch had some good ideas: -// -// "Efficient and Comfortable Error Recovery in Recursive Descent -// Parsers": -// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip -// -// Like Grosch I implement context-sensitive FOLLOW sets that are combined -// at run-time upon error to avoid overhead during parsing. -func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet { - atn := recognizer.GetInterpreter().atn - ctx := recognizer.GetParserRuleContext() - recoverSet := NewIntervalSet() - for ctx != nil && ctx.GetInvokingState() >= 0 { - // compute what follows who invoked us - invokingState := atn.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - follow := atn.NextTokens(rt.(*RuleTransition).followState, nil) - recoverSet.addSet(follow) - ctx = ctx.GetParent().(ParserRuleContext) - } - recoverSet.removeOne(TokenEpsilon) - return recoverSet -} - -// Consume tokens until one Matches the given token set.// -func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) { - ttype := recognizer.GetTokenStream().LA(1) - for ttype != TokenEOF && !set.contains(ttype) { - recognizer.Consume() - ttype = recognizer.GetTokenStream().LA(1) - } -} - -// -// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors -// by immediately canceling the parse operation with a -// {@link ParseCancellationException}. The implementation ensures that the -// {@link ParserRuleContext//exception} field is set for all parse tree nodes -// that were not completed prior to encountering the error. -// -//

-// This error strategy is useful in the following scenarios.

-//
-// - Two-stage parsing: This error strategy allows the first
-//   stage of two-stage parsing to immediately terminate if an error is
-//   encountered, and immediately fall back to the second stage. In addition to
-//   avoiding wasted work by attempting to recover from errors here, the empty
-//   implementation of {@link BailErrorStrategy//Sync} improves the performance of
-//   the first stage.
-//
-// - Silent validation: When syntax errors are not being
-//   Reported or logged, and the parse result is simply ignored if errors occur,
-//   the {@link BailErrorStrategy} avoids wasting work on recovering from errors
-//   when the result will be ignored either way.
-//

-// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
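The scenarios above map to Go roughly as follows: install the bail strategy, run the parse, and treat a panic carrying the ParseCancellationException defined later in this diff as "the input did not validate". This is a hedged sketch rather than code from the runtime; the SetErrorHandler method is assumed from the @see reference below, and all other names are hypothetical:

```go
package errhandling // hypothetical, as above

import "github.com/antlr/antlr4/runtime/Go/antlr/v4"

// errorHandlerSetter matches the SetErrorHandler method referenced by the
// @see note below; declaring it locally keeps the sketch independent of the
// exact Parser interface definition.
type errorHandlerSetter interface {
	SetErrorHandler(antlr.ErrorStrategy)
}

// validates installs the bail strategy, runs parse (which should invoke a
// rule method on the same parser), and reports whether the input parsed
// without the strategy bailing out.
func validates(p errorHandlerSetter, parse func()) (ok bool) {
	p.SetErrorHandler(antlr.NewBailErrorStrategy())
	defer func() {
		if r := recover(); r != nil {
			// Recover (below) panics with a ParseCancellationException.
			if _, bailed := r.(*antlr.ParseCancellationException); bailed {
				ok = false
				return
			}
			panic(r) // anything else is unexpected; re-panic it
		}
	}()
	parse()
	return true
}
```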

-// -// @see Parser//setErrorHandler(ANTLRErrorStrategy) - -type BailErrorStrategy struct { - *DefaultErrorStrategy -} - -var _ ErrorStrategy = &BailErrorStrategy{} - -func NewBailErrorStrategy() *BailErrorStrategy { - - b := new(BailErrorStrategy) - - b.DefaultErrorStrategy = NewDefaultErrorStrategy() - - return b -} - -// Instead of recovering from exception {@code e}, re-panic it wrapped -// in a {@link ParseCancellationException} so it is not caught by the -// rule func catches. Use {@link Exception//getCause()} to get the -// original {@link RecognitionException}. -func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - context := recognizer.GetParserRuleContext() - for context != nil { - context.SetException(e) - if parent, ok := context.GetParent().(ParserRuleContext); ok { - context = parent - } else { - context = nil - } - } - panic(NewParseCancellationException()) // TODO we don't emit e properly -} - -// Make sure we don't attempt to recover inline if the parser -// successfully recovers, it won't panic an exception. -func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { - b.Recover(recognizer, NewInputMisMatchException(recognizer)) - - return nil -} - -// Make sure we don't attempt to recover from problems in subrules.// -func (b *BailErrorStrategy) Sync(recognizer Parser) { - // pass -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go deleted file mode 100644 index 3954c137..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just -// 3 kinds of errors: prediction errors, failed predicate errors, and -// mismatched input errors. In each case, the parser knows where it is -// in the input, where it is in the ATN, the rule invocation stack, -// and what kind of problem occurred. - -type RecognitionException interface { - GetOffendingToken() Token - GetMessage() string - GetInputStream() IntStream -} - -type BaseRecognitionException struct { - message string - recognizer Recognizer - offendingToken Token - offendingState int - ctx RuleContext - input IntStream -} - -func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { - - // todo - // Error.call(this) - // - // if (!!Error.captureStackTrace) { - // Error.captureStackTrace(this, RecognitionException) - // } else { - // stack := NewError().stack - // } - // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int - - t := new(BaseRecognitionException) - - t.message = message - t.recognizer = recognizer - t.input = input - t.ctx = ctx - // The current {@link Token} when an error occurred. Since not all streams - // support accessing symbols by index, we have to track the {@link Token} - // instance itself. - t.offendingToken = nil - // Get the ATN state number the parser was in at the time the error - // occurred. For {@link NoViableAltException} and - // {@link LexerNoViableAltException} exceptions, this is the - // {@link DecisionState} number. For others, it is the state whose outgoing - // edge we couldn't Match. 
- t.offendingState = -1 - if t.recognizer != nil { - t.offendingState = t.recognizer.GetState() - } - - return t -} - -func (b *BaseRecognitionException) GetMessage() string { - return b.message -} - -func (b *BaseRecognitionException) GetOffendingToken() Token { - return b.offendingToken -} - -func (b *BaseRecognitionException) GetInputStream() IntStream { - return b.input -} - -//

If the state number is not known, this method returns -1.

- -// Gets the set of input symbols which could potentially follow the -// previously Matched symbol at the time this exception was raised. -// -//

If the set of expected tokens is not known and could not be computed, -// this method returns {@code nil}.

-// -// @return The set of token types that could potentially follow the current -// state in the ATN, or {@code nil} if the information is not available. -// / -func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { - if b.recognizer != nil { - return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) - } - - return nil -} - -func (b *BaseRecognitionException) String() string { - return b.message -} - -type LexerNoViableAltException struct { - *BaseRecognitionException - - startIndex int - deadEndConfigs ATNConfigSet -} - -func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { - - l := new(LexerNoViableAltException) - - l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) - - l.startIndex = startIndex - l.deadEndConfigs = deadEndConfigs - - return l -} - -func (l *LexerNoViableAltException) String() string { - symbol := "" - if l.startIndex >= 0 && l.startIndex < l.input.Size() { - symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) - } - return "LexerNoViableAltException" + symbol -} - -type NoViableAltException struct { - *BaseRecognitionException - - startToken Token - offendingToken Token - ctx ParserRuleContext - deadEndConfigs ATNConfigSet -} - -// Indicates that the parser could not decide which of two or more paths -// to take based upon the remaining input. It tracks the starting token -// of the offending input and also knows where the parser was -// in the various paths when the error. Reported by ReportNoViableAlternative() -func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { - - if ctx == nil { - ctx = recognizer.GetParserRuleContext() - } - - if offendingToken == nil { - offendingToken = recognizer.GetCurrentToken() - } - - if startToken == nil { - startToken = recognizer.GetCurrentToken() - } - - if input == nil { - input = recognizer.GetInputStream().(TokenStream) - } - - n := new(NoViableAltException) - n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) - - // Which configurations did we try at input.Index() that couldn't Match - // input.LT(1)?// - n.deadEndConfigs = deadEndConfigs - // The token object at the start index the input stream might - // not be buffering tokens so get a reference to it. (At the - // time the error occurred, of course the stream needs to keep a - // buffer all of the tokens but later we might not have access to those.) - n.startToken = startToken - n.offendingToken = offendingToken - - return n -} - -type InputMisMatchException struct { - *BaseRecognitionException -} - -// This signifies any kind of mismatched input exceptions such as -// when the current input does not Match the expected token. -func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { - - i := new(InputMisMatchException) - i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - i.offendingToken = recognizer.GetCurrentToken() - - return i - -} - -// A semantic predicate failed during validation. Validation of predicates -// occurs when normally parsing the alternative just like Matching a token. -// Disambiguating predicate evaluation occurs when we test a predicate during -// prediction. 
- -type FailedPredicateException struct { - *BaseRecognitionException - - ruleIndex int - predicateIndex int - predicate string -} - -func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { - - f := new(FailedPredicateException) - - f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - trans := s.GetTransitions()[0] - if trans2, ok := trans.(*PredicateTransition); ok { - f.ruleIndex = trans2.ruleIndex - f.predicateIndex = trans2.predIndex - } else { - f.ruleIndex = 0 - f.predicateIndex = 0 - } - f.predicate = predicate - f.offendingToken = recognizer.GetCurrentToken() - - return f -} - -func (f *FailedPredicateException) formatMessage(predicate, message string) string { - if message != "" { - return message - } - - return "failed predicate: {" + predicate + "}?" -} - -type ParseCancellationException struct { -} - -func NewParseCancellationException() *ParseCancellationException { - // Error.call(this) - // Error.captureStackTrace(this, ParseCancellationException) - return new(ParseCancellationException) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go deleted file mode 100644 index bd6ad5ef..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "bytes" - "io" - "os" -) - -// This is an InputStream that is loaded from a file all at once -// when you construct the object. - -type FileStream struct { - *InputStream - - filename string -} - -func NewFileStream(fileName string) (*FileStream, error) { - - buf := bytes.NewBuffer(nil) - - f, err := os.Open(fileName) - if err != nil { - return nil, err - } - defer f.Close() - _, err = io.Copy(buf, f) - if err != nil { - return nil, err - } - - fs := new(FileStream) - - fs.filename = fileName - s := string(buf.Bytes()) - - fs.InputStream = NewInputStream(s) - - return fs, nil - -} - -func (f *FileStream) GetSourceName() string { - return f.filename -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go deleted file mode 100644 index a8b889ce..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -type InputStream struct { - name string - index int - data []rune - size int -} - -func NewInputStream(data string) *InputStream { - - is := new(InputStream) - - is.name = "" - is.index = 0 - is.data = []rune(data) - is.size = len(is.data) // number of runes - - return is -} - -func (is *InputStream) reset() { - is.index = 0 -} - -func (is *InputStream) Consume() { - if is.index >= is.size { - // assert is.LA(1) == TokenEOF - panic("cannot consume EOF") - } - is.index++ -} - -func (is *InputStream) LA(offset int) int { - - if offset == 0 { - return 0 // nil - } - if offset < 0 { - offset++ // e.g., translate LA(-1) to use offset=0 - } - pos := is.index + offset - 1 - - if pos < 0 || pos >= is.size { // invalid - return TokenEOF - } - - return int(is.data[pos]) -} - -func (is *InputStream) LT(offset int) int { - return is.LA(offset) -} - -func (is *InputStream) Index() int { - return is.index -} - -func (is *InputStream) Size() int { - return is.size -} - -// mark/release do nothing we have entire buffer -func (is *InputStream) Mark() int { - return -1 -} - -func (is *InputStream) Release(marker int) { -} - -func (is *InputStream) Seek(index int) { - if index <= is.index { - is.index = index // just jump don't update stream state (line,...) - return - } - // seek forward - is.index = intMin(index, is.size) -} - -func (is *InputStream) GetText(start int, stop int) string { - if stop >= is.size { - stop = is.size - 1 - } - if start >= is.size { - return "" - } - - return string(is.data[start : stop+1]) -} - -func (is *InputStream) GetTextFromTokens(start, stop Token) string { - if start != nil && stop != nil { - return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) - } - - return "" -} - -func (is *InputStream) GetTextFromInterval(i *Interval) string { - return is.GetText(i.Start, i.Stop) -} - -func (*InputStream) GetSourceName() string { - return "Obtained from string" -} - -func (is *InputStream) String() string { - return string(is.data) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go deleted file mode 100644 index e5a74f0c..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go +++ /dev/null @@ -1,198 +0,0 @@ -package antlr - -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -import ( - "sort" -) - -// Collectable is an interface that a struct should implement if it is to be -// usable as a key in these collections. -type Collectable[T any] interface { - Hash() int - Equals(other Collectable[T]) bool -} - -type Comparator[T any] interface { - Hash1(o T) int - Equals2(T, T) bool -} - -// JStore implements a container that allows the use of a struct to calculate the key -// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just -// serve the needs of the ANTLR Go runtime. -// -// For ease of porting the logic of the runtime from the master target (Java), this collection -// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() -// function as the key. The values are stored in a standard go map which internally is a form of hashmap -// itself, the key for the go map is the hash supplied by the key object. 
The collection is able to deal with -// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't -// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and -// we understand the requirements, then this is fine - this is not a general purpose collection. -type JStore[T any, C Comparator[T]] struct { - store map[int][]T - len int - comparator Comparator[T] -} - -func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] { - - if comparator == nil { - panic("comparator cannot be nil") - } - - s := &JStore[T, C]{ - store: make(map[int][]T, 1), - comparator: comparator, - } - return s -} - -// Put will store given value in the collection. Note that the key for storage is generated from -// the value itself - this is specifically because that is what ANTLR needs - this would not be useful -// as any kind of general collection. -// -// If the key has a hash conflict, then the value will be added to the slice of values associated with the -// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is -// tested by calling the equals() method on the key. -// -// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true -// -// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. -func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn - - kh := s.comparator.Hash1(value) - - for _, v1 := range s.store[kh] { - if s.comparator.Equals2(value, v1) { - return v1, true - } - } - s.store[kh] = append(s.store[kh], value) - s.len++ - return value, false -} - -// Get will return the value associated with the key - the type of the key is the same type as the value -// which would not generally be useful, but this is a specific thing for ANTLR where the key is -// generated using the object we are going to store. -func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn - - kh := s.comparator.Hash1(key) - - for _, v := range s.store[kh] { - if s.comparator.Equals2(key, v) { - return v, true - } - } - return key, false -} - -// Contains returns true if the given key is present in the store -func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn - - _, present := s.Get(key) - return present -} - -func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { - vs := make([]T, 0, len(s.store)) - for _, v := range s.store { - vs = append(vs, v...) 
- } - sort.Slice(vs, func(i, j int) bool { - return less(vs[i], vs[j]) - }) - - return vs -} - -func (s *JStore[T, C]) Each(f func(T) bool) { - for _, e := range s.store { - for _, v := range e { - f(v) - } - } -} - -func (s *JStore[T, C]) Len() int { - return s.len -} - -func (s *JStore[T, C]) Values() []T { - vs := make([]T, 0, len(s.store)) - for _, e := range s.store { - for _, v := range e { - vs = append(vs, v) - } - } - return vs -} - -type entry[K, V any] struct { - key K - val V -} - -type JMap[K, V any, C Comparator[K]] struct { - store map[int][]*entry[K, V] - len int - comparator Comparator[K] -} - -func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] { - return &JMap[K, V, C]{ - store: make(map[int][]*entry[K, V], 1), - comparator: comparator, - } -} - -func (m *JMap[K, V, C]) Put(key K, val V) { - kh := m.comparator.Hash1(key) - - m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) - m.len++ -} - -func (m *JMap[K, V, C]) Values() []V { - vs := make([]V, 0, len(m.store)) - for _, e := range m.store { - for _, v := range e { - vs = append(vs, v.val) - } - } - return vs -} - -func (m *JMap[K, V, C]) Get(key K) (V, bool) { - - var none V - kh := m.comparator.Hash1(key) - for _, e := range m.store[kh] { - if m.comparator.Equals2(e.key, key) { - return e.val, true - } - } - return none, false -} - -func (m *JMap[K, V, C]) Len() int { - return len(m.store) -} - -func (m *JMap[K, V, C]) Delete(key K) { - kh := m.comparator.Hash1(key) - for i, e := range m.store[kh] { - if m.comparator.Equals2(e.key, key) { - m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) - m.len-- - return - } - } -} - -func (m *JMap[K, V, C]) Clear() { - m.store = make(map[int][]*entry[K, V]) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go deleted file mode 100644 index be1ba7a7..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "golang.org/x/exp/slices" - -// Represents an executor for a sequence of lexer actions which traversed during -// the Matching operation of a lexer rule (token). -// -//

The executor tracks position information for position-dependent lexer actions -// efficiently, ensuring that actions appearing only at the end of the rule do -// not cause bloating of the {@link DFA} created for the lexer.

- -type LexerActionExecutor struct { - lexerActions []LexerAction - cachedHash int -} - -func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { - - if lexerActions == nil { - lexerActions = make([]LexerAction, 0) - } - - l := new(LexerActionExecutor) - - l.lexerActions = lexerActions - - // Caches the result of {@link //hashCode} since the hash code is an element - // of the performance-critical {@link LexerATNConfig//hashCode} operation. - l.cachedHash = murmurInit(57) - for _, a := range lexerActions { - l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) - } - - return l -} - -// Creates a {@link LexerActionExecutor} which executes the actions for -// the input {@code lexerActionExecutor} followed by a specified -// {@code lexerAction}. -// -// @param lexerActionExecutor The executor for actions already traversed by -// the lexer while Matching a token within a particular -// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as -// though it were an empty executor. -// @param lexerAction The lexer action to execute after the actions -// specified in {@code lexerActionExecutor}. -// -// @return A {@link LexerActionExecutor} for executing the combine actions -// of {@code lexerActionExecutor} and {@code lexerAction}. -func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { - if lexerActionExecutor == nil { - return NewLexerActionExecutor([]LexerAction{lexerAction}) - } - - return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) -} - -// Creates a {@link LexerActionExecutor} which encodes the current offset -// for position-dependent lexer actions. -// -//

Normally, when the executor encounters lexer actions where -// {@link LexerAction//isPositionDependent} returns {@code true}, it calls -// {@link IntStream//seek} on the input {@link CharStream} to set the input -// position to the end of the current token. This behavior provides -// for efficient DFA representation of lexer actions which appear at the end -// of a lexer rule, even when the lexer rule Matches a variable number of -// characters.

-// -//

Prior to traversing a Match transition in the ATN, the current offset -// from the token start index is assigned to all position-dependent lexer -// actions which have not already been assigned a fixed offset. By storing -// the offsets relative to the token start index, the DFA representation of -// lexer actions which appear in the middle of tokens remains efficient due -// to sharing among tokens of the same length, regardless of their absolute -// position in the input stream.

-// -//

If the current executor already has offsets assigned to all -// position-dependent lexer actions, the method returns {@code this}.

-// -// @param offset The current offset to assign to all position-dependent -// lexer actions which do not already have offsets assigned. -// -// @return A {@link LexerActionExecutor} which stores input stream offsets -// for all position-dependent lexer actions. -// / -func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { - var updatedLexerActions []LexerAction - for i := 0; i < len(l.lexerActions); i++ { - _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) - if l.lexerActions[i].getIsPositionDependent() && !ok { - if updatedLexerActions == nil { - updatedLexerActions = make([]LexerAction, 0) - - for _, a := range l.lexerActions { - updatedLexerActions = append(updatedLexerActions, a) - } - } - - updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) - } - } - if updatedLexerActions == nil { - return l - } - - return NewLexerActionExecutor(updatedLexerActions) -} - -// Execute the actions encapsulated by l executor within the context of a -// particular {@link Lexer}. -// -//

This method calls {@link IntStream//seek} to set the position of the -// {@code input} {@link CharStream} prior to calling -// {@link LexerAction//execute} on a position-dependent action. Before the -// method returns, the input position will be restored to the same position -// it was in when the method was invoked.

-// -// @param lexer The lexer instance. -// @param input The input stream which is the source for the current token. -// When l method is called, the current {@link IntStream//index} for -// {@code input} should be the start of the following token, i.e. 1 -// character past the end of the current token. -// @param startIndex The token start index. This value may be passed to -// {@link IntStream//seek} to set the {@code input} position to the beginning -// of the token. -// / -func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { - requiresSeek := false - stopIndex := input.Index() - - defer func() { - if requiresSeek { - input.Seek(stopIndex) - } - }() - - for i := 0; i < len(l.lexerActions); i++ { - lexerAction := l.lexerActions[i] - if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { - offset := la.offset - input.Seek(startIndex + offset) - lexerAction = la.lexerAction - requiresSeek = (startIndex + offset) != stopIndex - } else if lexerAction.getIsPositionDependent() { - input.Seek(stopIndex) - requiresSeek = false - } - lexerAction.execute(lexer) - } -} - -func (l *LexerActionExecutor) Hash() int { - if l == nil { - // TODO: Why is this here? l should not be nil - return 61 - } - - // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode - return l.cachedHash -} - -func (l *LexerActionExecutor) Equals(other interface{}) bool { - if l == other { - return true - } - othert, ok := other.(*LexerActionExecutor) - if !ok { - return false - } - if othert == nil { - return false - } - if l.cachedHash != othert.cachedHash { - return false - } - if len(l.lexerActions) != len(othert.lexerActions) { - return false - } - return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool { - return i.Equals(j) - }) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go deleted file mode 100644 index 76689615..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type LL1Analyzer struct { - atn *ATN -} - -func NewLL1Analyzer(atn *ATN) *LL1Analyzer { - la := new(LL1Analyzer) - la.atn = atn - return la -} - -// - Special value added to the lookahead sets to indicate that we hit -// a predicate during analysis if {@code seeThruPreds==false}. -// -// / -const ( - LL1AnalyzerHitPred = TokenInvalidType -) - -// * -// Calculates the SLL(1) expected lookahead set for each outgoing transition -// of an {@link ATNState}. The returned array has one element for each -// outgoing transition in {@code s}. If the closure from transition -// i leads to a semantic predicate before Matching a symbol, the -// element at index i of the result will be {@code nil}. -// -// @param s the ATN state -// @return the expected symbols for each outgoing transition of {@code s}. 
-func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { - if s == nil { - return nil - } - count := len(s.GetTransitions()) - look := make([]*IntervalSet, count) - for alt := 0; alt < count; alt++ { - look[alt] = NewIntervalSet() - lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - seeThruPreds := false // fail to get lookahead upon pred - la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) - // Wipe out lookahead for la alternative if we found nothing - // or we had a predicate when we !seeThruPreds - if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { - look[alt] = nil - } - } - return look -} - -// * -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//

If {@code ctx} is {@code nil} and the end of the rule containing -// {@code s} is reached, {@link Token//EPSILON} is added to the result set. -// If {@code ctx} is not {@code nil} and the end of the outermost rule is -// reached, {@link Token//EOF} is added to the result set.

-// -// @param s the ATN state -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx the complete parser context, or {@code nil} if the context -// should be ignored -// -// @return The set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// / -func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { - r := NewIntervalSet() - seeThruPreds := true // ignore preds get all lookahead - var lookContext PredictionContext - if ctx != nil { - lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) - } - la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true) - return r -} - -//* -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//

If {@code ctx} is {@code nil} and {@code stopState} or the end of the -// rule containing {@code s} is reached, {@link Token//EPSILON} is added to -// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is -// {@code true} and {@code stopState} or the end of the outermost rule is -// reached, {@link Token//EOF} is added to the result set.

-// -// @param s the ATN state. -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx The outer context, or {@code nil} if the outer context should -// not be used. -// @param look The result lookahead set. -// @param lookBusy A set used for preventing epsilon closures in the ATN -// from causing a stack overflow. Outside code should pass -// {@code NewSet} for la argument. -// @param calledRuleStack A set used for preventing left recursion in the -// ATN from causing a stack overflow. Outside code should pass -// {@code NewBitSet()} for la argument. -// @param seeThruPreds {@code true} to true semantic predicates as -// implicitly {@code true} and "see through them", otherwise {@code false} -// to treat semantic predicates as opaque and add {@link //HitPred} to the -// result if one is encountered. -// @param addEOF Add {@link Token//EOF} to the result if the end of the -// outermost context is reached. This parameter has no effect if {@code ctx} -// is {@code nil}. - -func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { - - returnState := la.atn.states[ctx.getReturnState(i)] - la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - -} - -func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { - - c := NewBaseATNConfig6(s, 0, ctx) - - if lookBusy.Contains(c) { - return - } - - _, present := lookBusy.Put(c) - if present { - return - - } - if s == stopState { - if ctx == nil { - look.addOne(TokenEpsilon) - return - } else if ctx.isEmpty() && addEOF { - look.addOne(TokenEOF) - return - } - } - - _, ok := s.(*RuleStopState) - - if ok { - if ctx == nil { - look.addOne(TokenEpsilon) - return - } else if ctx.isEmpty() && addEOF { - look.addOne(TokenEOF) - return - } - - if ctx != BasePredictionContextEMPTY { - removed := calledRuleStack.contains(s.GetRuleIndex()) - defer func() { - if removed { - calledRuleStack.add(s.GetRuleIndex()) - } - }() - calledRuleStack.remove(s.GetRuleIndex()) - // run thru all possible stack tops in ctx - for i := 0; i < ctx.length(); i++ { - returnState := la.atn.states[ctx.getReturnState(i)] - la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i) - } - return - } - } - - n := len(s.GetTransitions()) - - for i := 0; i < n; i++ { - t := s.GetTransitions()[i] - - if t1, ok := t.(*RuleTransition); ok { - if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { - continue - } - - newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) - la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) - } else if t2, ok := t.(AbstractPredicateTransition); ok { - if seeThruPreds { - la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - } else { - look.addOne(LL1AnalyzerHitPred) - } - } else if t.getIsEpsilon() { - la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - } else if _, ok := t.(*WildcardTransition); ok { - look.addRange(TokenMinUserTokenType, la.atn.maxTokenType) - } else { - set := t.getLabel() - if set != nil { - if _, ok := 
t.(*NotSetTransition); ok { - set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType) - } - look.addSet(set) - } - } - } -} - -func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { - - newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) - - defer func() { - calledRuleStack.remove(t1.getTarget().GetRuleIndex()) - }() - - calledRuleStack.add(t1.getTarget().GetRuleIndex()) - la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go deleted file mode 100644 index 8bcc46a0..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go +++ /dev/null @@ -1,1559 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" - "strings" -) - -var ( - ParserATNSimulatorDebug = false - ParserATNSimulatorTraceATNSim = false - ParserATNSimulatorDFADebug = false - ParserATNSimulatorRetryDebug = false - TurnOffLRLoopEntryBranchOpt = false -) - -type ParserATNSimulator struct { - *BaseATNSimulator - - parser Parser - predictionMode int - input TokenStream - startIndex int - dfa *DFA - mergeCache *DoubleDict - outerContext ParserRuleContext -} - -func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { - - p := new(ParserATNSimulator) - - p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) - - p.parser = parser - p.decisionToDFA = decisionToDFA - // SLL, LL, or LL + exact ambig detection?// - p.predictionMode = PredictionModeLL - // LAME globals to avoid parameters!!!!! I need these down deep in predTransition - p.input = nil - p.startIndex = 0 - p.outerContext = nil - p.dfa = nil - // Each prediction operation uses a cache for merge of prediction contexts. - // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap - // isn't Synchronized but we're ok since two threads shouldn't reuse same - // parser/atnsim object because it can only handle one input at a time. - // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid - // the merge if we ever see a and b again. Note that (b,a)&rarrc should - // also be examined during cache lookup. 
- // - p.mergeCache = nil - - return p -} - -func (p *ParserATNSimulator) GetPredictionMode() int { - return p.predictionMode -} - -func (p *ParserATNSimulator) SetPredictionMode(v int) { - p.predictionMode = v -} - -func (p *ParserATNSimulator) reset() { -} - -func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + - strconv.Itoa(input.LT(1).GetColumn())) - } - - p.input = input - p.startIndex = input.Index() - p.outerContext = outerContext - - dfa := p.decisionToDFA[decision] - p.dfa = dfa - m := input.Mark() - index := input.Index() - - defer func() { - p.dfa = nil - p.mergeCache = nil // wack cache after each prediction - input.Seek(index) - input.Release(m) - }() - - // Now we are certain to have a specific decision's DFA - // But, do we still need an initial state? - var s0 *DFAState - p.atn.stateMu.RLock() - if dfa.getPrecedenceDfa() { - p.atn.edgeMu.RLock() - // the start state for a precedence DFA depends on the current - // parser precedence, and is provided by a DFA method. - s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) - p.atn.edgeMu.RUnlock() - } else { - // the start state for a "regular" DFA is just s0 - s0 = dfa.getS0() - } - p.atn.stateMu.RUnlock() - - if s0 == nil { - if outerContext == nil { - outerContext = ParserRuleContextEmpty - } - if ParserATNSimulatorDebug { - fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) - } - fullCtx := false - s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx) - - p.atn.stateMu.Lock() - if dfa.getPrecedenceDfa() { - // If p is a precedence DFA, we use applyPrecedenceFilter - // to convert the computed start state to a precedence start - // state. We then use DFA.setPrecedenceStartState to set the - // appropriate start state for the precedence level rather - // than simply setting DFA.s0. - // - dfa.s0.configs = s0Closure - s0Closure = p.applyPrecedenceFilter(s0Closure) - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - p.atn.edgeMu.Lock() - dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) - p.atn.edgeMu.Unlock() - } else { - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - dfa.setS0(s0) - } - p.atn.stateMu.Unlock() - } - - alt := p.execATN(dfa, s0, input, index, outerContext) - if ParserATNSimulatorDebug { - fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) - } - return alt - -} - -// Performs ATN simulation to compute a predicted alternative based -// upon the remaining input, but also updates the DFA cache to avoid -// having to traverse the ATN again for the same input sequence. - -// There are some key conditions we're looking for after computing a new -// set of ATN configs (proposed DFA state): -// if the set is empty, there is no viable alternative for current symbol -// does the state uniquely predict an alternative? -// does the state have a conflict that would prevent us from -// putting it on the work list? 
- -// We also have some key operations to do: -// add an edge from previous DFA state to potentially NewDFA state, D, -// upon current symbol but only if adding to work list, which means in all -// cases except no viable alternative (and possibly non-greedy decisions?) -// collecting predicates and adding semantic context to DFA accept states -// adding rule context to context-sensitive DFA accept states -// consuming an input symbol -// Reporting a conflict -// Reporting an ambiguity -// Reporting a context sensitivity -// Reporting insufficient predicates - -// cover these cases: -// -// dead end -// single alt -// single alt + preds -// conflict -// conflict + preds -func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + - ", DFA state " + s0.String() + - ", LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) - } - - previousD := s0 - - if ParserATNSimulatorDebug { - fmt.Println("s0 = " + s0.String()) - } - t := input.LA(1) - for { // for more work - D := p.getExistingTargetState(previousD, t) - if D == nil { - D = p.computeTargetState(dfa, previousD, t) - } - if D == ATNSimulatorError { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for SLL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. 
- e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - if D.requiresFullContext && p.predictionMode != PredictionModeSLL { - // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - conflictingAlts := D.configs.GetConflictingAlts() - if D.predicates != nil { - if ParserATNSimulatorDebug { - fmt.Println("DFA state has preds in DFA sim LL failover") - } - conflictIndex := input.Index() - if conflictIndex != startIndex { - input.Seek(startIndex) - } - conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) - if conflictingAlts.length() == 1 { - if ParserATNSimulatorDebug { - fmt.Println("Full LL avoided") - } - return conflictingAlts.minValue() - } - if conflictIndex != startIndex { - // restore the index so Reporting the fallback to full - // context occurs with the index at the correct spot - input.Seek(conflictIndex) - } - } - if ParserATNSimulatorDFADebug { - fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) - } - fullCtx := true - s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) - p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) - alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) - return alt - } - if D.isAcceptState { - if D.predicates == nil { - return D.prediction - } - stopIndex := input.Index() - input.Seek(startIndex) - alts := p.evalSemanticContext(D.predicates, outerContext, true) - - switch alts.length() { - case 0: - panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) - case 1: - return alts.minValue() - default: - // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported. - p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) - return alts.minValue() - } - } - previousD = D - - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } -} - -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// p method returns {@code nil}. -// -// @param previousD The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code nil} if the target state for p edge is not -// already cached - -func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { - if t+1 < 0 { - return nil - } - - p.atn.edgeMu.RLock() - defer p.atn.edgeMu.RUnlock() - edges := previousD.getEdges() - if edges == nil || t+1 >= len(edges) { - return nil - } - return previousD.getIthEdge(t + 1) -} - -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. -// -// @param dfa The DFA -// @param previousD The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, p method -// returns {@link //ERROR}. 
- -func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { - reach := p.computeReachSet(previousD.configs, t, false) - - if reach == nil { - p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) - return ATNSimulatorError - } - // create Newtarget state we'll add to DFA after it's complete - D := NewDFAState(-1, reach) - - predictedAlt := p.getUniqueAlt(reach) - - if ParserATNSimulatorDebug { - altSubSets := PredictionModegetConflictingAltSubsets(reach) - fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + - ", previous=" + previousD.configs.String() + - ", configs=" + reach.String() + - ", predict=" + strconv.Itoa(predictedAlt) + - ", allSubsetsConflict=" + - fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + - ", conflictingAlts=" + p.getConflictingAlts(reach).String()) - } - if predictedAlt != ATNInvalidAltNumber { - // NO CONFLICT, UNIQUELY PREDICTED ALT - D.isAcceptState = true - D.configs.SetUniqueAlt(predictedAlt) - D.setPrediction(predictedAlt) - } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { - // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) - D.requiresFullContext = true - // in SLL-only mode, we will stop at p state and return the minimum alt - D.isAcceptState = true - D.setPrediction(D.configs.GetConflictingAlts().minValue()) - } - if D.isAcceptState && D.configs.HasSemanticContext() { - p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) - if D.predicates != nil { - D.setPrediction(ATNInvalidAltNumber) - } - } - // all adds to dfa are done after we've created full D state - D = p.addDFAEdge(dfa, previousD, t, D) - return D -} - -func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { - // We need to test all predicates, even in DFA states that - // uniquely predict alternative. - nalts := len(decisionState.GetTransitions()) - // Update DFA so reach becomes accept state with (predicate,alt) - // pairs if preds found for conflicting alts - altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) - altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) - if altToPred != nil { - dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) - dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds - } else { - // There are preds in configs but they might go away - // when OR'd together like {p}? || NONE == NONE. If neither - // alt has preds, resolve to min alt - dfaState.setPrediction(altsToCollectPredsFrom.minValue()) - } -} - -// comes back with reach.uniqueAlt set to a valid alt -func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("execATNWithFullContext " + s0.String()) - } - - fullCtx := true - foundExactAmbig := false - var reach ATNConfigSet - previous := s0 - input.Seek(startIndex) - t := input.LA(1) - predictedAlt := -1 - - for { // for more work - reach = p.computeReachSet(previous, t, fullCtx) - if reach == nil { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for LL decision. Full LL doesn't dip into outer - // so don't need special case. 
- // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - e := p.noViableAlt(input, outerContext, previous, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - altSubSets := PredictionModegetConflictingAltSubsets(reach) - if ParserATNSimulatorDebug { - fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + - strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + - fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) - } - reach.SetUniqueAlt(p.getUniqueAlt(reach)) - // unique prediction? - if reach.GetUniqueAlt() != ATNInvalidAltNumber { - predictedAlt = reach.GetUniqueAlt() - break - } - if p.predictionMode != PredictionModeLLExactAmbigDetection { - predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) - if predictedAlt != ATNInvalidAltNumber { - break - } - } else { - // In exact ambiguity mode, we never try to terminate early. - // Just keeps scarfing until we know what the conflict is - if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { - foundExactAmbig = true - predictedAlt = PredictionModegetSingleViableAlt(altSubSets) - break - } - // else there are multiple non-conflicting subsets or - // we're not sure what the ambiguity is yet. - // So, keep going. - } - previous = reach - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } - // If the configuration set uniquely predicts an alternative, - // without conflict, then we know that it's a full LL decision - // not SLL. - if reach.GetUniqueAlt() != ATNInvalidAltNumber { - p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) - return predictedAlt - } - // We do not check predicates here because we have checked them - // on-the-fly when doing full context prediction. - - // - // In non-exact ambiguity detection mode, we might actually be able to - // detect an exact ambiguity, but I'm not going to spend the cycles - // needed to check. We only emit ambiguity warnings in exact ambiguity - // mode. - // - // For example, we might know that we have conflicting configurations. - // But, that does not mean that there is no way forward without a - // conflict. It's possible to have nonconflicting alt subsets as in: - - // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - - // from - // - // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), - // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] - // - // In p case, (17,1,[5 $]) indicates there is some next sequence that - // would resolve p without conflict to alternative 1. Any other viable - // next sequence, however, is associated with a conflict. We stop - // looking for input because no amount of further lookahead will alter - // the fact that we should predict alternative 1. We just can't say for - // sure that there is an ambiguity without looking further. 
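// Illustrative sketch (editorial, not from the vendored ANTLR source): the
// SLL/LL and exact-ambiguity behavior above is selected by the caller via the
// prediction mode. Assuming a generated parser value named "parser" that
// exposes GetInterpreter(), a minimal example is:
//
//	parser.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
//	// ...or antlr.PredictionModeLLExactAmbigDetection to make this loop keep
//	// scanning until the exact ambiguity is known, as described above.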
- - p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach) - - return predictedAlt -} - -func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { - if p.mergeCache == nil { - p.mergeCache = NewDoubleDict() - } - intermediate := NewBaseATNConfigSet(fullCtx) - - // Configurations already in a rule stop state indicate reaching the end - // of the decision rule (local context) or end of the start rule (full - // context). Once reached, these configurations are never updated by a - // closure operation, so they are handled separately for the performance - // advantage of having a smaller intermediate set when calling closure. - // - // For full-context reach operations, separate handling is required to - // ensure that the alternative Matching the longest overall sequence is - // chosen when multiple such configurations can Match the input. - - var skippedStopStates []*BaseATNConfig - - // First figure out where we can reach on input t - for _, c := range closure.GetItems() { - if ParserATNSimulatorDebug { - fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) - } - - if _, ok := c.GetState().(*RuleStopState); ok { - if fullCtx || t == TokenEOF { - skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig)) - if ParserATNSimulatorDebug { - fmt.Println("added " + c.String() + " to SkippedStopStates") - } - } - continue - } - - for _, trans := range c.GetState().GetTransitions() { - target := p.getReachableTarget(trans, t) - if target != nil { - cfg := NewBaseATNConfig4(c, target) - intermediate.Add(cfg, p.mergeCache) - if ParserATNSimulatorDebug { - fmt.Println("added " + cfg.String() + " to intermediate") - } - } - } - } - - // Now figure out where the reach operation can take us... - var reach ATNConfigSet - - // This block optimizes the reach operation for intermediate sets which - // trivially indicate a termination state for the overall - // AdaptivePredict operation. - // - // The conditions assume that intermediate - // contains all configurations relevant to the reach set, but p - // condition is not true when one or more configurations have been - // withheld in SkippedStopStates, or when the current symbol is EOF. - // - if skippedStopStates == nil && t != TokenEOF { - if len(intermediate.configs) == 1 { - // Don't pursue the closure if there is just one state. - // It can only have one alternative just add to result - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } - } - // If the reach set could not be trivially determined, perform a closure - // operation on the intermediate set to compute its initial value. - // - if reach == nil { - reach = NewBaseATNConfigSet(fullCtx) - closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - treatEOFAsEpsilon := t == TokenEOF - amount := len(intermediate.configs) - for k := 0; k < amount; k++ { - p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) - } - } - if t == TokenEOF { - // After consuming EOF no additional input is possible, so we are - // only interested in configurations which reached the end of the - // decision rule (local context) or end of the start rule (full - // context). 
Update reach to contain only these configurations. This - // handles both explicit EOF transitions in the grammar and implicit - // EOF transitions following the end of the decision or start rule. - // - // When reach==intermediate, no closure operation was performed. In - // p case, removeAllConfigsNotInRuleStopState needs to check for - // reachable rule stop states as well as configurations already in - // a rule stop state. - // - // This is handled before the configurations in SkippedStopStates, - // because any configurations potentially added from that list are - // already guaranteed to meet p condition whether or not it's - // required. - // - reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) - } - // If SkippedStopStates!=nil, then it contains at least one - // configuration. For full-context reach operations, these - // configurations reached the end of the start rule, in which case we - // only add them back to reach if no configuration during the current - // closure operation reached such a state. This ensures AdaptivePredict - // chooses an alternative Matching the longest overall sequence when - // multiple alternatives are viable. - // - if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { - for l := 0; l < len(skippedStopStates); l++ { - reach.Add(skippedStopStates[l], p.mergeCache) - } - } - - if ParserATNSimulatorTraceATNSim { - fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String()) - } - - if len(reach.GetItems()) == 0 { - return nil - } - - return reach -} - -// Return a configuration set containing only the configurations from -// {@code configs} which are in a {@link RuleStopState}. If all -// configurations in {@code configs} are already in a rule stop state, p -// method simply returns {@code configs}. -// -//

-// When {@code lookToEndOfRule} is true, p method uses
-// {@link ATN//NextTokens} for each configuration in {@code configs} which is
-// not already in a rule stop state to see if a rule stop state is reachable
-// from the configuration via epsilon-only transitions.

-// -// @param configs the configuration set to update -// @param lookToEndOfRule when true, p method checks for rule stop states -// reachable by epsilon-only transitions from each configuration in -// {@code configs}. -// -// @return {@code configs} if all configurations in {@code configs} are in a -// rule stop state, otherwise return a Newconfiguration set containing only -// the configurations from {@code configs} which are in a rule stop state -func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { - if PredictionModeallConfigsInRuleStopStates(configs) { - return configs - } - result := NewBaseATNConfigSet(configs.FullContext()) - for _, config := range configs.GetItems() { - if _, ok := config.GetState().(*RuleStopState); ok { - result.Add(config, p.mergeCache) - continue - } - if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { - NextTokens := p.atn.NextTokens(config.GetState(), nil) - if NextTokens.contains(TokenEpsilon) { - endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] - result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) - } - } - } - return result -} - -func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { - // always at least the implicit call to start rule - initialContext := predictionContextFromRuleContext(p.atn, ctx) - configs := NewBaseATNConfigSet(fullCtx) - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("computeStartState from ATN state " + a.String() + - " initialContext=" + initialContext.String()) - } - - for i := 0; i < len(a.GetTransitions()); i++ { - target := a.GetTransitions()[i].getTarget() - c := NewBaseATNConfig6(target, i+1, initialContext) - closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) - p.closure(c, configs, closureBusy, true, fullCtx, false) - } - return configs -} - -// This method transforms the start state computed by -// {@link //computeStartState} to the special start state used by a -// precedence DFA for a particular precedence value. The transformation -// process applies the following changes to the start state's configuration -// set. -// -//
-//  1. Evaluate the precedence predicates for each configuration using
-//     {@link SemanticContext//evalPrecedence}.
-//  2. Remove all configurations which predict an alternative greater than
-//     1, for which another configuration that predicts alternative 1 is in the
-//     same ATN state with the same prediction context. This transformation is
-//     valid for the following reasons:
-//     - The closure block cannot contain any epsilon transitions which bypass
-//       the body of the closure, so all states reachable via alternative 1 are
-//       part of the precedence alternatives of the transformed left-recursive
-//       rule.
-//     - The "primary" portion of a left recursive rule cannot contain an
-//       epsilon transition, so the only way an alternative other than 1 can exist
-//       in a state that is also reachable via alternative 1 is by nesting calls
-//       to the left-recursive rule, with the outer calls not being at the
-//       preferred precedence level.
-//
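// Illustrative note (editorial, not from the vendored ANTLR source): the
// transformation described above is consumed by AdaptivePredict when it seeds
// a precedence DFA, roughly:
//
//	s0Closure = p.applyPrecedenceFilter(s0Closure)
//	s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
//	dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
//
// i.e. the filtered configuration set becomes the start state cached per
// precedence level rather than being stored in DFA.s0.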

-// The prediction context must be considered by p filter to address
-// situations like the following.
-//
-// grammar TA
-// prog: statement* EOF
-// statement: letterA | statement letterA 'b'
-// letterA: 'a'
-//

-// If the above grammar, the ATN state immediately before the token -// reference {@code 'a'} in {@code letterA} is reachable from the left edge -// of both the primary and closure blocks of the left-recursive rule -// {@code statement}. The prediction context associated with each of these -// configurations distinguishes between them, and prevents the alternative -// which stepped out to {@code prog} (and then back in to {@code statement} -// from being eliminated by the filter. -//

-// -// @param configs The configuration set computed by -// {@link //computeStartState} as the start state for the DFA. -// @return The transformed configuration set representing the start state -// for a precedence DFA at a particular precedence level (determined by -// calling {@link Parser//getPrecedence}). -func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { - - statesFromAlt1 := make(map[int]PredictionContext) - configSet := NewBaseATNConfigSet(configs.FullContext()) - - for _, config := range configs.GetItems() { - // handle alt 1 first - if config.GetAlt() != 1 { - continue - } - updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) - if updatedContext == nil { - // the configuration was eliminated - continue - } - statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() - if updatedContext != config.GetSemanticContext() { - configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) - } else { - configSet.Add(config, p.mergeCache) - } - } - for _, config := range configs.GetItems() { - - if config.GetAlt() == 1 { - // already handled - continue - } - // In the future, p elimination step could be updated to also - // filter the prediction context for alternatives predicting alt>1 - // (basically a graph subtraction algorithm). - if !config.getPrecedenceFilterSuppressed() { - context := statesFromAlt1[config.GetState().GetStateNumber()] - if context != nil && context.Equals(config.GetContext()) { - // eliminated - continue - } - } - configSet.Add(config, p.mergeCache) - } - return configSet -} - -func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { - if trans.Matches(ttype, 0, p.atn.maxTokenType) { - return trans.getTarget() - } - - return nil -} - -func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext { - - altToPred := make([]SemanticContext, nalts+1) - for _, c := range configs.GetItems() { - if ambigAlts.contains(c.GetAlt()) { - altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) - } - } - nPredAlts := 0 - for i := 1; i <= nalts; i++ { - pred := altToPred[i] - if pred == nil { - altToPred[i] = SemanticContextNone - } else if pred != SemanticContextNone { - nPredAlts++ - } - } - // nonambig alts are nil in altToPred - if nPredAlts == 0 { - altToPred = nil - } - if ParserATNSimulatorDebug { - fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) - } - return altToPred -} - -func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { - pairs := make([]*PredPrediction, 0) - containsPredicate := false - for i := 1; i < len(altToPred); i++ { - pred := altToPred[i] - // unpredicated is indicated by SemanticContextNONE - if ambigAlts != nil && ambigAlts.contains(i) { - pairs = append(pairs, NewPredPrediction(pred, i)) - } - if pred != SemanticContextNone { - containsPredicate = true - } - } - if !containsPredicate { - return nil - } - return pairs -} - -// This method is used to improve the localization of error messages by -// choosing an alternative rather than panicing a -// {@link NoViableAltException} in particular prediction scenarios where the -// {@link //ERROR} state was reached during ATN simulation. -// -//

-// The default implementation of p method uses the following
-// algorithm to identify an ATN configuration which successfully parsed the
-// decision entry rule. Choosing such an alternative ensures that the
-// {@link ParserRuleContext} returned by the calling rule will be complete
-// and valid, and the syntax error will be Reported later at a more
-// localized location.
-//
-//   - If a syntactically valid path or paths reach the end of the decision rule and
-//     they are semantically valid if predicated, return the min associated alt.
-//   - Else, if a semantically invalid but syntactically valid path exist
-//     or paths exist, return the minimum associated alt.
-//   - Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
-//
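// Illustrative note (editorial, not from the vendored ANTLR source): execATN
// above applies this selection just before surfacing a NoViableAltException:
//
//	alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
//	if alt != ATNInvalidAltNumber {
//		return alt
//	}
//	panic(e) // no usable alternative; let the error propagate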

-// In some scenarios, the algorithm described above could predict an -// alternative which will result in a {@link FailedPredicateException} in -// the parser. Specifically, p could occur if the only configuration -// capable of successfully parsing to the end of the decision rule is -// blocked by a semantic predicate. By choosing p alternative within -// {@link //AdaptivePredict} instead of panicing a -// {@link NoViableAltException}, the resulting -// {@link FailedPredicateException} in the parser will identify the specific -// predicate which is preventing the parser from successfully parsing the -// decision rule, which helps developers identify and correct logic errors -// in semantic predicates. -//

-// -// @param configs The ATN configurations which were valid immediately before -// the {@link //ERROR} state was reached -// @param outerContext The is the \gamma_0 initial parser context from the paper -// or the parser stack at the instant before prediction commences. -// -// @return The value to return from {@link //AdaptivePredict}, or -// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not -// identified and {@link //AdaptivePredict} should Report an error instead. -func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { - cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) - semValidConfigs := cfgs[0] - semInvalidConfigs := cfgs[1] - alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) - if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists - return alt - } - // Is there a syntactically valid path with a failed pred? - if len(semInvalidConfigs.GetItems()) > 0 { - alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) - if alt != ATNInvalidAltNumber { // syntactically viable path exists - return alt - } - } - return ATNInvalidAltNumber -} - -func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { - alts := NewIntervalSet() - - for _, c := range configs.GetItems() { - _, ok := c.GetState().(*RuleStopState) - - if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { - alts.addOne(c.GetAlt()) - } - } - if alts.length() == 0 { - return ATNInvalidAltNumber - } - - return alts.first() -} - -// Walk the list of configurations and split them according to -// those that have preds evaluating to true/false. If no pred, assume -// true pred and include in succeeded set. Returns Pair of sets. -// -// Create a NewSet so as not to alter the incoming parameter. -// -// Assumption: the input stream has been restored to the starting point -// prediction, which is where predicates need to evaluate. - -type ATNConfigSetPair struct { - item0, item1 ATNConfigSet -} - -func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet { - succeeded := NewBaseATNConfigSet(configs.FullContext()) - failed := NewBaseATNConfigSet(configs.FullContext()) - - for _, c := range configs.GetItems() { - if c.GetSemanticContext() != SemanticContextNone { - predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) - if predicateEvaluationResult { - succeeded.Add(c, nil) - } else { - failed.Add(c, nil) - } - } else { - succeeded.Add(c, nil) - } - } - return []ATNConfigSet{succeeded, failed} -} - -// Look through a list of predicate/alt pairs, returning alts for the -// -// pairs that win. A {@code NONE} predicate indicates an alt containing an -// unpredicated config which behaves as "always true." If !complete -// then we stop at the first predicate that evaluates to true. This -// includes pairs with nil predicates. 
-func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { - predictions := NewBitSet() - for i := 0; i < len(predPredictions); i++ { - pair := predPredictions[i] - if pair.pred == SemanticContextNone { - predictions.add(pair.alt) - if !complete { - break - } - continue - } - - predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) - } - if predicateEvaluationResult { - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) - } - predictions.add(pair.alt) - if !complete { - break - } - } - } - return predictions -} - -func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) { - initialDepth := 0 - p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, - fullCtx, initialDepth, treatEOFAsEpsilon) -} - -func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - if ParserATNSimulatorTraceATNSim { - fmt.Println("closure(" + config.String() + ")") - //fmt.Println("configs(" + configs.String() + ")") - if config.GetReachesIntoOuterContext() > 50 { - panic("problem") - } - } - - if _, ok := config.GetState().(*RuleStopState); ok { - // We hit rule end. If we have context info, use it - // run thru all possible stack tops in ctx - if !config.GetContext().isEmpty() { - for i := 0; i < config.GetContext().length(); i++ { - if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { - if fullCtx { - configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) - continue - } else { - // we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) - } - continue - } - returnState := p.atn.states[config.GetContext().getReturnState(i)] - newContext := config.GetContext().GetParent(i) // "pop" return state - - c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) - // While we have context to pop back from, we may have - // gotten that context AFTER having falling off a rule. - // Make sure we track that we are now out of context. 
- c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) - p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) - } - return - } else if fullCtx { - // reached end of start rule - configs.Add(config, p.mergeCache) - return - } else { - // else if we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - } - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) -} - -// Do the actual work of walking epsilon edges// -func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - state := config.GetState() - // optimization - if !state.GetEpsilonOnlyTransitions() { - configs.Add(config, p.mergeCache) - // make sure to not return here, because EOF transitions can act as - // both epsilon transitions and non-epsilon transitions. - } - for i := 0; i < len(state.GetTransitions()); i++ { - if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) { - continue - } - - t := state.GetTransitions()[i] - _, ok := t.(*ActionTransition) - continueCollecting := collectPredicates && !ok - c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) - if ci, ok := c.(*BaseATNConfig); ok && ci != nil { - newDepth := depth - - if _, ok := config.GetState().(*RuleStopState); ok { - // target fell off end of rule mark resulting c as having dipped into outer context - // We can't get here if incoming config was rule stop and we had context - // track how far we dip into outer context. Might - // come in handy and we avoid evaluating context dependent - // preds if p is > 0. - - if p.dfa != nil && p.dfa.getPrecedenceDfa() { - if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { - c.setPrecedenceFilterSuppressed(true) - } - } - - c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) - - _, present := closureBusy.Put(c) - if present { - // avoid infinite recursion for right-recursive rules - continue - } - - configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of p method - newDepth-- - if ParserATNSimulatorDebug { - fmt.Println("dips into outer ctx: " + c.String()) - } - } else { - - if !t.getIsEpsilon() { - _, present := closureBusy.Put(c) - if present { - // avoid infinite recursion for EOF* and EOF+ - continue - } - } - if _, ok := t.(*RuleTransition); ok { - // latch when newDepth goes negative - once we step out of the entry context we can't return - if newDepth >= 0 { - newDepth++ - } - } - } - p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) - } - } -} - -func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool { - if TurnOffLRLoopEntryBranchOpt { - return false - } - - _p := config.GetState() - - // First check to see if we are in StarLoopEntryState generated during - // left-recursion elimination. For efficiency, also check if - // the context has an empty stack case. 
If so, it would mean - // global FOLLOW so we can't perform optimization - if _p.GetStateType() != ATNStateStarLoopEntry { - return false - } - startLoop, ok := _p.(*StarLoopEntryState) - if !ok { - return false - } - if !startLoop.precedenceRuleDecision || - config.GetContext().isEmpty() || - config.GetContext().hasEmptyPath() { - return false - } - - // Require all return states to return back to the same rule - // that p is in. - numCtxs := config.GetContext().length() - for i := 0; i < numCtxs; i++ { - returnState := p.atn.states[config.GetContext().getReturnState(i)] - if returnState.GetRuleIndex() != _p.GetRuleIndex() { - return false - } - } - x := _p.GetTransitions()[0].getTarget() - decisionStartState := x.(BlockStartState) - blockEndStateNum := decisionStartState.getEndState().stateNumber - blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState) - - // Verify that the top of each stack context leads to loop entry/exit - // state through epsilon edges and w/o leaving rule. - - for i := 0; i < numCtxs; i++ { // for each stack context - returnStateNumber := config.GetContext().getReturnState(i) - returnState := p.atn.states[returnStateNumber] - - // all states must have single outgoing epsilon edge - if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() { - return false - } - - // Look for prefix op case like 'not expr', (' type ')' expr - returnStateTarget := returnState.GetTransitions()[0].getTarget() - if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p { - continue - } - - // Look for 'expr op expr' or case where expr's return state is block end - // of (...)* internal block; the block end points to loop back - // which points to p but we don't need to check that - if returnState == blockEndState { - continue - } - - // Look for ternary expr ? expr : expr. 
The return state points at block end, - // which points at loop entry state - if returnStateTarget == blockEndState { - continue - } - - // Look for complex prefix 'between expr and expr' case where 2nd expr's - // return state points at block end state of (...)* internal block - if returnStateTarget.GetStateType() == ATNStateBlockEnd && - len(returnStateTarget.GetTransitions()) == 1 && - returnStateTarget.GetTransitions()[0].getIsEpsilon() && - returnStateTarget.GetTransitions()[0].getTarget() == _p { - continue - } - - // anything else ain't conforming - return false - } - - return true -} - -func (p *ParserATNSimulator) getRuleName(index int) string { - if p.parser != nil && index >= 0 { - return p.parser.GetRuleNames()[index] - } - var sb strings.Builder - sb.Grow(32) - - sb.WriteString("') - return sb.String() -} - -func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig { - - switch t.getSerializationType() { - case TransitionRULE: - return p.ruleTransition(config, t.(*RuleTransition)) - case TransitionPRECEDENCE: - return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx) - case TransitionPREDICATE: - return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx) - case TransitionACTION: - return p.actionTransition(config, t.(*ActionTransition)) - case TransitionEPSILON: - return NewBaseATNConfig4(config, t.getTarget()) - case TransitionATOM, TransitionRANGE, TransitionSET: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if treatEOFAsEpsilon { - if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) - } - } - return nil - default: - return nil - } -} - -func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { - fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) - } - return NewBaseATNConfig4(config, t.getTarget()) -} - -func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, - pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { - - if ParserATNSimulatorDebug { - fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + - strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") - if p.parser != nil { - fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) - } - } - var c *BaseATNConfig - if collectPredicates && inContext { - if fullCtx { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. 
- currentPosition := p.input.Index() - p.input.Seek(p.startIndex) - predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) - p.input.Seek(currentPosition) - if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context - } - } else { - newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) - } - } else { - c = NewBaseATNConfig4(config, pt.getTarget()) - } - if ParserATNSimulatorDebug { - fmt.Println("config from pred transition=" + c.String()) - } - return c -} - -func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { - - if ParserATNSimulatorDebug { - fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + - ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) - if p.parser != nil { - fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) - } - } - var c *BaseATNConfig - if collectPredicates && (!pt.isCtxDependent || inContext) { - if fullCtx { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. - currentPosition := p.input.Index() - p.input.Seek(p.startIndex) - predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) - p.input.Seek(currentPosition) - if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context - } - } else { - newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) - } - } else { - c = NewBaseATNConfig4(config, pt.getTarget()) - } - if ParserATNSimulatorDebug { - fmt.Println("config from pred transition=" + c.String()) - } - return c -} - -func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { - fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) - } - returnState := t.followState - newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) - return NewBaseATNConfig1(config, t.getTarget(), newContext) -} - -func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModeGetAlts(altsets) -} - -// Sam pointed out a problem with the previous definition, v3, of -// ambiguous states. If we have another state associated with conflicting -// alternatives, we should keep going. For example, the following grammar -// -// s : (ID | ID ID?) '' -// -// When the ATN simulation reaches the state before '', it has a DFA -// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally -// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node -// because alternative to has another way to continue, via [6|2|[]]. -// The key is that we have a single state that has config's only associated -// with a single alternative, 2, and crucially the state transitions -// among the configurations are all non-epsilon transitions. That means -// we don't consider any conflicts that include alternative 2. So, we -// ignore the conflict between alts 1 and 2. 
We ignore a set of -// conflicting alts when there is an intersection with an alternative -// associated with a single alt state in the state&rarrconfig-list map. -// -// It's also the case that we might have two conflicting configurations but -// also a 3rd nonconflicting configuration for a different alternative: -// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: -// -// a : A | A | A B -// -// After Matching input A, we reach the stop state for rule A, state 1. -// State 8 is the state right before B. Clearly alternatives 1 and 2 -// conflict and no amount of further lookahead will separate the two. -// However, alternative 3 will be able to continue and so we do not -// stop working on p state. In the previous example, we're concerned -// with states associated with the conflicting alternatives. Here alt -// 3 is not associated with the conflicting configs, but since we can continue -// looking for input reasonably, I don't declare the state done. We -// ignore a set of conflicting alts when we have an alternative -// that we still need to pursue. -// - -func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet { - var conflictingAlts *BitSet - if configs.GetUniqueAlt() != ATNInvalidAltNumber { - conflictingAlts = NewBitSet() - conflictingAlts.add(configs.GetUniqueAlt()) - } else { - conflictingAlts = configs.GetConflictingAlts() - } - return conflictingAlts -} - -func (p *ParserATNSimulator) GetTokenName(t int) string { - if t == TokenEOF { - return "EOF" - } - - if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) { - return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" - } - - if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetSymbolicNames()) { - return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">" - } - - return strconv.Itoa(t) -} - -func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { - return p.GetTokenName(input.LA(1)) -} - -// Used for debugging in AdaptivePredict around execATN but I cut -// -// it out for clarity now that alg. works well. We can leave p -// "dead" code for a bit. -func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { - - panic("Not implemented") - - // fmt.Println("dead end configs: ") - // var decs = nvae.deadEndConfigs - // - // for i:=0; i0) { - // var t = c.state.GetTransitions()[0] - // if t2, ok := t.(*AtomTransition); ok { - // trans = "Atom "+ p.GetTokenName(t2.label) - // } else if t3, ok := t.(SetTransition); ok { - // _, ok := t.(*NotSetTransition) - // - // var s string - // if (ok){ - // s = "~" - // } - // - // trans = s + "Set " + t3.set - // } - // } - // fmt.Errorf(c.String(p.parser, true) + ":" + trans) - // } -} - -func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException { - return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext) -} - -func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { - alt := ATNInvalidAltNumber - for _, c := range configs.GetItems() { - if alt == ATNInvalidAltNumber { - alt = c.GetAlt() // found first alt - } else if c.GetAlt() != alt { - return ATNInvalidAltNumber - } - } - return alt -} - -// Add an edge to the DFA, if possible. This method calls -// {@link //addDFAState} to ensure the {@code to} state is present in the -// DFA. 
-// If {@code from} is {@code nil}, or if {@code t} is outside the
-// range of edges that can be represented in the DFA tables, p method
-// returns without adding the edge to the DFA.
-//
-// If {@code to} is {@code nil}, p method returns {@code nil}.
-// Otherwise, p method returns the {@link DFAState} returned by calling
-// {@link //addDFAState} for the {@code to} state.
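// Illustrative note (editorial, not from the vendored ANTLR source): edges are
// indexed at t+1 so that t == -1 (TokenEOF) can occupy slot 0. This is why the
// edge table is sized p.atn.maxTokenType+1+1 and why getExistingTargetState
// above reads previousD.getIthEdge(t + 1).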

-// -// @param dfa The DFA -// @param from The source state for the edge -// @param t The input symbol -// @param to The target state for the edge -// -// @return If {@code to} is {@code nil}, p method returns {@code nil} -// otherwise p method returns the result of calling {@link //addDFAState} -// on {@code to} -func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { - if ParserATNSimulatorDebug { - fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) - } - if to == nil { - return nil - } - p.atn.stateMu.Lock() - to = p.addDFAState(dfa, to) // used existing if possible not incoming - p.atn.stateMu.Unlock() - if from == nil || t < -1 || t > p.atn.maxTokenType { - return to - } - p.atn.edgeMu.Lock() - if from.getEdges() == nil { - from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1)) - } - from.setIthEdge(t+1, to) // connect - p.atn.edgeMu.Unlock() - - if ParserATNSimulatorDebug { - var names []string - if p.parser != nil { - names = p.parser.GetLiteralNames() - } - - fmt.Println("DFA=\n" + dfa.String(names, nil)) - } - return to -} - -// Add state {@code D} to the DFA if it is not already present, and return -// the actual instance stored in the DFA. If a state equivalent to {@code D} -// is already in the DFA, the existing state is returned. Otherwise p -// method returns {@code D} after adding it to the DFA. -// -//

-// If {@code D} is {@link //ERROR}, p method returns {@link //ERROR} and
-// does not change the DFA.

-// -// @param dfa The dfa -// @param D The DFA state to add -// @return The state stored in the DFA. This will be either the existing -// state if {@code D} is already in the DFA, or {@code D} itself if the -// state was not already present. -func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { - if d == ATNSimulatorError { - return d - } - existing, present := dfa.states.Get(d) - if present { - if ParserATNSimulatorTraceATNSim { - fmt.Print("addDFAState " + d.String() + " exists") - } - return existing - } - - // The state was not present, so update it with configs - // - d.stateNumber = dfa.states.Len() - if !d.configs.ReadOnly() { - d.configs.OptimizeConfigs(p.BaseATNSimulator) - d.configs.SetReadOnly(true) - } - dfa.states.Put(d) - if ParserATNSimulatorTraceATNSim { - fmt.Println("addDFAState new " + d.String()) - } - - return d -} - -func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs) - } -} - -// If context sensitive parsing, we know it's ambiguity not conflict// -func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, - exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go deleted file mode 100644 index ba62af36..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go +++ /dev/null @@ -1,806 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "golang.org/x/exp/slices" - "strconv" -) - -// Represents {@code $} in local context prediction, which means wildcard. -// {@code//+x =//}. 
-// / -const ( - BasePredictionContextEmptyReturnState = 0x7FFFFFFF -) - -// Represents {@code $} in an array in full context mode, when {@code $} -// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, -// {@code $} = {@link //EmptyReturnState}. -// / - -var ( - BasePredictionContextglobalNodeCount = 1 - BasePredictionContextid = BasePredictionContextglobalNodeCount -) - -type PredictionContext interface { - Hash() int - Equals(interface{}) bool - GetParent(int) PredictionContext - getReturnState(int) int - length() int - isEmpty() bool - hasEmptyPath() bool - String() string -} - -type BasePredictionContext struct { - cachedHash int -} - -func NewBasePredictionContext(cachedHash int) *BasePredictionContext { - pc := new(BasePredictionContext) - pc.cachedHash = cachedHash - - return pc -} - -func (b *BasePredictionContext) isEmpty() bool { - return false -} - -func calculateHash(parent PredictionContext, returnState int) int { - h := murmurInit(1) - h = murmurUpdate(h, parent.Hash()) - h = murmurUpdate(h, returnState) - return murmurFinish(h, 2) -} - -var _emptyPredictionContextHash int - -func init() { - _emptyPredictionContextHash = murmurInit(1) - _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) -} - -func calculateEmptyHash() int { - return _emptyPredictionContextHash -} - -// Used to cache {@link BasePredictionContext} objects. Its used for the shared -// context cash associated with contexts in DFA states. This cache -// can be used for both lexers and parsers. - -type PredictionContextCache struct { - cache map[PredictionContext]PredictionContext -} - -func NewPredictionContextCache() *PredictionContextCache { - t := new(PredictionContextCache) - t.cache = make(map[PredictionContext]PredictionContext) - return t -} - -// Add a context to the cache and return it. If the context already exists, -// return that one instead and do not add a Newcontext to the cache. -// Protect shared cache from unsafe thread access. 
-func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { - if ctx == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY - } - existing := p.cache[ctx] - if existing != nil { - return existing - } - p.cache[ctx] = ctx - return ctx -} - -func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { - return p.cache[ctx] -} - -func (p *PredictionContextCache) length() int { - return len(p.cache) -} - -type SingletonPredictionContext interface { - PredictionContext -} - -type BaseSingletonPredictionContext struct { - *BasePredictionContext - - parentCtx PredictionContext - returnState int -} - -func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { - var cachedHash int - if parent != nil { - cachedHash = calculateHash(parent, returnState) - } else { - cachedHash = calculateEmptyHash() - } - - s := new(BaseSingletonPredictionContext) - s.BasePredictionContext = NewBasePredictionContext(cachedHash) - - s.parentCtx = parent - s.returnState = returnState - - return s -} - -func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { - if returnState == BasePredictionContextEmptyReturnState && parent == nil { - // someone can pass in the bits of an array ctx that mean $ - return BasePredictionContextEMPTY - } - - return NewBaseSingletonPredictionContext(parent, returnState) -} - -func (b *BaseSingletonPredictionContext) length() int { - return 1 -} - -func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { - return b.parentCtx -} - -func (b *BaseSingletonPredictionContext) getReturnState(index int) int { - return b.returnState -} - -func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { - return b.returnState == BasePredictionContextEmptyReturnState -} - -func (b *BaseSingletonPredictionContext) Hash() int { - return b.cachedHash -} - -func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool { - if b == other { - return true - } - if _, ok := other.(*BaseSingletonPredictionContext); !ok { - return false - } - - otherP := other.(*BaseSingletonPredictionContext) - - if b.returnState != otherP.getReturnState(0) { - return false - } - if b.parentCtx == nil { - return otherP.parentCtx == nil - } - - return b.parentCtx.Equals(otherP.parentCtx) -} - -func (b *BaseSingletonPredictionContext) String() string { - var up string - - if b.parentCtx == nil { - up = "" - } else { - up = b.parentCtx.String() - } - - if len(up) == 0 { - if b.returnState == BasePredictionContextEmptyReturnState { - return "$" - } - - return strconv.Itoa(b.returnState) - } - - return strconv.Itoa(b.returnState) + " " + up -} - -var BasePredictionContextEMPTY = NewEmptyPredictionContext() - -type EmptyPredictionContext struct { - *BaseSingletonPredictionContext -} - -func NewEmptyPredictionContext() *EmptyPredictionContext { - - p := new(EmptyPredictionContext) - - p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) - p.cachedHash = calculateEmptyHash() - return p -} - -func (e *EmptyPredictionContext) isEmpty() bool { - return true -} - -func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { - return nil -} - -func (e *EmptyPredictionContext) getReturnState(index int) int { - return e.returnState -} - -func (e *EmptyPredictionContext) Hash() int { - return e.cachedHash -} - -func (e *EmptyPredictionContext) Equals(other interface{}) bool { - return 
e == other -} - -func (e *EmptyPredictionContext) String() string { - return "$" -} - -type ArrayPredictionContext struct { - *BasePredictionContext - - parents []PredictionContext - returnStates []int -} - -func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { - // Parent can be nil only if full ctx mode and we make an array - // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using - // nil parent and - // returnState == {@link //EmptyReturnState}. - hash := murmurInit(1) - - for _, parent := range parents { - hash = murmurUpdate(hash, parent.Hash()) - } - - for _, returnState := range returnStates { - hash = murmurUpdate(hash, returnState) - } - - hash = murmurFinish(hash, len(parents)<<1) - - c := new(ArrayPredictionContext) - c.BasePredictionContext = NewBasePredictionContext(hash) - - c.parents = parents - c.returnStates = returnStates - - return c -} - -func (a *ArrayPredictionContext) GetReturnStates() []int { - return a.returnStates -} - -func (a *ArrayPredictionContext) hasEmptyPath() bool { - return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) isEmpty() bool { - // since EmptyReturnState can only appear in the last position, we - // don't need to verify that size==1 - return a.returnStates[0] == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) length() int { - return len(a.returnStates) -} - -func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { - return a.parents[index] -} - -func (a *ArrayPredictionContext) getReturnState(index int) int { - return a.returnStates[index] -} - -// Equals is the default comparison function for ArrayPredictionContext when no specialized -// implementation is needed for a collection -func (a *ArrayPredictionContext) Equals(o interface{}) bool { - if a == o { - return true - } - other, ok := o.(*ArrayPredictionContext) - if !ok { - return false - } - if a.cachedHash != other.Hash() { - return false // can't be same if hash is different - } - - // Must compare the actual array elements and not just the array address - // - return slices.Equal(a.returnStates, other.returnStates) && - slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool { - return x.Equals(y) - }) -} - -// Hash is the default hash function for ArrayPredictionContext when no specialized -// implementation is needed for a collection -func (a *ArrayPredictionContext) Hash() int { - return a.BasePredictionContext.cachedHash -} - -func (a *ArrayPredictionContext) String() string { - if a.isEmpty() { - return "[]" - } - - s := "[" - for i := 0; i < len(a.returnStates); i++ { - if i > 0 { - s = s + ", " - } - if a.returnStates[i] == BasePredictionContextEmptyReturnState { - s = s + "$" - continue - } - s = s + strconv.Itoa(a.returnStates[i]) - if a.parents[i] != nil { - s = s + " " + a.parents[i].String() - } else { - s = s + "nil" - } - } - - return s + "]" -} - -// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. -// Return {@link //EMPTY} if {@code outerContext} is empty or nil. -// / -func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { - if outerContext == nil { - outerContext = ParserRuleContextEmpty - } - // if we are in RuleContext of start rule, s, then BasePredictionContext - // is EMPTY. Nobody called us. 
(if we are empty, return empty) - if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { - return BasePredictionContextEMPTY - } - // If we have a parent, convert it to a BasePredictionContext graph - parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) - state := a.states[outerContext.GetInvokingState()] - transition := state.GetTransitions()[0] - - return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) -} - -func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - - // Share same graph if both same - // - if a == b || a.Equals(b) { - return a - } - - // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test - // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created - // from it. - // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion - // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from - // either of them. - - ac, ok1 := a.(*BaseSingletonPredictionContext) - bc, ok2 := b.(*BaseSingletonPredictionContext) - - if ok1 && ok2 { - return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) - } - // At least one of a or b is array - // If one is $ and rootIsWildcard, return $ as// wildcard - if rootIsWildcard { - if _, ok := a.(*EmptyPredictionContext); ok { - return a - } - if _, ok := b.(*EmptyPredictionContext); ok { - return b - } - } - - // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters - // here. - // - // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here - - var arp, arb *ArrayPredictionContext - var ok bool - if arp, ok = a.(*ArrayPredictionContext); ok { - } else if _, ok = a.(*BaseSingletonPredictionContext); ok { - arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) - } else if _, ok = a.(*EmptyPredictionContext); ok { - arp = NewArrayPredictionContext([]PredictionContext{}, []int{}) - } - - if arb, ok = b.(*ArrayPredictionContext); ok { - } else if _, ok = b.(*BaseSingletonPredictionContext); ok { - arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) - } else if _, ok = b.(*EmptyPredictionContext); ok { - arb = NewArrayPredictionContext([]PredictionContext{}, []int{}) - } - - // Both arp and arb - return mergeArrays(arp, arb, rootIsWildcard, mergeCache) -} - -// Merge two {@link SingletonBasePredictionContext} instances. -// -//

Stack tops equal, parents merge is same return left graph.
-//
-// Same stack top, parents differ merge parents giving array node, then
-// remainders of those graphs. A Newroot node is created to point to the
-// merged parents.
-//
-// Different stack tops pointing to same parent. Make array node for the
-// root where both element in the root point to the same (original)
-// parent.
-//
-// Different stack tops pointing to different parents. Make array node for
-// the root where each element points to the corresponding original
-// parent.
-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// @param mergeCache -// / -func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.Hash(), b.Hash()) - if previous != nil { - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.Hash(), a.Hash()) - if previous != nil { - return previous.(PredictionContext) - } - } - - rootMerge := mergeRoot(a, b, rootIsWildcard) - if rootMerge != nil { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), rootMerge) - } - return rootMerge - } - if a.returnState == b.returnState { - parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) - // if parent is same as existing a or b parent or reduced to a parent, - // return it - if parent == a.parentCtx { - return a // ax + bx = ax, if a=b - } - if parent == b.parentCtx { - return b // ax + bx = bx, if a=b - } - // else: ax + ay = a'[x,y] - // merge parents x and y, giving array node with x,y then remainders - // of those graphs. dup a, a' points at merged array - // Newjoined parent so create Newsingleton pointing to it, a' - spc := SingletonBasePredictionContextCreate(parent, a.returnState) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), spc) - } - return spc - } - // a != b payloads differ - // see if we can collapse parents due to $+x parents if local ctx - var singleParent PredictionContext - if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + - // bx = - // [a,b]x - singleParent = a.parentCtx - } - if singleParent != nil { // parents are same - // sort payloads and use same parent - payloads := []int{a.returnState, b.returnState} - if a.returnState > b.returnState { - payloads[0] = b.returnState - payloads[1] = a.returnState - } - parents := []PredictionContext{singleParent, singleParent} - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), apc) - } - return apc - } - // parents differ and can't merge them. Just pack together - // into array can't merge. - // ax + by = [ax,by] - payloads := []int{a.returnState, b.returnState} - parents := []PredictionContext{a.parentCtx, b.parentCtx} - if a.returnState > b.returnState { // sort by payload - payloads[0] = b.returnState - payloads[1] = a.returnState - parents = []PredictionContext{b.parentCtx, a.parentCtx} - } - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), apc) - } - return apc -} - -// Handle case where at least one of {@code a} or {@code b} is -// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used -// to represent {@link //EMPTY}. -// -//

Local-Context Merges
-//
-// These local-context merge operations are used when {@code rootIsWildcard}
-// is true.
-//
-// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
-//
-// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
-// {@code //EMPTY} return left graph.
-//
-// Special case of last merge if local context.
-//
-// Full-Context Merges
-//
-// These full-context merge operations are used when {@code rootIsWildcard}
-// is false.
-//
-// Must keep all contexts {@link //EMPTY} in array is a special value (and
-// nil parent).
-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// / -func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext { - if rootIsWildcard { - if a == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // // + b =// - } - if b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // a +// =// - } - } else { - if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // $ + $ = $ - } else if a == BasePredictionContextEMPTY { // $ + x = [$,x] - payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{b.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present) - payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{a.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } - } - return nil -} - -// Merge two {@link ArrayBasePredictionContext} instances. -// -//

Different tops, different parents.
-//
-// Shared top, same parents.
-//
-// Shared top, different parents.
-//
-// Shared top, all shared parents.
-//
-// Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.
-//
-// / -func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.Hash(), b.Hash()) - if previous != nil { - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") - } - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.Hash(), a.Hash()) - if previous != nil { - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") - } - return previous.(PredictionContext) - } - } - // merge sorted payloads a + b => M - i := 0 // walks a - j := 0 // walks b - k := 0 // walks target M array - - mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) - mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates)) - // walk and merge to yield mergedParents, mergedReturnStates - for i < len(a.returnStates) && j < len(b.returnStates) { - aParent := a.parents[i] - bParent := b.parents[j] - if a.returnStates[i] == b.returnStates[j] { - // same payload (stack tops are equal), must yield merged singleton - payload := a.returnStates[i] - // $+$ = $ - bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil - axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax - // -> - // ax - if bothDollars || axAX { - mergedParents[k] = aParent // choose left - mergedReturnStates[k] = payload - } else { // ax+ay -> a'[x,y] - mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) - mergedParents[k] = mergedParent - mergedReturnStates[k] = payload - } - i++ // hop over left one as usual - j++ // but also Skip one in right side since we merge - } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M - mergedParents[k] = aParent - mergedReturnStates[k] = a.returnStates[i] - i++ - } else { // b > a, copy b[j] to M - mergedParents[k] = bParent - mergedReturnStates[k] = b.returnStates[j] - j++ - } - k++ - } - // copy over any payloads remaining in either array - if i < len(a.returnStates) { - for p := i; p < len(a.returnStates); p++ { - mergedParents[k] = a.parents[p] - mergedReturnStates[k] = a.returnStates[p] - k++ - } - } else { - for p := j; p < len(b.returnStates); p++ { - mergedParents[k] = b.parents[p] - mergedReturnStates[k] = b.returnStates[p] - k++ - } - } - // trim merged if we combined a few that had same stack tops - if k < len(mergedParents) { // write index < last position trim - if k == 1 { // for just one merged element, return singleton top - pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), pc) - } - return pc - } - mergedParents = mergedParents[0:k] - mergedReturnStates = mergedReturnStates[0:k] - } - - M := NewArrayPredictionContext(mergedParents, mergedReturnStates) - - // if we created same array as a or b, return that instead - // TODO: track whether this is possible above during merge sort for speed - // TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. 
This could be causing allocation problems - if M == a { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), a) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a") - } - return a - } - if M == b { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), b) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b") - } - return b - } - combineCommonParents(mergedParents) - - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), M) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String()) - } - return M -} - -// Make pass over all M {@code parents} merge any {@code equals()} -// ones. -// / -func combineCommonParents(parents []PredictionContext) { - uniqueParents := make(map[PredictionContext]PredictionContext) - - for p := 0; p < len(parents); p++ { - parent := parents[p] - if uniqueParents[parent] == nil { - uniqueParents[parent] = parent - } - } - for q := 0; q < len(parents); q++ { - parents[q] = uniqueParents[parents[q]] - } -} - -func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext { - - if context.isEmpty() { - return context - } - existing := visited[context] - if existing != nil { - return existing - } - existing = contextCache.Get(context) - if existing != nil { - visited[context] = existing - return existing - } - changed := false - parents := make([]PredictionContext, context.length()) - for i := 0; i < len(parents); i++ { - parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) - if changed || parent != context.GetParent(i) { - if !changed { - parents = make([]PredictionContext, context.length()) - for j := 0; j < context.length(); j++ { - parents[j] = context.GetParent(j) - } - changed = true - } - parents[i] = parent - } - } - if !changed { - contextCache.add(context) - visited[context] = context - return context - } - var updated PredictionContext - if len(parents) == 0 { - updated = BasePredictionContextEMPTY - } else if len(parents) == 1 { - updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) - } else { - updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates()) - } - contextCache.add(updated) - visited[updated] = updated - visited[context] = updated - - return updated -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go deleted file mode 100644 index 7b9b72fa..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go +++ /dev/null @@ -1,529 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// This enumeration defines the prediction modes available in ANTLR 4 along with -// utility methods for analyzing configuration sets for conflicts and/or -// ambiguities. - -const ( - // - // The SLL(*) prediction mode. This prediction mode ignores the current - // parser context when making predictions. This is the fastest prediction - // mode, and provides correct results for many grammars. 
This prediction - // mode is more powerful than the prediction mode provided by ANTLR 3, but - // may result in syntax errors for grammar and input combinations which are - // not SLL. - // - //

- // When using this prediction mode, the parser will either return a correct
- // parse tree (i.e. the same parse tree that would be returned with the
- // {@link //LL} prediction mode), or it will Report a syntax error. If a
- // syntax error is encountered when using the {@link //SLL} prediction mode,
- // it may be due to either an actual syntax error in the input or indicate
- // that the particular combination of grammar and input requires the more
- // powerful {@link //LL} prediction abilities to complete successfully.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- // - PredictionModeSLL = 0 - // - // The LL(*) prediction mode. This prediction mode allows the current parser - // context to be used for resolving SLL conflicts that occur during - // prediction. This is the fastest prediction mode that guarantees correct - // parse results for all combinations of grammars with syntactically correct - // inputs. - // - //

- // When using this prediction mode, the parser will make correct decisions
- // for all syntactically-correct grammar and input combinations. However, in
- // cases where the grammar is truly ambiguous this prediction mode might not
- // Report a precise answer for exactly which alternatives are
- // ambiguous.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- // - PredictionModeLL = 1 - // - // The LL(*) prediction mode with exact ambiguity detection. In addition to - // the correctness guarantees provided by the {@link //LL} prediction mode, - // this prediction mode instructs the prediction algorithm to determine the - // complete and exact set of ambiguous alternatives for every ambiguous - // decision encountered while parsing. - // - //

- // This prediction mode may be used for diagnosing ambiguities during
- // grammar development. Due to the performance overhead of calculating sets
- // of ambiguous alternatives, this prediction mode should be avoided when
- // the exact results are not necessary.
- //
- // This prediction mode does not provide any guarantees for prediction
- // behavior for syntactically-incorrect inputs.
- // - PredictionModeLLExactAmbigDetection = 2 -) - -// Computes the SLL prediction termination condition. -// -//

-// This method computes the SLL prediction termination condition for both of
-// the following cases.
-//
-//   - The usual SLL+LL fallback upon SLL conflict
-//   - Pure SLL without LL fallback
-//
-// COMBINED SLL+LL PARSING
-//
-// When LL-fallback is enabled upon SLL conflict, correct predictions are
-// ensured regardless of how the termination condition is computed by this
-// method. Due to the substantially higher cost of LL prediction, the
-// prediction should only fall back to LL when the additional lookahead
-// cannot lead to a unique SLL prediction.
-//
-// Assuming combined SLL+LL parsing, an SLL configuration set with only
-// conflicting subsets should fall back to full LL, even if the
-// configuration sets don't resolve to the same alternative (e.g.
-// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting
-// configuration, SLL could continue with the hopes that more lookahead will
-// resolve via one of those non-conflicting configurations.
-//
-// Here's the prediction termination rule them: SLL (for SLL+LL parsing)
-// stops when it sees only conflicting configuration subsets. In contrast,
-// full LL keeps going when there is uncertainty.
-//
-// HEURISTIC
-//
-// As a heuristic, we stop prediction when we see any conflicting subset
-// unless we see a state that only has one alternative associated with it.
-// The single-alt-state thing lets prediction continue upon rules like
-// (otherwise, it would admit defeat too soon):
-//
-// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ” }
-//
-// When the ATN simulation reaches the state before {@code ”}, it has a
-// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
-// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
-// processing this node because alternative to has another way to continue,
-// via {@code [6|2|[]]}.
-//
-// It also let's us continue for this rule:
-//
-// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
-//
-// After Matching input A, we reach the stop state for rule A, state 1.
-// State 8 is the state right before B. Clearly alternatives 1 and 2
-// conflict and no amount of further lookahead will separate the two.
-// However, alternative 3 will be able to continue and so we do not stop
-// working on this state. In the previous example, we're concerned with
-// states associated with the conflicting alternatives. Here alt 3 is not
-// associated with the conflicting configs, but since we can continue
-// looking for input reasonably, don't declare the state done.
-//
-// PURE SLL PARSING
-//
-// To handle pure SLL parsing, all we have to do is make sure that we
-// combine stack contexts for configurations that differ only by semantic
-// predicate. From there, we can do the usual SLL termination heuristic.
-//
-// PREDICATES IN SLL+LL PARSING
-//
-// SLL decisions don't evaluate predicates until after they reach DFA stop
-// states because they need to create the DFA cache that works in all
-// semantic situations. In contrast, full LL evaluates predicates collected
-// during start state computation so it can ignore predicates thereafter.
-// This means that SLL termination detection can totally ignore semantic
-// predicates.
-//
-// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
-// semantic predicate contexts so we might see two configurations like the
-// following.
-//
-// {@code (s, 1, x, {}), (s, 1, x', {p})}
-//
-// Before testing these configurations against others, we have to merge
-// {@code x} and {@code x'} (without modifying the existing configurations).
-// For example, we test {@code (x+x')==x”} when looking for conflicts in
-// the following configurations.
-//
-// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {})}
-//
-// If the configuration set has predicates (as indicated by
-// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
-// the configurations to strip out all of the predicates so that a standard
-// {@link ATNConfigSet} will merge everything ignoring predicates.
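The SLL and LL behavior documented above is normally driven from a parser generated by the ANTLR tool rather than through these internal helpers. A minimal sketch of the usual two-stage SLL-then-LL pattern follows; only the PredictionMode constants come from the removed file, while the interface, the parse callback, and the GetInterpreter/SetPredictionMode accessors are assumptions about how recent versions of this runtime are typically used.

```
package example

import "github.com/antlr/antlr4/runtime/Go/antlr/v4"

// interpreterHolder is the single method of a generated parser that this
// sketch needs; generated parsers expose their ParserATNSimulator this way
// (assumption, not shown in the removed file).
type interpreterHolder interface {
	GetInterpreter() *antlr.ParserATNSimulator
}

// twoStageParse tries the fast SLL(*) mode first and retries with full LL(*)
// only when SLL reports a problem. The parse callback stands in for a
// hypothetical generated rule method; real generated parsers report errors
// through error listeners instead of returned errors.
func twoStageParse(p interpreterHolder, parse func() error) error {
	p.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
	if err := parse(); err == nil {
		return nil
	}
	// SLL hit a conflict or a genuine syntax error: retry with full LL.
	p.GetInterpreter().SetPredictionMode(antlr.PredictionModeLL)
	return parse()
}
```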

-func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { - // Configs in rule stop states indicate reaching the end of the decision - // rule (local context) or end of start rule (full context). If all - // configs meet this condition, then none of the configurations is able - // to Match additional input so we terminate prediction. - // - if PredictionModeallConfigsInRuleStopStates(configs) { - return true - } - // pure SLL mode parsing - if mode == PredictionModeSLL { - // Don't bother with combining configs from different semantic - // contexts if we can fail over to full LL costs more time - // since we'll often fail over anyway. - if configs.HasSemanticContext() { - // dup configs, tossing out semantic predicates - dup := NewBaseATNConfigSet(false) - for _, c := range configs.GetItems() { - - // NewBaseATNConfig({semanticContext:}, c) - c = NewBaseATNConfig2(c, SemanticContextNone) - dup.Add(c, nil) - } - configs = dup - } - // now we have combined contexts for configs with dissimilar preds - } - // pure SLL or combined SLL+LL mode parsing - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) -} - -// Checks if any configuration in {@code configs} is in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if any configuration in {@code configs} is in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); ok { - return true - } - } - return false -} - -// Checks if all configurations in {@code configs} are in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if all configurations in {@code configs} are in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { - - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); !ok { - return false - } - } - return true -} - -// Full LL prediction termination. -// -//

Can we stop looking ahead during ATN simulation or is there some
-// uncertainty as to which alternative we will ultimately pick, after
-// consuming more input? Even if there are partial conflicts, we might know
-// that everything is going to resolve to the same minimum alternative. That
-// means we can stop since no more lookahead will change that fact. On the
-// other hand, there might be multiple conflicts that resolve to different
-// minimums. That means we need more look ahead to decide which of those
-// alternatives we should predict.
-//
-// The basic idea is to split the set of configurations {@code C}, into
-// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
-// non-conflicting configurations. Two configurations conflict if they have
-// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
-// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
-// and {@code (s, j, ctx, _)} for {@code i!=j}.
-//
-// Reduce these configuration subsets to the set of possible alternatives.
-// You can compute the alternative subsets in one pass as follows:
-//
-// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
-// {@code C} holding {@code s} and {@code ctx} fixed.
-//
-// Or in pseudo-code, for each configuration {@code c} in {@code C}:
-//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-//
-// The values in {@code map} are the set of {@code A_s,ctx} sets.
-//
-// If {@code |A_s,ctx|=1} then there is no conflict associated with
-// {@code s} and {@code ctx}.
-//
-// Reduce the subsets to singletons by choosing a minimum of each subset. If
-// the union of these alternative subsets is a singleton, then no amount of
-// more lookahead will help us. We will always pick that alternative. If,
-// however, there is more than one alternative, then we are uncertain which
-// alternative to predict and must continue looking for resolution. We may
-// or may not discover an ambiguity in the future, even if there are no
-// conflicting subsets this round.
-//
-// The biggest sin is to terminate early because it means we've made a
-// decision but were uncertain as to the eventual outcome. We haven't used
-// enough lookahead. On the other hand, announcing a conflict too late is no
-// big deal you will still have the conflict. It's just inefficient. It
-// might even look until the end of file.
-//
-// No special consideration for semantic predicates is required because
-// predicates are evaluated on-the-fly for full LL prediction, ensuring that
-// no configuration contains a semantic context during the termination
-// check.
-//
-// CONFLICTING CONFIGS
-//
-// Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
-// when {@code i!=j} but {@code x=x'}. Because we merge all
-// {@code (s, i, _)} configurations together, that means that there are at
-// most {@code n} configurations associated with state {@code s} for
-// {@code n} possible alternatives in the decision. The merged stacks
-// complicate the comparison of configuration contexts {@code x} and
-// {@code x'}. Sam checks to see if one is a subset of the other by calling
-// merge and checking to see if the merged result is either {@code x} or
-// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
-// is the superset, then {@code i} is the only possible prediction since the
-// others resolve to {@code min(i)} as well. However, if {@code x} is
-// associated with {@code j>i} then at least one stack configuration for
-// {@code j} is not in conflict with alternative {@code i}. The algorithm
-// should keep going, looking for more lookahead due to the uncertainty.
-//
-// For simplicity, I'm doing a equality check between {@code x} and
-// {@code x'} that lets the algorithm continue to consume lookahead longer
-// than necessary. The reason I like the equality is of course the
-// simplicity but also because that is the test you need to detect the
-// alternatives that are actually in conflict.
-//
-// CONTINUE/STOP RULE
-//
-// Continue if union of resolved alternative sets from non-conflicting and
-// conflicting alternative subsets has more than one alternative. We are
-// uncertain about which alternative to predict.
-//
-// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
-// alternatives are still in the running for the amount of input we've
-// consumed at this point. The conflicting sets let us to strip away
-// configurations that won't lead to more states because we resolve
-// conflicts to the configuration with a minimum alternate for the
-// conflicting set.
-//
-// CASES
-//
-//   - no conflicts and more than 1 alternative in set => continue
-//
-//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
-//     {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
-//     {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
-//     {@code {1,3}} => continue
-//
-//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
-//     {@code (s', 2, y)}, {@code (s”, 1, z)} yields non-conflicting set
-//     {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
-//     {@code {1}} => stop and predict 1
-//
-//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
-//     {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
-//     {@code {1}} = {@code {1}} => stop and predict 1, can announce
-//     ambiguity {@code {1,2}}
-//
-//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
-//     {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
-//     {@code {2}} = {@code {1,2}} => continue
-//
-//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
-//     {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
-//     {@code {3}} = {@code {1,3}} => continue
-//
-// EXACT AMBIGUITY DETECTION
-//
-// If all states Report the same conflicting set of alternatives, then we
-// know we have the exact ambiguity set.
-//
-// |A_i|>1 and
-// A_i = A_j for all i, j.
-//
-// In other words, we continue examining lookahead until all {@code A_i}
-// have more than one alternative and all {@code A_i} are the same. If
-// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
-// because the resolved set is {@code {1}}. To determine what the real
-// ambiguity is, we have to know whether the ambiguity is between one and
-// two or one and three so we keep going. We can only stop prediction when
-// we need exact ambiguity detection when the sets look like
-// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
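The grouping step described above (collect the alternatives seen for each state/context pair, then inspect the resulting subsets) can be illustrated with plain Go types. This is only an illustration of the pseudo-code; it does not use the runtime's ATNConfig or JMap types, and the simplified config struct below is an assumption made for the sketch.

```
package example

import "fmt"

// config models only the fields of an ATN configuration that matter for the
// subset computation: state, alternative, and a context identifier.
type config struct {
	state int
	alt   int
	ctx   string
}

// conflictingAltSubsets mirrors "map[c] U= c.alt" from the comment above:
// alternatives are grouped by (state, ctx), ignoring the alternative itself.
func conflictingAltSubsets(configs []config) []map[int]bool {
	type key struct {
		state int
		ctx   string
	}
	byKey := map[key]map[int]bool{}
	for _, c := range configs {
		k := key{c.state, c.ctx}
		if byKey[k] == nil {
			byKey[k] = map[int]bool{}
		}
		byKey[k][c.alt] = true
	}
	subsets := make([]map[int]bool, 0, len(byKey))
	for _, alts := range byKey {
		subsets = append(subsets, alts)
	}
	return subsets
}

func Example() {
	// (s,1,x), (s,2,x), (s',1,y), (s',2,y), (s'',1,z): the "stop and predict 1"
	// case from the list above. Both conflicting subsets reduce to min alt 1.
	configs := []config{
		{1, 1, "x"}, {1, 2, "x"},
		{2, 1, "y"}, {2, 2, "y"},
		{3, 1, "z"},
	}
	for _, alts := range conflictingAltSubsets(configs) {
		fmt.Println(alts)
	}
}
```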

-func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { - return PredictionModegetSingleViableAlt(altsets) -} - -// Determines if every alternative subset in {@code altsets} contains more -// than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every {@link BitSet} in {@code altsets} has -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { - return !PredictionModehasNonConflictingAltSet(altsets) -} - -// Determines if any single alternative subset in {@code altsets} contains -// exactly one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} -func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() == 1 { - return true - } - } - return false -} - -// Determines if any single alternative subset in {@code altsets} contains -// more than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() > 1 { - return true - } - } - return false -} - -// Determines if every alternative subset in {@code altsets} is equivalent. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every member of {@code altsets} is equal to the -// others, otherwise {@code false} -func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { - var first *BitSet - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if first == nil { - first = alts - } else if alts != first { - return false - } - } - - return true -} - -// Returns the unique alternative predicted by all alternative subsets in -// {@code altsets}. If no such alternative exists, this method returns -// {@link ATN//INVALID_ALT_NUMBER}. -// -// @param altsets a collection of alternative subsets -func PredictionModegetUniqueAlt(altsets []*BitSet) int { - all := PredictionModeGetAlts(altsets) - if all.length() == 1 { - return all.minValue() - } - - return ATNInvalidAltNumber -} - -// Gets the complete set of represented alternatives for a collection of -// alternative subsets. This method returns the union of each {@link BitSet} -// in {@code altsets}. -// -// @param altsets a collection of alternative subsets -// @return the set of represented alternatives in {@code altsets} -func PredictionModeGetAlts(altsets []*BitSet) *BitSet { - all := NewBitSet() - for _, alts := range altsets { - all.or(alts) - } - return all -} - -// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. -// For each configuration {@code c} in {@code configs}: -// -//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-// 
-func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { - configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst) - - for _, c := range configs.GetItems() { - - alts, ok := configToAlts.Get(c) - if !ok { - alts = NewBitSet() - configToAlts.Put(c, alts) - } - alts.add(c.GetAlt()) - } - - return configToAlts.Values() -} - -// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each -// configuration {@code c} in {@code configs}: -// -//
-// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
-// 
-func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { - m := NewAltDict() - - for _, c := range configs.GetItems() { - alts := m.Get(c.GetState().String()) - if alts == nil { - alts = NewBitSet() - m.put(c.GetState().String(), alts) - } - alts.(*BitSet).add(c.GetAlt()) - } - return m -} - -func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { - values := PredictionModeGetStateToAltMap(configs).values() - for i := 0; i < len(values); i++ { - if values[i].(*BitSet).length() == 1 { - return true - } - } - return false -} - -func PredictionModegetSingleViableAlt(altsets []*BitSet) int { - result := ATNInvalidAltNumber - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - minAlt := alts.minValue() - if result == ATNInvalidAltNumber { - result = minAlt - } else if result != minAlt { // more than 1 viable alt - return ATNInvalidAltNumber - } - } - return result -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go deleted file mode 100644 index bfe542d0..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strings" - - "strconv" -) - -type Recognizer interface { - GetLiteralNames() []string - GetSymbolicNames() []string - GetRuleNames() []string - - Sempred(RuleContext, int, int) bool - Precpred(RuleContext, int) bool - - GetState() int - SetState(int) - Action(RuleContext, int, int) - AddErrorListener(ErrorListener) - RemoveErrorListeners() - GetATN() *ATN - GetErrorListenerDispatch() ErrorListener -} - -type BaseRecognizer struct { - listeners []ErrorListener - state int - - RuleNames []string - LiteralNames []string - SymbolicNames []string - GrammarFileName string -} - -func NewBaseRecognizer() *BaseRecognizer { - rec := new(BaseRecognizer) - rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} - rec.state = -1 - return rec -} - -var tokenTypeMapCache = make(map[string]int) -var ruleIndexMapCache = make(map[string]int) - -func (b *BaseRecognizer) checkVersion(toolVersion string) { - runtimeVersion := "4.12.0" - if runtimeVersion != toolVersion { - fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) - } -} - -func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { - panic("action not implemented on Recognizer!") -} - -func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { - b.listeners = append(b.listeners, listener) -} - -func (b *BaseRecognizer) RemoveErrorListeners() { - b.listeners = make([]ErrorListener, 0) -} - -func (b *BaseRecognizer) GetRuleNames() []string { - return b.RuleNames -} - -func (b *BaseRecognizer) GetTokenNames() []string { - return b.LiteralNames -} - -func (b *BaseRecognizer) GetSymbolicNames() []string { - return b.SymbolicNames -} - -func (b *BaseRecognizer) GetLiteralNames() []string { - return b.LiteralNames -} - -func (b *BaseRecognizer) GetState() int { - return b.state -} - -func (b *BaseRecognizer) SetState(v int) { - b.state = v -} - -//func (b *Recognizer) GetTokenTypeMap() { -// var tokenNames = b.GetTokenNames() -// if (tokenNames==nil) { -// panic("The current recognizer does not provide a list of token names.") -// } -// var 
result = tokenTypeMapCache[tokenNames] -// if(result==nil) { -// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) -// result.EOF = TokenEOF -// tokenTypeMapCache[tokenNames] = result -// } -// return result -//} - -// Get a map from rule names to rule indexes. -// -//

Used for XPath and tree pattern compilation.
-func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { - - panic("Method not defined!") - // var ruleNames = b.GetRuleNames() - // if (ruleNames==nil) { - // panic("The current recognizer does not provide a list of rule names.") - // } - // - // var result = ruleIndexMapCache[ruleNames] - // if(result==nil) { - // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) - // ruleIndexMapCache[ruleNames] = result - // } - // return result -} - -func (b *BaseRecognizer) GetTokenType(tokenName string) int { - panic("Method not defined!") - // var ttype = b.GetTokenTypeMap()[tokenName] - // if (ttype !=nil) { - // return ttype - // } else { - // return TokenInvalidType - // } -} - -//func (b *Recognizer) GetTokenTypeMap() map[string]int { -// Vocabulary vocabulary = getVocabulary() -// -// Synchronized (tokenTypeMapCache) { -// Map result = tokenTypeMapCache.Get(vocabulary) -// if (result == null) { -// result = new HashMap() -// for (int i = 0; i < GetATN().maxTokenType; i++) { -// String literalName = vocabulary.getLiteralName(i) -// if (literalName != null) { -// result.put(literalName, i) -// } -// -// String symbolicName = vocabulary.GetSymbolicName(i) -// if (symbolicName != null) { -// result.put(symbolicName, i) -// } -// } -// -// result.put("EOF", Token.EOF) -// result = Collections.unmodifiableMap(result) -// tokenTypeMapCache.put(vocabulary, result) -// } -// -// return result -// } -//} - -// What is the error header, normally line/character position information?// -func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { - line := e.GetOffendingToken().GetLine() - column := e.GetOffendingToken().GetColumn() - return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) -} - -// How should a token be displayed in an error message? The default -// -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. -// -// @deprecated This method is not called by the ANTLR 4 Runtime. Specific -// implementations of {@link ANTLRErrorStrategy} may provide a similar -// feature when necessary. For example, see -// {@link DefaultErrorStrategy//GetTokenErrorDisplay}. 
-func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { - if t == nil { - return "" - } - s := t.GetText() - if s == "" { - if t.GetTokenType() == TokenEOF { - s = "" - } else { - s = "<" + strconv.Itoa(t.GetTokenType()) + ">" - } - } - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - - return "'" + s + "'" -} - -func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { - return NewProxyErrorListener(b.listeners) -} - -// subclass needs to override these if there are sempreds or actions -// that the ATN interp needs to execute -func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool { - return true -} - -func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool { - return true -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go deleted file mode 100644 index 210699ba..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// A rule context is a record of a single rule invocation. It knows -// which context invoked it, if any. If there is no parent context, then -// naturally the invoking state is not valid. The parent link -// provides a chain upwards from the current rule invocation to the root -// of the invocation tree, forming a stack. We actually carry no -// information about the rule associated with b context (except -// when parsing). We keep only the state number of the invoking state from -// the ATN submachine that invoked b. Contrast b with the s -// pointer inside ParserRuleContext that tracks the current state -// being "executed" for the current rule. -// -// The parent contexts are useful for computing lookahead sets and -// getting error information. -// -// These objects are used during parsing and prediction. -// For the special case of parsers, we use the subclass -// ParserRuleContext. -// -// @see ParserRuleContext -// - -type RuleContext interface { - RuleNode - - GetInvokingState() int - SetInvokingState(int) - - GetRuleIndex() int - IsEmpty() bool - - GetAltNumber() int - SetAltNumber(altNumber int) - - String([]string, RuleContext) string -} - -type BaseRuleContext struct { - parentCtx RuleContext - invokingState int - RuleIndex int -} - -func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext { - - rn := new(BaseRuleContext) - - // What context invoked b rule? - rn.parentCtx = parent - - // What state invoked the rule associated with b context? - // The "return address" is the followState of invokingState - // If parent is nil, b should be -1. 
- if parent == nil { - rn.invokingState = -1 - } else { - rn.invokingState = invokingState - } - - return rn -} - -func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext { - return b -} - -func (b *BaseRuleContext) SetParent(v Tree) { - if v == nil { - b.parentCtx = nil - } else { - b.parentCtx = v.(RuleContext) - } -} - -func (b *BaseRuleContext) GetInvokingState() int { - return b.invokingState -} - -func (b *BaseRuleContext) SetInvokingState(t int) { - b.invokingState = t -} - -func (b *BaseRuleContext) GetRuleIndex() int { - return b.RuleIndex -} - -func (b *BaseRuleContext) GetAltNumber() int { - return ATNInvalidAltNumber -} - -func (b *BaseRuleContext) SetAltNumber(altNumber int) {} - -// A context is empty if there is no invoking state meaning nobody call -// current context. -func (b *BaseRuleContext) IsEmpty() bool { - return b.invokingState == -1 -} - -// Return the combined text of all child nodes. This method only considers -// tokens which have been added to the parse tree. -//

-// Since tokens on hidden channels (e.g. whitespace or comments) are not -// added to the parse trees, they will not appear in the output of b -// method. -// - -func (b *BaseRuleContext) GetParent() Tree { - return b.parentCtx -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go deleted file mode 100644 index b3e38af3..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go +++ /dev/null @@ -1,659 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "bytes" - "fmt" -) - -// -// Useful for rewriting out a buffered input token stream after doing some -// augmentation or other manipulations on it. - -//

-// You can insert stuff, replace, and delete chunks. Note that the operations
-// are done lazily--only if you convert the buffer to a {@link String} with
-// {@link TokenStream#getText()}. This is very efficient because you are not
-// moving data around all the time. As the buffer of tokens is converted to
-// strings, the {@link #getText()} method(s) scan the input token stream and
-// check to see if there is an operation at the current index. If so, the
-// operation is done and then normal {@link String} rendering continues on the
-// buffer. This is like having multiple Turing machine instruction streams
-// (programs) operating on a single input tape. :)
-//
-// This rewriter makes no modifications to the token stream. It does not ask the
-// stream to fill itself up nor does it advance the input cursor. The token
-// stream {@link TokenStream#index()} will return the same value before and
-// after any {@link #getText()} call.
-//
-// The rewriter only works on tokens that you have in the buffer and ignores the
-// current input cursor. If you are buffering tokens on-demand, calling
-// {@link #getText()} halfway through the input will only do rewrites for those
-// tokens in the first half of the file.
-//
-// Since the operations are done lazily at {@link #getText}-time, operations do
-// not screw up the token index values. That is, an insert operation at token
-// index {@code i} does not change the index values for tokens
-// {@code i}+1..n-1.
-//
-// Because operations never actually alter the buffer, you may always get the
-// original token stream back without undoing anything. Since the instructions
-// are queued up, you can easily simulate transactions and roll back any changes
-// if there is an error just by removing instructions. For example,
-//
-// CharStream input = new ANTLRFileStream("input");
-// TLexer lex = new TLexer(input);
-// CommonTokenStream tokens = new CommonTokenStream(lex);
-// T parser = new T(tokens);
-// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
-// parser.startRule();
-//
-// Then in the rules, you can execute (assuming rewriter is visible):
-//
-// Token t,u;
-// ...
-// rewriter.insertAfter(t, "text to put after t");}
-// rewriter.insertAfter(u, "text after u");}
-// System.out.println(rewriter.getText());
-//
-// You can also have multiple "instruction streams" and get multiple rewrites
-// from a single pass over the input. Just name the instruction streams and use
-// that name again when printing the buffer. This could be useful for generating
-// a C file and also its header file--all from the same buffer:
-//
-// rewriter.insertAfter("pass1", t, "text to put after t");}
-// rewriter.insertAfter("pass2", u, "text after u");}
-// System.out.println(rewriter.getText("pass1"));
-// System.out.println(rewriter.getText("pass2"));
-//
-// If you don't use named rewrite streams, a "default" stream is used as the
-// first example shows.
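For reference, the Java snippets above map onto the Go API defined in this removed file: NewTokenStreamRewriter, InsertBeforeDefault/InsertAfterDefault, ReplaceDefaultPos, and GetTextDefault. A minimal sketch, assuming a lexer generated from some grammar (the mylexer import path and NewMyLexer constructor are hypothetical names, not part of this repository):

```
package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"

	"example.com/mylexer" // hypothetical package with an ANTLR-generated lexer
)

func main() {
	input := antlr.NewInputStream("int x = 1 ;")
	lex := mylexer.NewMyLexer(input) // hypothetical generated constructor
	tokens := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)
	tokens.Fill() // the rewriter only sees tokens already in the buffer

	rewriter := antlr.NewTokenStreamRewriter(tokens)
	rewriter.InsertBeforeDefault(0, "/* generated */ ")
	rewriter.ReplaceDefaultPos(2, "y") // replace whatever token sits at index 2

	// The token stream itself is untouched; the queued edits are applied
	// lazily when the text is rendered.
	fmt.Println(rewriter.GetTextDefault())
}
```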

- -const ( - Default_Program_Name = "default" - Program_Init_Size = 100 - Min_Token_Index = 0 -) - -// Define the rewrite operation hierarchy - -type RewriteOperation interface { - // Execute the rewrite operation by possibly adding to the buffer. - // Return the index of the next token to operate on. - Execute(buffer *bytes.Buffer) int - String() string - GetInstructionIndex() int - GetIndex() int - GetText() string - GetOpName() string - GetTokens() TokenStream - SetInstructionIndex(val int) - SetIndex(int) - SetText(string) - SetOpName(string) - SetTokens(TokenStream) -} - -type BaseRewriteOperation struct { - //Current index of rewrites list - instruction_index int - //Token buffer index - index int - //Substitution text - text string - //Actual operation name - op_name string - //Pointer to token steam - tokens TokenStream -} - -func (op *BaseRewriteOperation) GetInstructionIndex() int { - return op.instruction_index -} - -func (op *BaseRewriteOperation) GetIndex() int { - return op.index -} - -func (op *BaseRewriteOperation) GetText() string { - return op.text -} - -func (op *BaseRewriteOperation) GetOpName() string { - return op.op_name -} - -func (op *BaseRewriteOperation) GetTokens() TokenStream { - return op.tokens -} - -func (op *BaseRewriteOperation) SetInstructionIndex(val int) { - op.instruction_index = val -} - -func (op *BaseRewriteOperation) SetIndex(val int) { - op.index = val -} - -func (op *BaseRewriteOperation) SetText(val string) { - op.text = val -} - -func (op *BaseRewriteOperation) SetOpName(val string) { - op.op_name = val -} - -func (op *BaseRewriteOperation) SetTokens(val TokenStream) { - op.tokens = val -} - -func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int { - return op.index -} - -func (op *BaseRewriteOperation) String() string { - return fmt.Sprintf("<%s@%d:\"%s\">", - op.op_name, - op.tokens.Get(op.GetIndex()), - op.text, - ) - -} - -type InsertBeforeOp struct { - BaseRewriteOperation -} - -func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { - return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ - index: index, - text: text, - op_name: "InsertBeforeOp", - tokens: stream, - }} -} - -func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int { - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF { - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index + 1 -} - -func (op *InsertBeforeOp) String() string { - return op.BaseRewriteOperation.String() -} - -// Distinguish between insert after/before to do the "insert afters" -// first and then the "insert befores" at same index. Implementation -// of "insert after" is "insert before index+1". - -type InsertAfterOp struct { - BaseRewriteOperation -} - -func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { - return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{ - index: index + 1, - text: text, - tokens: stream, - }} -} - -func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF { - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index + 1 -} - -func (op *InsertAfterOp) String() string { - return op.BaseRewriteOperation.String() -} - -// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp -// instructions. 
-type ReplaceOp struct { - BaseRewriteOperation - LastIndex int -} - -func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp { - return &ReplaceOp{ - BaseRewriteOperation: BaseRewriteOperation{ - index: from, - text: text, - op_name: "ReplaceOp", - tokens: stream, - }, - LastIndex: to, - } -} - -func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int { - if op.text != "" { - buffer.WriteString(op.text) - } - return op.LastIndex + 1 -} - -func (op *ReplaceOp) String() string { - if op.text == "" { - return fmt.Sprintf("", - op.tokens.Get(op.index), op.tokens.Get(op.LastIndex)) - } - return fmt.Sprintf("", - op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text) -} - -type TokenStreamRewriter struct { - //Our source stream - tokens TokenStream - // You may have multiple, named streams of rewrite operations. - // I'm calling these things "programs." - // Maps String (name) → rewrite (List) - programs map[string][]RewriteOperation - last_rewrite_token_indexes map[string]int -} - -func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter { - return &TokenStreamRewriter{ - tokens: tokens, - programs: map[string][]RewriteOperation{ - Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size), - }, - last_rewrite_token_indexes: map[string]int{}, - } -} - -func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream { - return tsr.tokens -} - -// Rollback the instruction stream for a program so that -// the indicated instruction (via instructionIndex) is no -// longer in the stream. UNTESTED! -func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) { - is, ok := tsr.programs[program_name] - if ok { - tsr.programs[program_name] = is[Min_Token_Index:instruction_index] - } -} - -func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) { - tsr.Rollback(Default_Program_Name, instruction_index) -} - -// Reset the program so that no instructions exist -func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) { - tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included -} - -func (tsr *TokenStreamRewriter) DeleteProgramDefault() { - tsr.DeleteProgram(Default_Program_Name) -} - -func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) { - // to insert after, just insert before next index (even if past end) - var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) { - tsr.InsertAfter(Default_Program_Name, index, text) -} - -func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) { - tsr.InsertAfter(program_name, token.GetTokenIndex(), text) -} - -func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) { - var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) { - tsr.InsertBefore(Default_Program_Name, index, text) -} - -func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) { - tsr.InsertBefore(program_name, token.GetTokenIndex(), text) -} - -func (tsr *TokenStreamRewriter) Replace(program_name 
string, from, to int, text string) { - if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() { - panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", - from, to, tsr.tokens.Size())) - } - var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) { - tsr.Replace(Default_Program_Name, from, to, text) -} - -func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) { - tsr.ReplaceDefault(index, index, text) -} - -func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) { - tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text) -} - -func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) { - tsr.ReplaceToken(Default_Program_Name, from, to, text) -} - -func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) { - tsr.ReplaceTokenDefault(index, index, text) -} - -func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) { - tsr.Replace(program_name, from, to, "") -} - -func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) { - tsr.Delete(Default_Program_Name, from, to) -} - -func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) { - tsr.DeleteDefault(index, index) -} - -func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) { - tsr.ReplaceToken(program_name, from, to, "") -} - -func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) { - tsr.DeleteToken(Default_Program_Name, from, to) -} - -func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int { - i, ok := tsr.last_rewrite_token_indexes[program_name] - if !ok { - return -1 - } - return i -} - -func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int { - return tsr.GetLastRewriteTokenIndex(Default_Program_Name) -} - -func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) { - tsr.last_rewrite_token_indexes[program_name] = i -} - -func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation { - is := make([]RewriteOperation, 0, Program_Init_Size) - tsr.programs[name] = is - return is -} - -func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) { - is := tsr.GetProgram(name) - is = append(is, op) - tsr.programs[name] = is -} - -func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation { - is, ok := tsr.programs[name] - if !ok { - is = tsr.InitializeProgram(name) - } - return is -} - -// Return the text from the original tokens altered per the -// instructions given to this rewriter. -func (tsr *TokenStreamRewriter) GetTextDefault() string { - return tsr.GetText( - Default_Program_Name, - NewInterval(0, tsr.tokens.Size()-1)) -} - -// Return the text from the original tokens altered per the -// instructions given to this rewriter. 
-func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string { - rewrites := tsr.programs[program_name] - start := interval.Start - stop := interval.Stop - // ensure start/end are in range - stop = min(stop, tsr.tokens.Size()-1) - start = max(start, 0) - if rewrites == nil || len(rewrites) == 0 { - return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute - } - buf := bytes.Buffer{} - // First, optimize instruction stream - indexToOp := reduceToSingleOperationPerIndex(rewrites) - // Walk buffer, executing instructions and emitting tokens - for i := start; i <= stop && i < tsr.tokens.Size(); { - op := indexToOp[i] - delete(indexToOp, i) // remove so any left have index size-1 - t := tsr.tokens.Get(i) - if op == nil { - // no operation at that index, just dump token - if t.GetTokenType() != TokenEOF { - buf.WriteString(t.GetText()) - } - i++ // move to next token - } else { - i = op.Execute(&buf) // execute operation and skip - } - } - // include stuff after end if it's last index in buffer - // So, if they did an insertAfter(lastValidIndex, "foo"), include - // foo if end==lastValidIndex. - if stop == tsr.tokens.Size()-1 { - // Scan any remaining operations after last token - // should be included (they will be inserts). - for _, op := range indexToOp { - if op.GetIndex() >= tsr.tokens.Size()-1 { - buf.WriteString(op.GetText()) - } - } - } - return buf.String() -} - -// We need to combine operations and report invalid operations (like -// overlapping replaces that are not completed nested). Inserts to -// same index need to be combined etc... Here are the cases: -// -// I.i.u I.j.v leave alone, nonoverlapping -// I.i.u I.i.v combine: Iivu -// -// R.i-j.u R.x-y.v | i-j in x-y delete first R -// R.i-j.u R.i-j.v delete first R -// R.i-j.u R.x-y.v | x-y in i-j ERROR -// R.i-j.u R.x-y.v | boundaries overlap ERROR -// -// Delete special case of replace (text==null): -// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) -// -// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before -// we're not deleting i) -// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping -// R.x-y.v I.i.u | i in x-y ERROR -// R.x-y.v I.x.u R.x-y.uv (combine, delete I) -// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping -// -// I.i.u = insert u before op @ index i -// R.x-y.u = replace x-y indexed tokens with u -// -// First we need to examine replaces. For any replace op: -// -// 1. wipe out any insertions before op within that range. -// 2. Drop any replace op before that is contained completely within -// that range. -// 3. Throw exception upon boundary overlap with any previous replace. -// -// Then we can deal with inserts: -// -// 1. for any inserts to same index, combine even if not adjacent. -// 2. for any prior replace with same left boundary, combine this -// insert with replace and delete this replace. -// 3. throw exception if index in same range as previous replace -// -// Don't actually delete; make op null in list. Easier to walk list. -// Later we can throw as we add to index → op map. -// -// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the -// inserted stuff would be before the replace range. But, if you -// add tokens in front of a method body '{' and then delete the method -// body, I think the stuff before the '{' you added should disappear too. -// -// Return a map from token index to operation. 
-func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation { - // WALK REPLACES - for i := 0; i < len(rewrites); i++ { - op := rewrites[i] - if op == nil { - continue - } - rop, ok := op.(*ReplaceOp) - if !ok { - continue - } - // Wipe prior inserts within range - for j := 0; j < i && j < len(rewrites); j++ { - if iop, ok := rewrites[j].(*InsertBeforeOp); ok { - if iop.index == rop.index { - // E.g., insert before 2, delete 2..2; update replace - // text to include insert before, kill insert - rewrites[iop.instruction_index] = nil - if rop.text != "" { - rop.text = iop.text + rop.text - } else { - rop.text = iop.text - } - } else if iop.index > rop.index && iop.index <= rop.LastIndex { - // delete insert as it's a no-op. - rewrites[iop.instruction_index] = nil - } - } - } - // Drop any prior replaces contained within - for j := 0; j < i && j < len(rewrites); j++ { - if prevop, ok := rewrites[j].(*ReplaceOp); ok { - if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex { - // delete replace as it's a no-op. - rewrites[prevop.instruction_index] = nil - continue - } - // throw exception unless disjoint or identical - disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex - // Delete special case of replace (text==null): - // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) - if prevop.text == "" && rop.text == "" && !disjoint { - rewrites[prevop.instruction_index] = nil - rop.index = min(prevop.index, rop.index) - rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) - println("new rop" + rop.String()) //TODO: remove console write, taken from Java version - } else if !disjoint { - panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) - } - } - } - } - // WALK INSERTS - for i := 0; i < len(rewrites); i++ { - op := rewrites[i] - if op == nil { - continue - } - //hack to replicate inheritance in composition - _, iok := rewrites[i].(*InsertBeforeOp) - _, aok := rewrites[i].(*InsertAfterOp) - if !iok && !aok { - continue - } - iop := rewrites[i] - // combine current insert with prior if any at same index - // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic - for j := 0; j < i && j < len(rewrites); j++ { - if nextIop, ok := rewrites[j].(*InsertAfterOp); ok { - if nextIop.index == iop.GetIndex() { - iop.SetText(nextIop.text + iop.GetText()) - rewrites[j] = nil - } - } - if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok { - if prevIop.index == iop.GetIndex() { - iop.SetText(iop.GetText() + prevIop.text) - rewrites[prevIop.instruction_index] = nil - } - } - } - // look for replaces where iop.index is in range; error - for j := 0; j < i && j < len(rewrites); j++ { - if rop, ok := rewrites[j].(*ReplaceOp); ok { - if iop.GetIndex() == rop.index { - rop.text = iop.GetText() + rop.text - rewrites[i] = nil - continue - } - if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex { - panic("insert op " + iop.String() + " within boundaries of previous " + rop.String()) - } - } - } - } - m := map[int]RewriteOperation{} - for i := 0; i < len(rewrites); i++ { - op := rewrites[i] - if op == nil { - continue - } - if _, ok := m[op.GetIndex()]; ok { - panic("should only be one op per index") - } - m[op.GetIndex()] = op - } - return m -} - -/* - Quick fixing Go lack of overloads -*/ - -func max(a, b int) int { - if a > b { - return a - } else { - return b - } -} -func min(a, b int) int { - if a < b { - return a - } else { - return b - } -} diff 
--git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go deleted file mode 100644 index 36be4f73..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go +++ /dev/null @@ -1,428 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" - "strings" -) - -// atom, set, epsilon, action, predicate, rule transitions. -// -//

-// This is a one way link. It emanates from a state (usually via a list of
-// transitions) and has a target state.
-//
-// Since we never have to change the ATN transitions once we construct it,
-// the states. We'll use the term Edge for the DFA to distinguish them from
-// ATN transitions.
- -type Transition interface { - getTarget() ATNState - setTarget(ATNState) - getIsEpsilon() bool - getLabel() *IntervalSet - getSerializationType() int - Matches(int, int, int) bool -} - -type BaseTransition struct { - target ATNState - isEpsilon bool - label int - intervalSet *IntervalSet - serializationType int -} - -func NewBaseTransition(target ATNState) *BaseTransition { - - if target == nil { - panic("target cannot be nil.") - } - - t := new(BaseTransition) - - t.target = target - // Are we epsilon, action, sempred? - t.isEpsilon = false - t.intervalSet = nil - - return t -} - -func (t *BaseTransition) getTarget() ATNState { - return t.target -} - -func (t *BaseTransition) setTarget(s ATNState) { - t.target = s -} - -func (t *BaseTransition) getIsEpsilon() bool { - return t.isEpsilon -} - -func (t *BaseTransition) getLabel() *IntervalSet { - return t.intervalSet -} - -func (t *BaseTransition) getSerializationType() int { - return t.serializationType -} - -func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - panic("Not implemented") -} - -const ( - TransitionEPSILON = 1 - TransitionRANGE = 2 - TransitionRULE = 3 - TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? - TransitionATOM = 5 - TransitionACTION = 6 - TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 - TransitionNOTSET = 8 - TransitionWILDCARD = 9 - TransitionPRECEDENCE = 10 -) - -var TransitionserializationNames = []string{ - "INVALID", - "EPSILON", - "RANGE", - "RULE", - "PREDICATE", - "ATOM", - "ACTION", - "SET", - "NOT_SET", - "WILDCARD", - "PRECEDENCE", -} - -//var TransitionserializationTypes struct { -// EpsilonTransition int -// RangeTransition int -// RuleTransition int -// PredicateTransition int -// AtomTransition int -// ActionTransition int -// SetTransition int -// NotSetTransition int -// WildcardTransition int -// PrecedencePredicateTransition int -//}{ -// TransitionEPSILON, -// TransitionRANGE, -// TransitionRULE, -// TransitionPREDICATE, -// TransitionATOM, -// TransitionACTION, -// TransitionSET, -// TransitionNOTSET, -// TransitionWILDCARD, -// TransitionPRECEDENCE -//} - -// TODO: make all transitions sets? no, should remove set edges -type AtomTransition struct { - *BaseTransition -} - -func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { - - t := new(AtomTransition) - t.BaseTransition = NewBaseTransition(target) - - t.label = intervalSet // The token type or character value or, signifies special intervalSet. 
- t.intervalSet = t.makeLabel() - t.serializationType = TransitionATOM - - return t -} - -func (t *AtomTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addOne(t.label) - return s -} - -func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.label == symbol -} - -func (t *AtomTransition) String() string { - return strconv.Itoa(t.label) -} - -type RuleTransition struct { - *BaseTransition - - followState ATNState - ruleIndex, precedence int -} - -func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { - - t := new(RuleTransition) - t.BaseTransition = NewBaseTransition(ruleStart) - - t.ruleIndex = ruleIndex - t.precedence = precedence - t.followState = followState - t.serializationType = TransitionRULE - t.isEpsilon = true - - return t -} - -func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -type EpsilonTransition struct { - *BaseTransition - - outermostPrecedenceReturn int -} - -func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { - - t := new(EpsilonTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionEPSILON - t.isEpsilon = true - t.outermostPrecedenceReturn = outermostPrecedenceReturn - return t -} - -func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *EpsilonTransition) String() string { - return "epsilon" -} - -type RangeTransition struct { - *BaseTransition - - start, stop int -} - -func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { - - t := new(RangeTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionRANGE - t.start = start - t.stop = stop - t.intervalSet = t.makeLabel() - return t -} - -func (t *RangeTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addRange(t.start, t.stop) - return s -} - -func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= t.start && symbol <= t.stop -} - -func (t *RangeTransition) String() string { - var sb strings.Builder - sb.WriteByte('\'') - sb.WriteRune(rune(t.start)) - sb.WriteString("'..'") - sb.WriteRune(rune(t.stop)) - sb.WriteByte('\'') - return sb.String() -} - -type AbstractPredicateTransition interface { - Transition - IAbstractPredicateTransitionFoo() -} - -type BaseAbstractPredicateTransition struct { - *BaseTransition -} - -func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { - - t := new(BaseAbstractPredicateTransition) - t.BaseTransition = NewBaseTransition(target) - - return t -} - -func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} - -type PredicateTransition struct { - *BaseAbstractPredicateTransition - - isCtxDependent bool - ruleIndex, predIndex int -} - -func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { - - t := new(PredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPREDICATE - t.ruleIndex = ruleIndex - t.predIndex = predIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PredicateTransition) getPredicate() *Predicate { - return 
NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) -} - -func (t *PredicateTransition) String() string { - return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) -} - -type ActionTransition struct { - *BaseTransition - - isCtxDependent bool - ruleIndex, actionIndex, predIndex int -} - -func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { - - t := new(ActionTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionACTION - t.ruleIndex = ruleIndex - t.actionIndex = actionIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *ActionTransition) String() string { - return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) -} - -type SetTransition struct { - *BaseTransition -} - -func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { - - t := new(SetTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionSET - if set != nil { - t.intervalSet = set - } else { - t.intervalSet = NewIntervalSet() - t.intervalSet.addOne(TokenInvalidType) - } - - return t -} - -func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.intervalSet.contains(symbol) -} - -func (t *SetTransition) String() string { - return t.intervalSet.String() -} - -type NotSetTransition struct { - *SetTransition -} - -func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { - - t := new(NotSetTransition) - - t.SetTransition = NewSetTransition(target, set) - - t.serializationType = TransitionNOTSET - - return t -} - -func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) -} - -func (t *NotSetTransition) String() string { - return "~" + t.intervalSet.String() -} - -type WildcardTransition struct { - *BaseTransition -} - -func NewWildcardTransition(target ATNState) *WildcardTransition { - - t := new(WildcardTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionWILDCARD - return t -} - -func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol -} - -func (t *WildcardTransition) String() string { - return "." 
-} - -type PrecedencePredicateTransition struct { - *BaseAbstractPredicateTransition - - precedence int -} - -func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { - - t := new(PrecedencePredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPRECEDENCE - t.precedence = precedence - t.isEpsilon = true - - return t -} - -func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { - return NewPrecedencePredicate(t.precedence) -} - -func (t *PrecedencePredicateTransition) String() string { - return fmt.Sprint(t.precedence) + " >= _p" -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go deleted file mode 100644 index 85b4f137..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The basic notion of a tree has a parent, a payload, and a list of children. -// It is the most abstract interface for all the trees used by ANTLR. -/// - -var TreeInvalidInterval = NewInterval(-1, -2) - -type Tree interface { - GetParent() Tree - SetParent(Tree) - GetPayload() interface{} - GetChild(i int) Tree - GetChildCount() int - GetChildren() []Tree -} - -type SyntaxTree interface { - Tree - - GetSourceInterval() *Interval -} - -type ParseTree interface { - SyntaxTree - - Accept(Visitor ParseTreeVisitor) interface{} - GetText() string - - ToStringTree([]string, Recognizer) string -} - -type RuleNode interface { - ParseTree - - GetRuleContext() RuleContext - GetBaseRuleContext() *BaseRuleContext -} - -type TerminalNode interface { - ParseTree - - GetSymbol() Token -} - -type ErrorNode interface { - TerminalNode - - errorNode() -} - -type ParseTreeVisitor interface { - Visit(tree ParseTree) interface{} - VisitChildren(node RuleNode) interface{} - VisitTerminal(node TerminalNode) interface{} - VisitErrorNode(node ErrorNode) interface{} -} - -type BaseParseTreeVisitor struct{} - -var _ ParseTreeVisitor = &BaseParseTreeVisitor{} - -func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } -func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil } - -// TODO -//func (this ParseTreeVisitor) Visit(ctx) { -// if (Utils.isArray(ctx)) { -// self := this -// return ctx.map(function(child) { return VisitAtom(self, child)}) -// } else { -// return VisitAtom(this, ctx) -// } -//} -// -//func VisitAtom(Visitor, ctx) { -// if (ctx.parser == nil) { //is terminal -// return -// } -// -// name := ctx.parser.ruleNames[ctx.ruleIndex] -// funcName := "Visit" + Utils.titleCase(name) -// -// return Visitor[funcName](ctx) -//} - -type ParseTreeListener interface { - VisitTerminal(node TerminalNode) - VisitErrorNode(node ErrorNode) - EnterEveryRule(ctx ParserRuleContext) - ExitEveryRule(ctx ParserRuleContext) -} - -type BaseParseTreeListener struct{} - -var _ ParseTreeListener = 
&BaseParseTreeListener{} - -func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {} -func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {} -func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {} -func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {} - -type TerminalNodeImpl struct { - parentCtx RuleContext - - symbol Token -} - -var _ TerminalNode = &TerminalNodeImpl{} - -func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl { - tn := new(TerminalNodeImpl) - - tn.parentCtx = nil - tn.symbol = symbol - - return tn -} - -func (t *TerminalNodeImpl) GetChild(i int) Tree { - return nil -} - -func (t *TerminalNodeImpl) GetChildren() []Tree { - return nil -} - -func (t *TerminalNodeImpl) SetChildren(tree []Tree) { - panic("Cannot set children on terminal node") -} - -func (t *TerminalNodeImpl) GetSymbol() Token { - return t.symbol -} - -func (t *TerminalNodeImpl) GetParent() Tree { - return t.parentCtx -} - -func (t *TerminalNodeImpl) SetParent(tree Tree) { - t.parentCtx = tree.(RuleContext) -} - -func (t *TerminalNodeImpl) GetPayload() interface{} { - return t.symbol -} - -func (t *TerminalNodeImpl) GetSourceInterval() *Interval { - if t.symbol == nil { - return TreeInvalidInterval - } - tokenIndex := t.symbol.GetTokenIndex() - return NewInterval(tokenIndex, tokenIndex) -} - -func (t *TerminalNodeImpl) GetChildCount() int { - return 0 -} - -func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} { - return v.VisitTerminal(t) -} - -func (t *TerminalNodeImpl) GetText() string { - return t.symbol.GetText() -} - -func (t *TerminalNodeImpl) String() string { - if t.symbol.GetTokenType() == TokenEOF { - return "" - } - - return t.symbol.GetText() -} - -func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string { - return t.String() -} - -// Represents a token that was consumed during reSynchronization -// rather than during a valid Match operation. For example, -// we will create this kind of a node during single token insertion -// and deletion as well as during "consume until error recovery set" -// upon no viable alternative exceptions. - -type ErrorNodeImpl struct { - *TerminalNodeImpl -} - -var _ ErrorNode = &ErrorNodeImpl{} - -func NewErrorNodeImpl(token Token) *ErrorNodeImpl { - en := new(ErrorNodeImpl) - en.TerminalNodeImpl = NewTerminalNodeImpl(token) - return en -} - -func (e *ErrorNodeImpl) errorNode() {} - -func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} { - return v.VisitErrorNode(e) -} - -type ParseTreeWalker struct { -} - -func NewParseTreeWalker() *ParseTreeWalker { - return new(ParseTreeWalker) -} - -// Performs a walk on the given parse tree starting at the root and going down recursively -// with depth-first search. On each node, EnterRule is called before -// recursively walking down into child nodes, then -// ExitRule is called after the recursive call to wind up. 
-func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { - switch tt := t.(type) { - case ErrorNode: - listener.VisitErrorNode(tt) - case TerminalNode: - listener.VisitTerminal(tt) - default: - p.EnterRule(listener, t.(RuleNode)) - for i := 0; i < t.GetChildCount(); i++ { - child := t.GetChild(i) - p.Walk(listener, child) - } - p.ExitRule(listener, t.(RuleNode)) - } -} - -// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule} -// then by triggering the event specific to the given parse tree node -func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { - ctx := r.GetRuleContext().(ParserRuleContext) - listener.EnterEveryRule(ctx) - ctx.EnterRule(listener) -} - -// Exits a grammar rule by first triggering the event specific to the given parse tree node -// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule} -func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { - ctx := r.GetRuleContext().(ParserRuleContext) - ctx.ExitRule(listener) - listener.ExitEveryRule(ctx) -} - -var ParseTreeWalkerDefault = NewParseTreeWalker() diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go deleted file mode 100644 index 9fad5d91..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "bytes" - "errors" - "fmt" - "math/bits" - "strconv" - "strings" -) - -func intMin(a, b int) int { - if a < b { - return a - } - return b -} - -func intMax(a, b int) int { - if a > b { - return a - } - return b -} - -// A simple integer stack - -type IntStack []int - -var ErrEmptyStack = errors.New("Stack is empty") - -func (s *IntStack) Pop() (int, error) { - l := len(*s) - 1 - if l < 0 { - return 0, ErrEmptyStack - } - v := (*s)[l] - *s = (*s)[0:l] - return v, nil -} - -func (s *IntStack) Push(e int) { - *s = append(*s, e) -} - -type comparable interface { - Equals(other Collectable[any]) bool -} - -func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool { - - return a.Equals(b) -} - -func standardHashFunction(a interface{}) int { - if h, ok := a.(hasher); ok { - return h.Hash() - } - - panic("Not Hasher") -} - -type hasher interface { - Hash() int -} - -const bitsPerWord = 64 - -func indexForBit(bit int) int { - return bit / bitsPerWord -} - -func wordForBit(data []uint64, bit int) uint64 { - idx := indexForBit(bit) - if idx >= len(data) { - return 0 - } - return data[idx] -} - -func maskForBit(bit int) uint64 { - return uint64(1) << (bit % bitsPerWord) -} - -func wordsNeeded(bit int) int { - return indexForBit(bit) + 1 -} - -type BitSet struct { - data []uint64 -} - -func NewBitSet() *BitSet { - return &BitSet{} -} - -func (b *BitSet) add(value int) { - idx := indexForBit(value) - if idx >= len(b.data) { - size := wordsNeeded(value) - data := make([]uint64, size) - copy(data, b.data) - b.data = data - } - b.data[idx] |= maskForBit(value) -} - -func (b *BitSet) clear(index int) { - idx := indexForBit(index) - if idx >= len(b.data) { - return - } - b.data[idx] &= ^maskForBit(index) -} - -func (b *BitSet) or(set *BitSet) { - // Get min size necessary to represent the bits in both sets. 
- bLen := b.minLen() - setLen := set.minLen() - maxLen := intMax(bLen, setLen) - if maxLen > len(b.data) { - // Increase the size of len(b.data) to repesent the bits in both sets. - data := make([]uint64, maxLen) - copy(data, b.data) - b.data = data - } - // len(b.data) is at least setLen. - for i := 0; i < setLen; i++ { - b.data[i] |= set.data[i] - } -} - -func (b *BitSet) remove(value int) { - b.clear(value) -} - -func (b *BitSet) contains(value int) bool { - idx := indexForBit(value) - if idx >= len(b.data) { - return false - } - return (b.data[idx] & maskForBit(value)) != 0 -} - -func (b *BitSet) minValue() int { - for i, v := range b.data { - if v == 0 { - continue - } - return i*bitsPerWord + bits.TrailingZeros64(v) - } - return 2147483647 -} - -func (b *BitSet) equals(other interface{}) bool { - otherBitSet, ok := other.(*BitSet) - if !ok { - return false - } - - if b == otherBitSet { - return true - } - - // We only compare set bits, so we cannot rely on the two slices having the same size. Its - // possible for two BitSets to have different slice lengths but the same set bits. So we only - // compare the relevant words and ignore the trailing zeros. - bLen := b.minLen() - otherLen := otherBitSet.minLen() - - if bLen != otherLen { - return false - } - - for i := 0; i < bLen; i++ { - if b.data[i] != otherBitSet.data[i] { - return false - } - } - - return true -} - -func (b *BitSet) minLen() int { - for i := len(b.data); i > 0; i-- { - if b.data[i-1] != 0 { - return i - } - } - return 0 -} - -func (b *BitSet) length() int { - cnt := 0 - for _, val := range b.data { - cnt += bits.OnesCount64(val) - } - return cnt -} - -func (b *BitSet) String() string { - vals := make([]string, 0, b.length()) - - for i, v := range b.data { - for v != 0 { - n := bits.TrailingZeros64(v) - vals = append(vals, strconv.Itoa(i*bitsPerWord+n)) - v &= ^(uint64(1) << n) - } - } - - return "{" + strings.Join(vals, ", ") + "}" -} - -type AltDict struct { - data map[string]interface{} -} - -func NewAltDict() *AltDict { - d := new(AltDict) - d.data = make(map[string]interface{}) - return d -} - -func (a *AltDict) Get(key string) interface{} { - key = "k-" + key - return a.data[key] -} - -func (a *AltDict) put(key string, value interface{}) { - key = "k-" + key - a.data[key] = value -} - -func (a *AltDict) values() []interface{} { - vs := make([]interface{}, len(a.data)) - i := 0 - for _, v := range a.data { - vs[i] = v - i++ - } - return vs -} - -type DoubleDict struct { - data map[int]map[int]interface{} -} - -func NewDoubleDict() *DoubleDict { - dd := new(DoubleDict) - dd.data = make(map[int]map[int]interface{}) - return dd -} - -func (d *DoubleDict) Get(a, b int) interface{} { - data := d.data[a] - - if data == nil { - return nil - } - - return data[b] -} - -func (d *DoubleDict) set(a, b int, o interface{}) { - data := d.data[a] - - if data == nil { - data = make(map[int]interface{}) - d.data[a] = data - } - - data[b] = o -} - -func EscapeWhitespace(s string, escapeSpaces bool) string { - - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - if escapeSpaces { - s = strings.Replace(s, " ", "\u00B7", -1) - } - return s -} - -func TerminalNodeToStringArray(sa []TerminalNode) []string { - st := make([]string, len(sa)) - - for i, s := range sa { - st[i] = fmt.Sprintf("%v", s) - } - - return st -} - -func PrintArrayJavaStyle(sa []string) string { - var buffer bytes.Buffer - - buffer.WriteString("[") - - for i, s := range sa { - 
buffer.WriteString(s) - if i != len(sa)-1 { - buffer.WriteString(", ") - } - } - - buffer.WriteString("]") - - return buffer.String() -} - -// murmur hash -func murmurInit(seed int) int { - return seed -} - -func murmurUpdate(h int, value int) int { - const c1 uint32 = 0xCC9E2D51 - const c2 uint32 = 0x1B873593 - const r1 uint32 = 15 - const r2 uint32 = 13 - const m uint32 = 5 - const n uint32 = 0xE6546B64 - - k := uint32(value) - k *= c1 - k = (k << r1) | (k >> (32 - r1)) - k *= c2 - - hash := uint32(h) ^ k - hash = (hash << r2) | (hash >> (32 - r2)) - hash = hash*m + n - return int(hash) -} - -func murmurFinish(h int, numberOfWords int) int { - var hash = uint32(h) - hash ^= uint32(numberOfWords) << 2 - hash ^= hash >> 16 - hash *= 0x85ebca6b - hash ^= hash >> 13 - hash *= 0xc2b2ae35 - hash ^= hash >> 16 - - return int(hash) -} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go deleted file mode 100644 index c9bd6751..00000000 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go +++ /dev/null @@ -1,235 +0,0 @@ -package antlr - -import "math" - -const ( - _initalCapacity = 16 - _initalBucketCapacity = 8 - _loadFactor = 0.75 -) - -type Set interface { - Add(value interface{}) (added interface{}) - Len() int - Get(value interface{}) (found interface{}) - Contains(value interface{}) bool - Values() []interface{} - Each(f func(interface{}) bool) -} - -type array2DHashSet struct { - buckets [][]Collectable[any] - hashcodeFunction func(interface{}) int - equalsFunction func(Collectable[any], Collectable[any]) bool - - n int // How many elements in set - threshold int // when to expand - - currentPrime int // jump by 4 primes each expand or whatever - initialBucketCapacity int -} - -func (as *array2DHashSet) Each(f func(interface{}) bool) { - if as.Len() < 1 { - return - } - - for _, bucket := range as.buckets { - for _, o := range bucket { - if o == nil { - break - } - if !f(o) { - return - } - } - } -} - -func (as *array2DHashSet) Values() []interface{} { - if as.Len() < 1 { - return nil - } - - values := make([]interface{}, 0, as.Len()) - as.Each(func(i interface{}) bool { - values = append(values, i) - return true - }) - return values -} - -func (as *array2DHashSet) Contains(value Collectable[any]) bool { - return as.Get(value) != nil -} - -func (as *array2DHashSet) Add(value Collectable[any]) interface{} { - if as.n > as.threshold { - as.expand() - } - return as.innerAdd(value) -} - -func (as *array2DHashSet) expand() { - old := as.buckets - - as.currentPrime += 4 - - var ( - newCapacity = len(as.buckets) << 1 - newTable = as.createBuckets(newCapacity) - newBucketLengths = make([]int, len(newTable)) - ) - - as.buckets = newTable - as.threshold = int(float64(newCapacity) * _loadFactor) - - for _, bucket := range old { - if bucket == nil { - continue - } - - for _, o := range bucket { - if o == nil { - break - } - - b := as.getBuckets(o) - bucketLength := newBucketLengths[b] - var newBucket []Collectable[any] - if bucketLength == 0 { - // new bucket - newBucket = as.createBucket(as.initialBucketCapacity) - newTable[b] = newBucket - } else { - newBucket = newTable[b] - if bucketLength == len(newBucket) { - // expand - newBucketCopy := make([]Collectable[any], len(newBucket)<<1) - copy(newBucketCopy[:bucketLength], newBucket) - newBucket = newBucketCopy - newTable[b] = newBucket - } - } - - newBucket[bucketLength] = o - newBucketLengths[b]++ - } - } -} - -func (as *array2DHashSet) Len() 
int { - return as.n -} - -func (as *array2DHashSet) Get(o Collectable[any]) interface{} { - if o == nil { - return nil - } - - b := as.getBuckets(o) - bucket := as.buckets[b] - if bucket == nil { // no bucket - return nil - } - - for _, e := range bucket { - if e == nil { - return nil // empty slot; not there - } - if as.equalsFunction(e, o) { - return e - } - } - - return nil -} - -func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} { - b := as.getBuckets(o) - - bucket := as.buckets[b] - - // new bucket - if bucket == nil { - bucket = as.createBucket(as.initialBucketCapacity) - bucket[0] = o - - as.buckets[b] = bucket - as.n++ - return o - } - - // look for it in bucket - for i := 0; i < len(bucket); i++ { - existing := bucket[i] - if existing == nil { // empty slot; not there, add. - bucket[i] = o - as.n++ - return o - } - - if as.equalsFunction(existing, o) { // found existing, quit - return existing - } - } - - // full bucket, expand and add to end - oldLength := len(bucket) - bucketCopy := make([]Collectable[any], oldLength<<1) - copy(bucketCopy[:oldLength], bucket) - bucket = bucketCopy - as.buckets[b] = bucket - bucket[oldLength] = o - as.n++ - return o -} - -func (as *array2DHashSet) getBuckets(value Collectable[any]) int { - hash := as.hashcodeFunction(value) - return hash & (len(as.buckets) - 1) -} - -func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] { - return make([][]Collectable[any], cap) -} - -func (as *array2DHashSet) createBucket(cap int) []Collectable[any] { - return make([]Collectable[any], cap) -} - -func newArray2DHashSetWithCap( - hashcodeFunction func(interface{}) int, - equalsFunction func(Collectable[any], Collectable[any]) bool, - initCap int, - initBucketCap int, -) *array2DHashSet { - if hashcodeFunction == nil { - hashcodeFunction = standardHashFunction - } - - if equalsFunction == nil { - equalsFunction = standardEqualsFunction - } - - ret := &array2DHashSet{ - hashcodeFunction: hashcodeFunction, - equalsFunction: equalsFunction, - - n: 0, - threshold: int(math.Floor(_initalCapacity * _loadFactor)), - - currentPrime: 1, - initialBucketCapacity: initBucketCap, - } - - ret.buckets = ret.createBuckets(initCap) - return ret -} - -func newArray2DHashSet( - hashcodeFunction func(interface{}) int, - equalsFunction func(Collectable[any], Collectable[any]) bool, -) *array2DHashSet { - return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity) -} diff --git a/vendor/github.com/antlr4-go/antlr/v4/.gitignore b/vendor/github.com/antlr4-go/antlr/v4/.gitignore new file mode 100644 index 00000000..38ea34ff --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/.gitignore @@ -0,0 +1,18 @@ +### Go template + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + + +# Go workspace file +go.work + +# No Goland stuff in this repo +.idea diff --git a/vendor/github.com/antlr4-go/antlr/v4/LICENSE b/vendor/github.com/antlr4-go/antlr/v4/LICENSE new file mode 100644 index 00000000..a22292eb --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither name of copyright holders nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/antlr4-go/antlr/v4/README.md b/vendor/github.com/antlr4-go/antlr/v4/README.md new file mode 100644 index 00000000..03e5b83e --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/README.md @@ -0,0 +1,54 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/antlr4-go/antlr?style=flat-square)](https://goreportcard.com/report/github.com/antlr4-go/antlr) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/github.com/antlr4-go/antlr)](https://pkg.go.dev/github.com/antlr4-go/antlr) +[![Release](https://img.shields.io/github/v/release/antlr4-go/antlr?sort=semver&style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest) +[![Release](https://img.shields.io/github/go-mod/go-version/antlr4-go/antlr?style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest) +[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg?style=flat-square)](https://github.com/antlr4-go/antlr/commit-activity) +[![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) +[![GitHub stars](https://img.shields.io/github/stars/antlr4-go/antlr?style=flat-square&label=Star&maxAge=2592000)](https://GitHub.com/Naereen/StrapDown.js/stargazers/) +# ANTLR4 Go Runtime Module Repo + +IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here. + + - Do not submit PRs or any change requests to this repo + - This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR + - This repo contains the Go runtime that your generated projects should import + +## Introduction + +This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained +at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create +the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here. + +The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically. + +### Why? + +The `go get` command is unable to retrieve the Go runtime when it is embedded so +deeply in the main repo. 
A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime, +does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, will not upgrade smoothly and +causes confusion. + +For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4` is retrieved by go get as: + +```sh +require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc +) +``` + +Where you would expect to see: + +```sh +require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0 +) +``` + +The decision was taken to create a separate org in a separate repo to hold the official Go runtime for ANTLR and +from whence users can expect `go get` to behave as expected. + + +# Documentation +Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on +migrating existing projects to use the new module location and for information on how to use the Go runtime in +general. diff --git a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go new file mode 100644 index 00000000..3bb4fd7c --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go @@ -0,0 +1,102 @@ +/* +Package antlr implements the Go version of the ANTLR 4 runtime. + +# The ANTLR Tool + +ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, +or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. +From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface +(or visitor) that makes it easy to respond to the recognition of phrases of interest. + +# Go Runtime + +At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime +source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of +ANTLR4 that it is compatible with (I.E. uses the /v4 path). + +However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root +of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code. +This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not +list the release tag such as @4.12.0 - this was confusing, to say the least. + +As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr` +(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information, +which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs]. + +This means that if you are using the source code without modules, you should also use the source code in the [new repo]. +Though we highly recommend that you use go modules, as they are now idiomatic for Go. + +I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good. + +Go runtime author: [Jim Idle] jimi@idle.ws + +# Code Generation + +ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a +runtime library, written specifically to support the generated code in the target language. 
This library is the +runtime for the Go target. + +To generate code for the go target, it is generally recommended to place the source grammar files in a package of +their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory +it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean +that the antlr tool JAR file will be checked in to your source code control though, so you are, of course, free to use any other +way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in +your IDE, or configuration in your CI system. Checking in the jar does mean that it is easy to reproduce the build as +it was at any point in its history. + +Here is a general/recommended template for an ANTLR based recognizer in Go: + + . + ├── parser + │ ├── mygrammar.g4 + │ ├── antlr-4.12.1-complete.jar + │ ├── generate.go + │ └── generate.sh + ├── parsing - generated code goes here + │ └── error_listeners.go + ├── go.mod + ├── go.sum + ├── main.go + └── main_test.go + +Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in. + +The generate.go file then looks like this: + + package parser + + //go:generate ./generate.sh + +And the generate.sh file will look similar to this: + + #!/bin/sh + + alias antlr4='java -Xmx500M -cp "./antlr4-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool' + antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4 + +depending on whether you want visitors or listeners or any other ANTLR options. Not that another option here +is to generate the code into a + +From the command line at the root of your source package (location of go.mo)d) you can then simply issue the command: + + go generate ./... + +Which will generate the code for the parser, and place it in the parsing package. You can then use the generated code +by importing the parsing package. + +There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like. + +# Copyright Notice + +Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. + +Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root. + +[target languages]: https://github.com/antlr/antlr4/tree/master/runtime +[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt +[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md +[new repo]: https://github.com/antlr4-go/antlr +[Jim Idle]: https://github.com/jimidle +[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md +*/ +package antlr diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go b/vendor/github.com/antlr4-go/antlr/v4/atn.go similarity index 94% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go rename to vendor/github.com/antlr4-go/antlr/v4/atn.go index 98010d2e..cdeefed2 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn.go @@ -20,10 +20,11 @@ var ATNInvalidAltNumber int // [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf // [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network type ATN struct { - // DecisionToState is the decision points for all rules, subrules, optional - // blocks, ()+, ()*, etc. 
Each subrule/rule is a decision point, and we must track them so we + + // DecisionToState is the decision points for all rules, sub-rules, optional + // blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must track them, so we // can go back later and build DFA predictors for them. This includes - // all the rules, subrules, optional blocks, ()+, ()* etc... + // all the rules, sub-rules, optional blocks, ()+, ()* etc... DecisionToState []DecisionState // grammarType is the ATN type and is used for deserializing ATNs from strings. @@ -51,6 +52,8 @@ type ATN struct { // specified, and otherwise is nil. ruleToTokenType []int + // ATNStates is a list of all states in the ATN, ordered by state number. + // states []ATNState mu sync.Mutex diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go new file mode 100644 index 00000000..a83f25d3 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go @@ -0,0 +1,335 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +const ( + lexerConfig = iota // Indicates that this ATNConfig is for a lexer + parserConfig // Indicates that this ATNConfig is for a parser +) + +// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic +// context). The syntactic context is a graph-structured stack node whose +// path(s) to the root is the rule invocation(s) chain used to arrive in the +// state. The semantic context is the tree of semantic predicates encountered +// before reaching an ATN state. +type ATNConfig struct { + precedenceFilterSuppressed bool + state ATNState + alt int + context *PredictionContext + semanticContext SemanticContext + reachesIntoOuterContext int + cType int // lexerConfig or parserConfig + lexerActionExecutor *LexerActionExecutor + passedThroughNonGreedyDecision bool +} + +// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only +func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig { + return NewATNConfig5(state, alt, context, SemanticContextNone) +} + +// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context +func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") // TODO: Necessary? 
+ } + + pac := &ATNConfig{} + pac.state = state + pac.alt = alt + pac.context = context + pac.semanticContext = semanticContext + pac.cType = parserConfig + return pac +} + +// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only +func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig { + return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) +} + +// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context +func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig { + return NewATNConfig(c, state, c.GetContext(), semanticContext) +} + +// NewATNConfig2 creates a new ATNConfig instance given an existing config, and a context only +func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig { + return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext) +} + +// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only +func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig { + return NewATNConfig(c, state, context, c.GetSemanticContext()) +} + +// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors' +// are just wrappers around this one. +func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed + } + b := &ATNConfig{} + b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext) + b.cType = parserConfig + return b +} + +func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) { + + a.state = state + a.alt = alt + a.context = context + a.semanticContext = semanticContext + a.reachesIntoOuterContext = c.GetReachesIntoOuterContext() + a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed() +} + +func (a *ATNConfig) getPrecedenceFilterSuppressed() bool { + return a.precedenceFilterSuppressed +} + +func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) { + a.precedenceFilterSuppressed = v +} + +// GetState returns the ATN state associated with this configuration +func (a *ATNConfig) GetState() ATNState { + return a.state +} + +// GetAlt returns the alternative associated with this configuration +func (a *ATNConfig) GetAlt() int { + return a.alt +} + +// SetContext sets the rule invocation stack associated with this configuration +func (a *ATNConfig) SetContext(v *PredictionContext) { + a.context = v +} + +// GetContext returns the rule invocation stack associated with this configuration +func (a *ATNConfig) GetContext() *PredictionContext { + return a.context +} + +// GetSemanticContext returns the semantic context associated with this configuration +func (a *ATNConfig) GetSemanticContext() SemanticContext { + return a.semanticContext +} + +// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration +func (a *ATNConfig) GetReachesIntoOuterContext() int { + return a.reachesIntoOuterContext +} + +// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration +func (a *ATNConfig) SetReachesIntoOuterContext(v int) { + a.reachesIntoOuterContext = v +} + +// Equals is the default comparison function 
for an ATNConfig when no specialist implementation is required +// for a collection. +// +// An ATN configuration is equal to another if both have the same state, they +// predict the same alternative, and syntactic/semantic contexts are the same. +func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool { + switch a.cType { + case lexerConfig: + return a.LEquals(o) + case parserConfig: + return a.PEquals(o) + default: + panic("Invalid ATNConfig type") + } +} + +// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required +// for a collection. +// +// An ATN configuration is equal to another if both have the same state, they +// predict the same alternative, and syntactic/semantic contexts are the same. +func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool { + var other, ok = o.(*ATNConfig) + + if !ok { + return false + } + if a == other { + return true + } else if other == nil { + return false + } + + var equal bool + + if a.context == nil { + equal = other.context == nil + } else { + equal = a.context.Equals(other.context) + } + + var ( + nums = a.state.GetStateNumber() == other.state.GetStateNumber() + alts = a.alt == other.alt + cons = a.semanticContext.Equals(other.semanticContext) + sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed + ) + + return nums && alts && cons && sups && equal +} + +// Hash is the default hash function for a parser ATNConfig, when no specialist hash function +// is required for a collection +func (a *ATNConfig) Hash() int { + switch a.cType { + case lexerConfig: + return a.LHash() + case parserConfig: + return a.PHash() + default: + panic("Invalid ATNConfig type") + } +} + +// PHash is the default hash function for a parser ATNConfig, when no specialist hash function +// is required for a collection +func (a *ATNConfig) PHash() int { + var c int + if a.context != nil { + c = a.context.Hash() + } + + h := murmurInit(7) + h = murmurUpdate(h, a.state.GetStateNumber()) + h = murmurUpdate(h, a.alt) + h = murmurUpdate(h, c) + h = murmurUpdate(h, a.semanticContext.Hash()) + return murmurFinish(h, 4) +} + +// String returns a string representation of the ATNConfig, usually used for debugging purposes +func (a *ATNConfig) String() string { + var s1, s2, s3 string + + if a.context != nil { + s1 = ",[" + fmt.Sprint(a.context) + "]" + } + + if a.semanticContext != SemanticContextNone { + s2 = "," + fmt.Sprint(a.semanticContext) + } + + if a.reachesIntoOuterContext > 0 { + s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext) + } + + return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3) +} + +func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.state = state + lac.alt = alt + lac.context = context + lac.semanticContext = SemanticContextNone + lac.cType = lexerConfig + return lac +} + +func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = c.lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), 
c.GetContext(), c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = c.lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +//goland:noinspection GoUnusedExportedFunction +func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.state = state + lac.alt = alt + lac.context = context + lac.semanticContext = SemanticContextNone + lac.cType = lexerConfig + return lac +} + +// LHash is the default hash function for Lexer ATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. +func (a *ATNConfig) LHash() int { + var f int + if a.passedThroughNonGreedyDecision { + f = 1 + } else { + f = 0 + } + h := murmurInit(7) + h = murmurUpdate(h, a.state.GetStateNumber()) + h = murmurUpdate(h, a.alt) + h = murmurUpdate(h, a.context.Hash()) + h = murmurUpdate(h, a.semanticContext.Hash()) + h = murmurUpdate(h, f) + h = murmurUpdate(h, a.lexerActionExecutor.Hash()) + h = murmurFinish(h, 6) + return h +} + +// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. +func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool { + var otherT, ok = other.(*ATNConfig) + if !ok { + return false + } else if a == otherT { + return true + } else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision { + return false + } + + switch { + case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil: + return true + case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil: + if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) { + return false + } + default: + return false // One but not both, are nil + } + + return a.PEquals(otherT) +} + +func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool { + var ds, ok = target.(DecisionState) + + return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go new file mode 100644 index 00000000..52dbaf80 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go @@ -0,0 +1,301 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +// ATNConfigSet is a specialized set of ATNConfig that tracks information +// about its elements and can combine similar configurations using a +// graph-structured stack. +type ATNConfigSet struct { + cachedHash int + + // configLookup is used to determine whether two ATNConfigSets are equal. We + // need all configurations with the same (s, i, _, semctx) to be equal. A key + // effectively doubles the number of objects associated with ATNConfigs. All + // keys are hashed by (s, i, _, pi), not including the context. Wiped out when + // read-only because a set becomes a DFA state. 
+ configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]] + + // configs is the added elements that did not match an existing key in configLookup + configs []*ATNConfig + + // TODO: These fields make me pretty uncomfortable, but it is nice to pack up + // info together because it saves re-computation. Can we track conflicts as they + // are added to save scanning configs later? + conflictingAlts *BitSet + + // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates + // we hit a pred while computing a closure operation. Do not make a DFA state + // from the ATNConfigSet in this case. TODO: How is this used by parsers? + dipsIntoOuterContext bool + + // fullCtx is whether it is part of a full context LL prediction. Used to + // determine how to merge $. It is a wildcard with SLL, but not for an LL + // context merge. + fullCtx bool + + // Used in parser and lexer. In lexer, it indicates we hit a pred + // while computing a closure operation. Don't make a DFA state from this set. + hasSemanticContext bool + + // readOnly is whether it is read-only. Do not + // allow any code to manipulate the set if true because DFA states will point at + // sets and those must not change. It not, protect other fields; conflictingAlts + // in particular, which is assigned after readOnly. + readOnly bool + + // TODO: These fields make me pretty uncomfortable, but it is nice to pack up + // info together because it saves re-computation. Can we track conflicts as they + // are added to save scanning configs later? + uniqueAlt int +} + +// Alts returns the combined set of alts for all the configurations in this set. +func (b *ATNConfigSet) Alts() *BitSet { + alts := NewBitSet() + for _, it := range b.configs { + alts.add(it.GetAlt()) + } + return alts +} + +// NewATNConfigSet creates a new ATNConfigSet instance. +func NewATNConfigSet(fullCtx bool) *ATNConfigSet { + return &ATNConfigSet{ + cachedHash: -1, + configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"), + fullCtx: fullCtx, + } +} + +// Add merges contexts with existing configs for (s, i, pi, _), +// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and +// 'pi' is the [ATNConfig].semanticContext. +// +// We use (s,i,pi) as the key. +// Updates dipsIntoOuterContext and hasSemanticContext when necessary. +func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool { + if b.readOnly { + panic("set is read-only") + } + + if config.GetSemanticContext() != SemanticContextNone { + b.hasSemanticContext = true + } + + if config.GetReachesIntoOuterContext() > 0 { + b.dipsIntoOuterContext = true + } + + existing, present := b.configLookup.Put(config) + + // The config was not already in the set + // + if !present { + b.cachedHash = -1 + b.configs = append(b.configs, config) // Track order here + return true + } + + // Merge a previous (s, i, pi, _) with it and save the result + rootIsWildcard := !b.fullCtx + merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache) + + // No need to check for existing.context because config.context is in the cache, + // since the only way to create new graphs is the "call rule" and here. We cache + // at both places. 
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) + + // Preserve the precedence filter suppression during the merge + if config.getPrecedenceFilterSuppressed() { + existing.setPrecedenceFilterSuppressed(true) + } + + // Replace the context because there is no need to do alt mapping + existing.SetContext(merged) + + return true +} + +// GetStates returns the set of states represented by all configurations in this config set +func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] { + + // states uses the standard comparator and Hash() provided by the ATNState instance + // + states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()") + + for i := 0; i < len(b.configs); i++ { + states.Put(b.configs[i].GetState()) + } + + return states +} + +func (b *ATNConfigSet) GetPredicates() []SemanticContext { + predicates := make([]SemanticContext, 0) + + for i := 0; i < len(b.configs); i++ { + c := b.configs[i].GetSemanticContext() + + if c != SemanticContextNone { + predicates = append(predicates, c) + } + } + + return predicates +} + +func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { + if b.readOnly { + panic("set is read-only") + } + + // Empty indicate no optimization is possible + if b.configLookup == nil || b.configLookup.Len() == 0 { + return + } + + for i := 0; i < len(b.configs); i++ { + config := b.configs[i] + config.SetContext(interpreter.getCachedContext(config.GetContext())) + } +} + +func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool { + for i := 0; i < len(coll); i++ { + b.Add(coll[i], nil) + } + + return false +} + +// Compare The configs are only equal if they are in the same order and their Equals function returns true. +// Java uses ArrayList.equals(), which requires the same order. 
+func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool { + if len(b.configs) != len(bs.configs) { + return false + } + for i := 0; i < len(b.configs); i++ { + if !b.configs[i].Equals(bs.configs[i]) { + return false + } + } + + return true +} + +func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool { + if b == other { + return true + } else if _, ok := other.(*ATNConfigSet); !ok { + return false + } + + other2 := other.(*ATNConfigSet) + var eca bool + switch { + case b.conflictingAlts == nil && other2.conflictingAlts == nil: + eca = true + case b.conflictingAlts != nil && other2.conflictingAlts != nil: + eca = b.conflictingAlts.equals(other2.conflictingAlts) + } + return b.configs != nil && + b.fullCtx == other2.fullCtx && + b.uniqueAlt == other2.uniqueAlt && + eca && + b.hasSemanticContext == other2.hasSemanticContext && + b.dipsIntoOuterContext == other2.dipsIntoOuterContext && + b.Compare(other2) +} + +func (b *ATNConfigSet) Hash() int { + if b.readOnly { + if b.cachedHash == -1 { + b.cachedHash = b.hashCodeConfigs() + } + + return b.cachedHash + } + + return b.hashCodeConfigs() +} + +func (b *ATNConfigSet) hashCodeConfigs() int { + h := 1 + for _, config := range b.configs { + h = 31*h + config.Hash() + } + return h +} + +func (b *ATNConfigSet) Contains(item *ATNConfig) bool { + if b.readOnly { + panic("not implemented for read-only sets") + } + if b.configLookup == nil { + return false + } + return b.configLookup.Contains(item) +} + +func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool { + return b.Contains(item) +} + +func (b *ATNConfigSet) Clear() { + if b.readOnly { + panic("set is read-only") + } + b.configs = make([]*ATNConfig, 0) + b.cachedHash = -1 + b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()") +} + +func (b *ATNConfigSet) String() string { + + s := "[" + + for i, c := range b.configs { + s += c.String() + + if i != len(b.configs)-1 { + s += ", " + } + } + + s += "]" + + if b.hasSemanticContext { + s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) + } + + if b.uniqueAlt != ATNInvalidAltNumber { + s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) + } + + if b.conflictingAlts != nil { + s += ",conflictingAlts=" + b.conflictingAlts.String() + } + + if b.dipsIntoOuterContext { + s += ",dipsIntoOuterContext" + } + + return s +} + +// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair +// for use in lexers. 
+func NewOrderedATNConfigSet() *ATNConfigSet { + return &ATNConfigSet{ + cachedHash: -1, + // This set uses the standard Hash() and Equals() from ATNConfig + configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"), + fullCtx: false, + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go similarity index 86% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go rename to vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go index 3c975ec7..bdb30b36 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go @@ -20,7 +20,7 @@ func (opts *ATNDeserializationOptions) ReadOnly() bool { func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) { if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) } opts.readOnly = readOnly } @@ -31,7 +31,7 @@ func (opts *ATNDeserializationOptions) VerifyATN() bool { func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) { if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) } opts.verifyATN = verifyATN } @@ -42,11 +42,12 @@ func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool { func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) { if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) } opts.generateRuleBypassTransitions = generateRuleBypassTransitions } +//goland:noinspection GoUnusedExportedFunction func DefaultATNDeserializationOptions() *ATNDeserializationOptions { return NewATNDeserializationOptions(&defaultATNDeserializationOptions) } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go similarity index 97% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go rename to vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go index 3888856b..2dcb9ae1 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go @@ -35,6 +35,7 @@ func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { return &ATNDeserializer{options: options} } +//goland:noinspection GoUnusedFunction func stringInSlice(a string, list []string) int { for i, b := range list { if b == a { @@ -193,7 +194,7 @@ func (a *ATNDeserializer) readModes(atn *ATN) { } } -func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet { +func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet { m := a.readInt() // Preallocate the needed capacity. 
@@ -350,7 +351,7 @@ func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) { bypassStart.endState = bypassStop - atn.defineDecisionState(bypassStart.BaseDecisionState) + atn.defineDecisionState(&bypassStart.BaseDecisionState) bypassStop.startState = bypassStart @@ -450,7 +451,7 @@ func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) { continue } - // We analyze the ATN to determine if a ATN decision state is the + // We analyze the [ATN] to determine if an ATN decision state is the // decision for the closure block that determines whether a // precedence rule should continue or complete. if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule { @@ -553,7 +554,7 @@ func (a *ATNDeserializer) readInt() int { return int(v) // data is 32 bits but int is at least that big } -func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { +func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { target := atn.states[trg] switch typeIndex { diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go new file mode 100644 index 00000000..afe6c9f8 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go @@ -0,0 +1,41 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false)) + +type IATNSimulator interface { + SharedContextCache() *PredictionContextCache + ATN() *ATN + DecisionToDFA() []*DFA +} + +type BaseATNSimulator struct { + atn *ATN + sharedContextCache *PredictionContextCache + decisionToDFA []*DFA +} + +func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext { + if b.sharedContextCache == nil { + return context + } + + //visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()") + visited := NewVisitRecord() + return getCachedBasePredictionContext(context, b.sharedContextCache, visited) +} + +func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache { + return b.sharedContextCache +} + +func (b *BaseATNSimulator) ATN() *ATN { + return b.atn +} + +func (b *BaseATNSimulator) DecisionToDFA() []*DFA { + return b.decisionToDFA +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_state.go b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go new file mode 100644 index 00000000..2ae5807c --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go @@ -0,0 +1,461 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "os" + "strconv" +) + +// Constants for serialization. 
+const ( + ATNStateInvalidType = 0 + ATNStateBasic = 1 + ATNStateRuleStart = 2 + ATNStateBlockStart = 3 + ATNStatePlusBlockStart = 4 + ATNStateStarBlockStart = 5 + ATNStateTokenStart = 6 + ATNStateRuleStop = 7 + ATNStateBlockEnd = 8 + ATNStateStarLoopBack = 9 + ATNStateStarLoopEntry = 10 + ATNStatePlusLoopBack = 11 + ATNStateLoopEnd = 12 + + ATNStateInvalidStateNumber = -1 +) + +//goland:noinspection GoUnusedGlobalVariable +var ATNStateInitialNumTransitions = 4 + +type ATNState interface { + GetEpsilonOnlyTransitions() bool + + GetRuleIndex() int + SetRuleIndex(int) + + GetNextTokenWithinRule() *IntervalSet + SetNextTokenWithinRule(*IntervalSet) + + GetATN() *ATN + SetATN(*ATN) + + GetStateType() int + + GetStateNumber() int + SetStateNumber(int) + + GetTransitions() []Transition + SetTransitions([]Transition) + AddTransition(Transition, int) + + String() string + Hash() int + Equals(Collectable[ATNState]) bool +} + +type BaseATNState struct { + // NextTokenWithinRule caches lookahead during parsing. Not used during construction. + NextTokenWithinRule *IntervalSet + + // atn is the current ATN. + atn *ATN + + epsilonOnlyTransitions bool + + // ruleIndex tracks the Rule index because there are no Rule objects at runtime. + ruleIndex int + + stateNumber int + + stateType int + + // Track the transitions emanating from this ATN state. + transitions []Transition +} + +func NewATNState() *BaseATNState { + return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} +} + +func (as *BaseATNState) GetRuleIndex() int { + return as.ruleIndex +} + +func (as *BaseATNState) SetRuleIndex(v int) { + as.ruleIndex = v +} +func (as *BaseATNState) GetEpsilonOnlyTransitions() bool { + return as.epsilonOnlyTransitions +} + +func (as *BaseATNState) GetATN() *ATN { + return as.atn +} + +func (as *BaseATNState) SetATN(atn *ATN) { + as.atn = atn +} + +func (as *BaseATNState) GetTransitions() []Transition { + return as.transitions +} + +func (as *BaseATNState) SetTransitions(t []Transition) { + as.transitions = t +} + +func (as *BaseATNState) GetStateType() int { + return as.stateType +} + +func (as *BaseATNState) GetStateNumber() int { + return as.stateNumber +} + +func (as *BaseATNState) SetStateNumber(stateNumber int) { + as.stateNumber = stateNumber +} + +func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet { + return as.NextTokenWithinRule +} + +func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) { + as.NextTokenWithinRule = v +} + +func (as *BaseATNState) Hash() int { + return as.stateNumber +} + +func (as *BaseATNState) String() string { + return strconv.Itoa(as.stateNumber) +} + +func (as *BaseATNState) Equals(other Collectable[ATNState]) bool { + if ot, ok := other.(ATNState); ok { + return as.stateNumber == ot.GetStateNumber() + } + + return false +} + +func (as *BaseATNState) isNonGreedyExitState() bool { + return false +} + +func (as *BaseATNState) AddTransition(trans Transition, index int) { + if len(as.transitions) == 0 { + as.epsilonOnlyTransitions = trans.getIsEpsilon() + } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() { + _, _ = fmt.Fprintf(os.Stdin, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber) + as.epsilonOnlyTransitions = false + } + + // TODO: Check code for already present compared to the Java equivalent + //alreadyPresent := false + //for _, t := range as.transitions { + // if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() { + // if t.getLabel() != nil && trans.getLabel() 
!= nil && trans.getLabel().Equals(t.getLabel()) { + // alreadyPresent = true + // break + // } + // } else if t.getIsEpsilon() && trans.getIsEpsilon() { + // alreadyPresent = true + // break + // } + //} + //if !alreadyPresent { + if index == -1 { + as.transitions = append(as.transitions, trans) + } else { + as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...) + // TODO: as.transitions.splice(index, 1, trans) + } + //} else { + // _, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber) + //} +} + +type BasicState struct { + BaseATNState +} + +func NewBasicState() *BasicState { + return &BasicState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + } +} + +type DecisionState interface { + ATNState + + getDecision() int + setDecision(int) + + getNonGreedy() bool + setNonGreedy(bool) +} + +type BaseDecisionState struct { + BaseATNState + decision int + nonGreedy bool +} + +func NewBaseDecisionState() *BaseDecisionState { + return &BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + decision: -1, + } +} + +func (s *BaseDecisionState) getDecision() int { + return s.decision +} + +func (s *BaseDecisionState) setDecision(b int) { + s.decision = b +} + +func (s *BaseDecisionState) getNonGreedy() bool { + return s.nonGreedy +} + +func (s *BaseDecisionState) setNonGreedy(b bool) { + s.nonGreedy = b +} + +type BlockStartState interface { + DecisionState + + getEndState() *BlockEndState + setEndState(*BlockEndState) +} + +// BaseBlockStartState is the start of a regular (...) block. +type BaseBlockStartState struct { + BaseDecisionState + endState *BlockEndState +} + +func NewBlockStartState() *BaseBlockStartState { + return &BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + decision: -1, + }, + } +} + +func (s *BaseBlockStartState) getEndState() *BlockEndState { + return s.endState +} + +func (s *BaseBlockStartState) setEndState(b *BlockEndState) { + s.endState = b +} + +type BasicBlockStartState struct { + BaseBlockStartState +} + +func NewBasicBlockStartState() *BasicBlockStartState { + return &BasicBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBlockStart, + }, + }, + }, + } +} + +var _ BlockStartState = &BasicBlockStartState{} + +// BlockEndState is a terminal node of a simple (a|b|c) block. +type BlockEndState struct { + BaseATNState + startState ATNState +} + +func NewBlockEndState() *BlockEndState { + return &BlockEndState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBlockEnd, + }, + startState: nil, + } +} + +// RuleStopState is the last node in the ATN for a rule, unless that rule is the +// start symbol. In that case, there is one transition to EOF. Later, we might +// encode references to all calls to this rule to compute FOLLOW sets for error +// handling. 
+type RuleStopState struct { + BaseATNState +} + +func NewRuleStopState() *RuleStopState { + return &RuleStopState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateRuleStop, + }, + } +} + +type RuleStartState struct { + BaseATNState + stopState ATNState + isPrecedenceRule bool +} + +func NewRuleStartState() *RuleStartState { + return &RuleStartState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateRuleStart, + }, + } +} + +// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two +// transitions: one to the loop back to start of the block, and one to exit. +type PlusLoopbackState struct { + BaseDecisionState +} + +func NewPlusLoopbackState() *PlusLoopbackState { + return &PlusLoopbackState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStatePlusLoopBack, + }, + }, + } +} + +// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a +// decision state; we don't use it for code generation. Somebody might need it, +// it is included for completeness. In reality, PlusLoopbackState is the real +// decision-making node for A+. +type PlusBlockStartState struct { + BaseBlockStartState + loopBackState ATNState +} + +func NewPlusBlockStartState() *PlusBlockStartState { + return &PlusBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStatePlusBlockStart, + }, + }, + }, + } +} + +var _ BlockStartState = &PlusBlockStartState{} + +// StarBlockStartState is the block that begins a closure loop. +type StarBlockStartState struct { + BaseBlockStartState +} + +func NewStarBlockStartState() *StarBlockStartState { + return &StarBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarBlockStart, + }, + }, + }, + } +} + +var _ BlockStartState = &StarBlockStartState{} + +type StarLoopbackState struct { + BaseATNState +} + +func NewStarLoopbackState() *StarLoopbackState { + return &StarLoopbackState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarLoopBack, + }, + } +} + +type StarLoopEntryState struct { + BaseDecisionState + loopBackState ATNState + precedenceRuleDecision bool +} + +func NewStarLoopEntryState() *StarLoopEntryState { + // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. + return &StarLoopEntryState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarLoopEntry, + }, + }, + } +} + +// LoopEndState marks the end of a * or + loop. +type LoopEndState struct { + BaseATNState + loopBackState ATNState +} + +func NewLoopEndState() *LoopEndState { + return &LoopEndState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateLoopEnd, + }, + } +} + +// TokensStartState is the Tokens rule start state linking to each lexer rule start state. 
+type TokensStartState struct { + BaseDecisionState +} + +func NewTokensStartState() *TokensStartState { + return &TokensStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateTokenStart, + }, + }, + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go b/vendor/github.com/antlr4-go/antlr/v4/atn_type.go similarity index 100% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go rename to vendor/github.com/antlr4-go/antlr/v4/atn_type.go diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go similarity index 89% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go rename to vendor/github.com/antlr4-go/antlr/v4/char_stream.go index c33f0adb..bd8127b6 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go +++ b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go @@ -8,5 +8,5 @@ type CharStream interface { IntStream GetText(int, int) string GetTextFromTokens(start, end Token) string - GetTextFromInterval(*Interval) string + GetTextFromInterval(Interval) string } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go similarity index 100% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go rename to vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go similarity index 88% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go rename to vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go index c6c9485a..b75da9df 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go +++ b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go @@ -28,22 +28,24 @@ type CommonTokenStream struct { // trivial with bt field. fetchedEOF bool - // index indexs into tokens of the current token (next token to consume). + // index into [tokens] of the current token (next token to consume). // tokens[p] should be LT(1). It is set to -1 when the stream is first // constructed or when SetTokenSource is called, indicating that the first token // has not yet been fetched from the token source. For additional information, - // see the documentation of IntStream for a description of initializing methods. + // see the documentation of [IntStream] for a description of initializing methods. index int - // tokenSource is the TokenSource from which tokens for the bt stream are + // tokenSource is the [TokenSource] from which tokens for the bt stream are // fetched. tokenSource TokenSource - // tokens is all tokens fetched from the token source. The list is considered a + // tokens contains all tokens fetched from the token source. The list is considered a // complete view of the input once fetchedEOF is set to true. tokens []Token } +// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied lexer to produce +// tokens and will pull tokens from the given lexer channel. 
func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { return &CommonTokenStream{ channel: channel, @@ -53,6 +55,7 @@ func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { } } +// GetAllTokens returns all tokens currently pulled from the token source. func (c *CommonTokenStream) GetAllTokens() []Token { return c.tokens } @@ -61,9 +64,11 @@ func (c *CommonTokenStream) Mark() int { return 0 } -func (c *CommonTokenStream) Release(marker int) {} +func (c *CommonTokenStream) Release(_ int) {} -func (c *CommonTokenStream) reset() { +func (c *CommonTokenStream) Reset() { + c.fetchedEOF = false + c.tokens = make([]Token, 0) c.Seek(0) } @@ -107,7 +112,7 @@ func (c *CommonTokenStream) Consume() { // Sync makes sure index i in tokens has a token and returns true if a token is // located at index i and otherwise false. func (c *CommonTokenStream) Sync(i int) bool { - n := i - len(c.tokens) + 1 // TODO: How many more elements do we need? + n := i - len(c.tokens) + 1 // How many more elements do we need? if n > 0 { fetched := c.fetch(n) @@ -193,12 +198,13 @@ func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) { c.tokenSource = tokenSource c.tokens = make([]Token, 0) c.index = -1 + c.fetchedEOF = false } // NextTokenOnChannel returns the index of the next token on channel given a // starting index. Returns i if tokens[i] is on channel. Returns -1 if there are -// no tokens on channel between i and EOF. -func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int { +// no tokens on channel between 'i' and [TokenEOF]. +func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int { c.Sync(i) if i >= len(c.tokens) { @@ -244,7 +250,7 @@ func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []To nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) from := tokenIndex + 1 - // If no onchannel to the right, then nextOnChannel == -1, so set to to last token + // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token var to int if nextOnChannel == -1 { @@ -314,7 +320,8 @@ func (c *CommonTokenStream) Index() int { } func (c *CommonTokenStream) GetAllText() string { - return c.GetTextFromInterval(nil) + c.Fill() + return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1)) } func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { @@ -329,15 +336,9 @@ func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string return c.GetTextFromInterval(interval.GetSourceInterval()) } -func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { +func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string { c.lazyInit() - - if interval == nil { - c.Fill() - interval = NewInterval(0, len(c.tokens)-1) - } else { - c.Sync(interval.Stop) - } + c.Sync(interval.Stop) start := interval.Start stop := interval.Stop diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go b/vendor/github.com/antlr4-go/antlr/v4/comparators.go similarity index 82% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go rename to vendor/github.com/antlr4-go/antlr/v4/comparators.go index 9ea32005..7467e9b4 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go +++ b/vendor/github.com/antlr4-go/antlr/v4/comparators.go @@ -18,17 +18,20 @@ package antlr // type safety and avoid having to implement this for every type that we want to perform comparison on. 
// // This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which -// allows us to use it in any collection instance that does nto require a special hash or equals implementation. +// allows us to use it in any collection instance that does not require a special hash or equals implementation. type ObjEqComparator[T Collectable[T]] struct{} var ( - aStateEqInst = &ObjEqComparator[ATNState]{} - aConfEqInst = &ObjEqComparator[ATNConfig]{} - aConfCompInst = &ATNConfigComparator[ATNConfig]{} - atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{} + aStateEqInst = &ObjEqComparator[ATNState]{} + aConfEqInst = &ObjEqComparator[*ATNConfig]{} + + // aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache + aConfCompInst = &ATNConfigComparator[*ATNConfig]{} + atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{} dfaStateEqInst = &ObjEqComparator[*DFAState]{} semctxEqInst = &ObjEqComparator[SemanticContext]{} - atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{} + atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{} + pContextEqInst = &ObjEqComparator[*PredictionContext]{} ) // Equals2 delegates to the Equals() method of type T @@ -44,14 +47,14 @@ func (c *ObjEqComparator[T]) Hash1(o T) int { type SemCComparator[T Collectable[T]] struct{} -// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet +// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet // and has a custom Equals() and Hash() implementation, because equality is not based on the // standard Hash() and Equals() methods of the ATNConfig type. type ATNConfigComparator[T Collectable[T]] struct { } // Equals2 is a custom comparator for ATNConfigs specifically for configLookup -func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { +func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { // Same pointer, must be equal, even if both nil // @@ -72,7 +75,8 @@ func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { } // Hash1 is custom hash implementation for ATNConfigs specifically for configLookup -func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int { +func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int { + hash := 7 hash = 31*hash + o.GetState().GetStateNumber() hash = 31*hash + o.GetAlt() @@ -85,7 +89,7 @@ type ATNAltConfigComparator[T Collectable[T]] struct { } // Equals2 is a custom comparator for ATNConfigs specifically for configLookup -func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { +func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { // Same pointer, must be equal, even if both nil // @@ -105,21 +109,21 @@ func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { } // Hash1 is custom hash implementation for ATNConfigs specifically for configLookup -func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int { +func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int { h := murmurInit(7) h = murmurUpdate(h, o.GetState().GetStateNumber()) h = murmurUpdate(h, o.GetContext().Hash()) return murmurFinish(h, 2) } -// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet +// BaseATNConfigComparator is used as the comparator for the configLookup field of a ATNConfigSet // and has a custom Equals() and Hash() implementation, because equality is not based on the // standard Hash() and Equals() methods of the ATNConfig type. 
type BaseATNConfigComparator[T Collectable[T]] struct { } // Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet -func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { +func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { // Same pointer, must be equal, even if both nil // @@ -141,7 +145,6 @@ func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { // Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just // delegates to the standard Hash() method of the ATNConfig type. -func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int { - +func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int { return o.Hash() } diff --git a/vendor/github.com/antlr4-go/antlr/v4/configuration.go b/vendor/github.com/antlr4-go/antlr/v4/configuration.go new file mode 100644 index 00000000..c2b72451 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/configuration.go @@ -0,0 +1,214 @@ +package antlr + +type runtimeConfiguration struct { + statsTraceStacks bool + lexerATNSimulatorDebug bool + lexerATNSimulatorDFADebug bool + parserATNSimulatorDebug bool + parserATNSimulatorTraceATNSim bool + parserATNSimulatorDFADebug bool + parserATNSimulatorRetryDebug bool + lRLoopEntryBranchOpt bool + memoryManager bool +} + +// Global runtime configuration +var runtimeConfig = runtimeConfiguration{ + lRLoopEntryBranchOpt: true, +} + +type runtimeOption func(*runtimeConfiguration) error + +// ConfigureRuntime allows the runtime to be configured globally setting things like trace and statistics options. +// It uses the functional options pattern for go. This is a package global function as it operates on the runtime +// configuration regardless of the instantiation of anything higher up such as a parser or lexer. Generally this is +// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the +// only maintainer). However, it is possible that you might want to use this to set a global option concerning the +// memory allocation type used by the runtime such as sync.Pool or not. +// +// The options are applied in the order they are passed in, so the last option will override any previous options. +// +// For example, if you want to turn on the collection create point stack flag to true, you can do: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true)) +// +// If you want to turn it off, you can do: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false)) +func ConfigureRuntime(options ...runtimeOption) error { + for _, option := range options { + err := option(&runtimeConfig) + if err != nil { + return err + } + } + return nil +} + +// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of +// certain structs, such as collections, or the use point of certain methods such as Put(). +// Because this can be expensive, it is turned off by default. However, it +// can be useful to track down exactly where memory is being created and used. 
+// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false)) +func WithStatsTraceStacks(trace bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.statsTraceStacks = trace + return nil + } +} + +// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN] +// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false)) +func WithLexerATNSimulatorDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lexerATNSimulatorDebug = debug + return nil + } +} + +// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA] +// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false)) +func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lexerATNSimulatorDFADebug = debug + return nil + } +} + +// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN] +// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false)) +func WithParserATNSimulatorDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorDebug = debug + return nil + } +} + +// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator +// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false)) +func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorTraceATNSim = trace + return nil + } +} + +// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA] +// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. 
+// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false)) +func WithParserATNSimulatorDFADebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorDFADebug = debug + return nil + } +} + +// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA] +// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime. +// Only useful to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false)) +func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorRetryDebug = debug + return nil + } +} + +// WithLRLoopEntryBranchOpt sets the global flag indicating whether let recursive loop operations should be +// optimized or not. This is useful for debugging parser issues by comparing the output with the Java runtime. +// It turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator]. +// +// Note that default is to use this optimization. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false)) +func WithLRLoopEntryBranchOpt(off bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lRLoopEntryBranchOpt = off + return nil + } +} + +// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful +// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which +// will intercept garbage collection and cause available memory to be reused. At the end of the day, this is no substitute +// for fixing your grammar by ridding yourself of extreme ambiguity. BUt if you are just trying to reuse an opensource +// grammar, this may help make it more practical. +// +// Note that default is to use normal Go memory allocation and not pool memory. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithMemoryManager(true)) +// +// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other +// and should remember to nil out any references to the parser or lexer when you are done with them. +func WithMemoryManager(use bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.memoryManager = use + return nil + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go b/vendor/github.com/antlr4-go/antlr/v4/dfa.go similarity index 76% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go rename to vendor/github.com/antlr4-go/antlr/v4/dfa.go index bfd43e1f..6b63eb15 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go +++ b/vendor/github.com/antlr4-go/antlr/v4/dfa.go @@ -4,6 +4,8 @@ package antlr +// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can +// reach and the transitions between them. 
type DFA struct { // atnStartState is the ATN state in which this was created atnStartState DecisionState @@ -12,10 +14,9 @@ type DFA struct { // states is all the DFA states. Use Map to get the old state back; Set can only // indicate whether it is there. Go maps implement key hash collisions and so on and are very - // good, but the DFAState is an object and can't be used directly as the key as it can in say JAva + // good, but the DFAState is an object and can't be used directly as the key as it can in say Java // amd C#, whereby if the hashcode is the same for two objects, then Equals() is called against them - // to see if they really are the same object. - // + // to see if they really are the same object. Hence, we have our own map storage. // states *JStore[*DFAState, *ObjEqComparator[*DFAState]] @@ -32,11 +33,11 @@ func NewDFA(atnStartState DecisionState, decision int) *DFA { dfa := &DFA{ atnStartState: atnStartState, decision: decision, - states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst), + states: nil, // Lazy initialize } if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision { dfa.precedenceDfa = true - dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false)) + dfa.s0 = NewDFAState(-1, NewATNConfigSet(false)) dfa.s0.isAcceptState = false dfa.s0.requiresFullContext = false } @@ -95,12 +96,11 @@ func (d *DFA) getPrecedenceDfa() bool { // true or nil otherwise, and d.precedenceDfa is updated. func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { if d.getPrecedenceDfa() != precedenceDfa { - d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst) + d.states = nil // Lazy initialize d.numstates = 0 if precedenceDfa { - precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false)) - + precedenceState := NewDFAState(-1, NewATNConfigSet(false)) precedenceState.setEdges(make([]*DFAState, 0)) precedenceState.isAcceptState = false precedenceState.requiresFullContext = false @@ -113,6 +113,31 @@ func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { } } +// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy +// instantiation of the states JMap. +func (d *DFA) Len() int { + if d.states == nil { + return 0 + } + return d.states.Len() +} + +// Get returns a state that matches s if it is present in the DFA state set. We defer to this +// function instead of accessing states directly so that we can implement lazy instantiation of the states JMap. +func (d *DFA) Get(s *DFAState) (*DFAState, bool) { + if d.states == nil { + return nil, false + } + return d.states.Get(s) +} + +func (d *DFA) Put(s *DFAState) (*DFAState, bool) { + if d.states == nil { + d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put") + } + return d.states.Put(s) +} + func (d *DFA) getS0() *DFAState { return d.s0 } @@ -121,9 +146,11 @@ func (d *DFA) setS0(s *DFAState) { d.s0 = s } -// sortedStates returns the states in d sorted by their state number. +// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil. 
func (d *DFA) sortedStates() []*DFAState { - + if d.states == nil { + return []*DFAState{} + } vs := d.states.SortedSlice(func(i, j *DFAState) bool { return i.stateNumber < j.stateNumber }) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go similarity index 97% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go rename to vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go index 84d0a31e..0e110098 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go @@ -10,7 +10,7 @@ import ( "strings" ) -// DFASerializer is a DFA walker that knows how to dump them to serialized +// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized // strings. type DFASerializer struct { dfa *DFA diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go similarity index 81% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go rename to vendor/github.com/antlr4-go/antlr/v4/dfa_state.go index c90dec55..65414307 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go +++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go @@ -22,30 +22,31 @@ func (p *PredPrediction) String() string { return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")" } -// DFAState represents a set of possible ATN configurations. As Aho, Sethi, +// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi, // Ullman p. 117 says: "The DFA uses its state to keep track of all possible // states the ATN can be in after reading each input symbol. That is to say, -// after reading input a1a2..an, the DFA is in a state that represents the +// after reading input a1, a2,..an, the DFA is in a state that represents the // subset T of the states of the ATN that are reachable from the ATN's start -// state along some path labeled a1a2..an." In conventional NFA-to-DFA -// conversion, therefore, the subset T would be a bitset representing the set of -// states the ATN could be in. We need to track the alt predicted by each state +// state along some path labeled a1a2..an." +// +// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of +// states the [ATN] could be in. We need to track the alt predicted by each state // as well, however. More importantly, we need to maintain a stack of states, // tracking the closure operations as they jump from rule to rule, emulating // rule invocations (method calls). I have to add a stack to simulate the proper // lookahead sequences for the underlying LL grammar from which the ATN was // derived. // -// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a -// state (ala normal conversion) and a RuleContext describing the chain of rules +// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a +// state (ala normal conversion) and a [RuleContext] describing the chain of rules // (if any) followed to arrive at that state. 
// -// A DFAState may have multiple references to a particular state, but with -// different ATN contexts (with same or different alts) meaning that state was +// A [DFAState] may have multiple references to a particular state, but with +// different [ATN] contexts (with same or different alts) meaning that state was // reached via a different set of rule invocations. type DFAState struct { stateNumber int - configs ATNConfigSet + configs *ATNConfigSet // edges elements point to the target of the symbol. Shift up by 1 so (-1) // Token.EOF maps to the first element. @@ -53,7 +54,7 @@ type DFAState struct { isAcceptState bool - // prediction is the ttype we match or alt we predict if the state is accept. + // prediction is the 'ttype' we match or alt we predict if the state is 'accept'. // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or // requiresFullContext. prediction int @@ -81,9 +82,9 @@ type DFAState struct { predicates []*PredPrediction } -func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState { +func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState { if configs == nil { - configs = NewBaseATNConfigSet(false) + configs = NewATNConfigSet(false) } return &DFAState{configs: configs, stateNumber: stateNumber} @@ -94,7 +95,7 @@ func (d *DFAState) GetAltSet() []int { var alts []int if d.configs != nil { - for _, c := range d.configs.GetItems() { + for _, c := range d.configs.configs { alts = append(alts, c.GetAlt()) } } diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go similarity index 92% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go rename to vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go index c55bcc19..bd2cd8bc 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go +++ b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go @@ -33,6 +33,7 @@ type DiagnosticErrorListener struct { exactOnly bool } +//goland:noinspection GoUnusedExportedFunction func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { n := new(DiagnosticErrorListener) @@ -42,7 +43,7 @@ func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { return n } -func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { +func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { if d.exactOnly && !exact { return } @@ -55,7 +56,7 @@ func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, s recognizer.NotifyErrorListeners(msg, nil, nil) } -func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { +func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) { msg := "reportAttemptingFullContext d=" + d.getDecisionDescription(recognizer, dfa) + @@ -64,7 +65,7 @@ func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, recognizer.NotifyErrorListeners(msg, nil, nil) } -func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs 
ATNConfigSet) { +func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) { msg := "reportContextSensitivity d=" + d.getDecisionDescription(recognizer, dfa) + ", input='" + @@ -96,12 +97,12 @@ func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa // @param configs The conflicting or ambiguous configuration set. // @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise // returns the set of alternatives represented in {@code configs}. -func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet { +func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet { if ReportedAlts != nil { return ReportedAlts } result := NewBitSet() - for _, c := range set.GetItems() { + for _, c := range set.configs { result.add(c.GetAlt()) } diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go new file mode 100644 index 00000000..21a02164 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go @@ -0,0 +1,100 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "os" + "strconv" +) + +// Provides an empty default implementation of {@link ANTLRErrorListener}. The +// default implementation of each method does nothing, but can be overridden as +// necessary. + +type ErrorListener interface { + SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) + ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) + ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) + ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) +} + +type DefaultErrorListener struct { +} + +//goland:noinspection GoUnusedExportedFunction +func NewDefaultErrorListener() *DefaultErrorListener { + return new(DefaultErrorListener) +} + +func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) { +} + +func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) { +} + +type ConsoleErrorListener struct { + *DefaultErrorListener +} + +func NewConsoleErrorListener() *ConsoleErrorListener { + return new(ConsoleErrorListener) +} + +// ConsoleErrorListenerINSTANCE provides a default instance of {@link ConsoleErrorListener}. 
+var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() + +// SyntaxError prints messages to System.err containing the +// values of line, charPositionInLine, and msg using +// the following format: +// +// line : +func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) { + _, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) +} + +type ProxyErrorListener struct { + *DefaultErrorListener + delegates []ErrorListener +} + +func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { + if delegates == nil { + panic("delegates is not provided") + } + l := new(ProxyErrorListener) + l.delegates = delegates + return l +} + +func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { + for _, d := range p.delegates { + d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) + } +} + +func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { + for _, d := range p.delegates { + d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + } +} + +func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) { + for _, d := range p.delegates { + d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) + } +} + +func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) { + for _, d := range p.delegates { + d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) + } +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go new file mode 100644 index 00000000..9db2be1c --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go @@ -0,0 +1,702 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +type ErrorStrategy interface { + reset(Parser) + RecoverInline(Parser) Token + Recover(Parser, RecognitionException) + Sync(Parser) + InErrorRecoveryMode(Parser) bool + ReportError(Parser, RecognitionException) + ReportMatch(Parser) +} + +// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for +// error reporting and recovery in ANTLR parsers. +type DefaultErrorStrategy struct { + errorRecoveryMode bool + lastErrorIndex int + lastErrorStates *IntervalSet +} + +var _ ErrorStrategy = &DefaultErrorStrategy{} + +func NewDefaultErrorStrategy() *DefaultErrorStrategy { + + d := new(DefaultErrorStrategy) + + // Indicates whether the error strategy is currently "recovering from an + // error". This is used to suppress Reporting multiple error messages while + // attempting to recover from a detected syntax error. + // + // @see //InErrorRecoveryMode + // + d.errorRecoveryMode = false + + // The index into the input stream where the last error occurred. 
+ // This is used to prevent infinite loops where an error is found + // but no token is consumed during recovery...another error is found, + // ad nauseam. This is a failsafe mechanism to guarantee that at least + // one token/tree node is consumed for two errors. + // + d.lastErrorIndex = -1 + d.lastErrorStates = nil + return d +} + +//

+// The default implementation simply calls [endErrorCondition] to
+// ensure that the handler is not in error recovery mode.

+func (d *DefaultErrorStrategy) reset(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// This method is called to enter error recovery mode when a recognition +// exception is Reported. +func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) { + d.errorRecoveryMode = true +} + +func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool { + return d.errorRecoveryMode +} + +// This method is called to leave error recovery mode after recovering from +// a recognition exception. +func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) { + d.errorRecoveryMode = false + d.lastErrorStates = nil + d.lastErrorIndex = -1 +} + +// ReportMatch is the default implementation of error matching and simply calls endErrorCondition. +func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// ReportError is the default implementation of error reporting. +// It returns immediately if the handler is already +// in error recovery mode. Otherwise, it calls [beginErrorCondition] +// and dispatches the Reporting task based on the runtime type of e +// according to the following table. +// +// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative] +// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch] +// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate] +// All other types : Calls [NotifyErrorListeners] to Report the exception +func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { + // if we've already Reported an error and have not Matched a token + // yet successfully, don't Report any errors. + if d.InErrorRecoveryMode(recognizer) { + return // don't Report spurious errors + } + d.beginErrorCondition(recognizer) + + switch t := e.(type) { + default: + fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) + // fmt.Println(e.stack) + recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) + case *NoViableAltException: + d.ReportNoViableAlternative(recognizer, t) + case *InputMisMatchException: + d.ReportInputMisMatch(recognizer, t) + case *FailedPredicateException: + d.ReportFailedPredicate(recognizer, t) + } +} + +// Recover is the default recovery implementation. +// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set - +// loosely the set of tokens that can follow the current rule. +func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) { + + if d.lastErrorIndex == recognizer.GetInputStream().Index() && + d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { + // uh oh, another error at same token index and previously-Visited + // state in ATN must be a case where LT(1) is in the recovery + // token set so nothing got consumed. Consume a single token + // at least to prevent an infinite loop d is a failsafe. + recognizer.Consume() + } + d.lastErrorIndex = recognizer.GetInputStream().Index() + if d.lastErrorStates == nil { + d.lastErrorStates = NewIntervalSet() + } + d.lastErrorStates.addOne(recognizer.GetState()) + followSet := d.GetErrorRecoverySet(recognizer) + d.consumeUntil(recognizer, followSet) +} + +// Sync is the default implementation of error strategy synchronization. +// +// This Sync makes sure that the current lookahead symbol is consistent with what were expecting +// at this point in the [ATN]. 
You can call this anytime but ANTLR only +// generates code to check before sub-rules/loops and each iteration. +// +// Implements [Jim Idle]'s magic Sync mechanism in closures and optional +// sub-rules. E.g.: +// +// a : Sync ( stuff Sync )* +// Sync : {consume to what can follow Sync} +// +// At the start of a sub-rule upon error, Sync performs single +// token deletion, if possible. If it can't do that, it bails on the current +// rule and uses the default error recovery, which consumes until the +// reSynchronization set of the current rule. +// +// If the sub-rule is optional +// +// ({@code (...)?}, {@code (...)*}, +// +// or a block with an empty alternative), then the expected set includes what follows +// the sub-rule. +// +// During loop iteration, it consumes until it sees a token that can start a +// sub-rule or what follows loop. Yes, that is pretty aggressive. We opt to +// stay in the loop as long as possible. +// +// # Origins +// +// Previous versions of ANTLR did a poor job of their recovery within loops. +// A single mismatch token or missing token would force the parser to bail +// out of the entire rules surrounding the loop. So, for rule: +// +// classfunc : 'class' ID '{' member* '}' +// +// input with an extra token between members would force the parser to +// consume until it found the next class definition rather than the next +// member definition of the current class. +// +// This functionality cost a bit of effort because the parser has to +// compare the token set at the start of the loop and at each iteration. If for +// some reason speed is suffering for you, you can turn off this +// functionality by simply overriding this method as empty: +// +// { } +// +// [Jim Idle]: https://github.com/jimidle +func (d *DefaultErrorStrategy) Sync(recognizer Parser) { + // If already recovering, don't try to Sync + if d.InErrorRecoveryMode(recognizer) { + return + } + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + la := recognizer.GetTokenStream().LA(1) + + // try cheaper subset first might get lucky. seems to shave a wee bit off + nextTokens := recognizer.GetATN().NextTokens(s, nil) + if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) { + return + } + + switch s.GetStateType() { + case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry: + // Report error and recover if possible + if d.SingleTokenDeletion(recognizer) != nil { + return + } + recognizer.SetError(NewInputMisMatchException(recognizer)) + case ATNStatePlusLoopBack, ATNStateStarLoopBack: + d.ReportUnwantedToken(recognizer) + expecting := NewIntervalSet() + expecting.addSet(recognizer.GetExpectedTokens()) + whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer)) + d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) + default: + // do nothing if we can't identify the exact kind of ATN state + } +} + +// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException]. 
+// +// See also [ReportError] +func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { + tokens := recognizer.GetTokenStream() + var input string + if tokens != nil { + if e.startToken.GetTokenType() == TokenEOF { + input = "" + } else { + input = tokens.GetTextFromTokens(e.startToken, e.offendingToken) + } + } else { + input = "" + } + msg := "no viable alternative at input " + d.escapeWSAndQuote(input) + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException] +// +// See also: [ReportError] +func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { + msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) + + " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException]. +// +// See also: [ReportError] +func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { + ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] + msg := "rule " + ruleName + " " + e.message + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// ReportUnwantedToken is called to report a syntax error that requires the removal +// of a token from the input stream. At the time d method is called, the +// erroneous symbol is the current LT(1) symbol and has not yet been +// removed from the input stream. When this method returns, +// recognizer is in error recovery mode. +// +// This method is called when singleTokenDeletion identifies +// single-token deletion as a viable recovery strategy for a mismatched +// input error. +// +// The default implementation simply returns if the handler is already in +// error recovery mode. Otherwise, it calls beginErrorCondition to +// enter error recovery mode, followed by calling +// [NotifyErrorListeners] +func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { + if d.InErrorRecoveryMode(recognizer) { + return + } + d.beginErrorCondition(recognizer) + t := recognizer.GetCurrentToken() + tokenName := d.GetTokenErrorDisplay(t) + expecting := d.GetExpectedTokens(recognizer) + msg := "extraneous input " + tokenName + " expecting " + + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + recognizer.NotifyErrorListeners(msg, t, nil) +} + +// ReportMissingToken is called to report a syntax error which requires the +// insertion of a missing token into the input stream. At the time this +// method is called, the missing token has not yet been inserted. When this +// method returns, recognizer is in error recovery mode. +// +// This method is called when singleTokenInsertion identifies +// single-token insertion as a viable recovery strategy for a mismatched +// input error. +// +// The default implementation simply returns if the handler is already in +// error recovery mode. 
Otherwise, it calls beginErrorCondition to +// enter error recovery mode, followed by calling [NotifyErrorListeners] +func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { + if d.InErrorRecoveryMode(recognizer) { + return + } + d.beginErrorCondition(recognizer) + t := recognizer.GetCurrentToken() + expecting := d.GetExpectedTokens(recognizer) + msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + + " at " + d.GetTokenErrorDisplay(t) + recognizer.NotifyErrorListeners(msg, t, nil) +} + +// The RecoverInline default implementation attempts to recover from the mismatched input +// by using single token insertion and deletion as described below. If the +// recovery attempt fails, this method panics with [InputMisMatchException}. +// TODO: Not sure that panic() is the right thing to do here - JI +// +// # EXTRA TOKEN (single token deletion) +// +// LA(1) is not what we are looking for. If LA(2) has the +// right token, however, then assume LA(1) is some extra spurious +// token and delete it. Then consume and return the next token (which was +// the LA(2) token) as the successful result of the Match operation. +// +// # This recovery strategy is implemented by singleTokenDeletion +// +// # MISSING TOKEN (single token insertion) +// +// If current token -at LA(1) - is consistent with what could come +// after the expected LA(1) token, then assume the token is missing +// and use the parser's [TokenFactory] to create it on the fly. The +// “insertion” is performed by returning the created token as the successful +// result of the Match operation. +// +// This recovery strategy is implemented by [SingleTokenInsertion]. +// +// # Example +// +// For example, Input i=(3 is clearly missing the ')'. When +// the parser returns from the nested call to expr, it will have +// call the chain: +// +// stat → expr → atom +// +// and it will be trying to Match the ')' at this point in the +// derivation: +// +// : ID '=' '(' INT ')' ('+' atom)* ';' +// ^ +// +// The attempt to [Match] ')' will fail when it sees ';' and +// call [RecoverInline]. To recover, it sees that LA(1)==';' +// is in the set of tokens that can follow the ')' token reference +// in rule atom. It can assume that you forgot the ')'. +func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { + // SINGLE TOKEN DELETION + MatchedSymbol := d.SingleTokenDeletion(recognizer) + if MatchedSymbol != nil { + // we have deleted the extra token. + // now, move past ttype token as if all were ok + recognizer.Consume() + return MatchedSymbol + } + // SINGLE TOKEN INSERTION + if d.SingleTokenInsertion(recognizer) { + return d.GetMissingSymbol(recognizer) + } + // even that didn't work must panic the exception + recognizer.SetError(NewInputMisMatchException(recognizer)) + return nil +} + +// SingleTokenInsertion implements the single-token insertion inline error recovery +// strategy. It is called by [RecoverInline] if the single-token +// deletion strategy fails to recover from the mismatched input. If this +// method returns {@code true}, {@code recognizer} will be in error recovery +// mode. +// +// This method determines whether single-token insertion is viable by +// checking if the LA(1) input symbol could be successfully Matched +// if it were instead the LA(2) symbol. If this method returns +// {@code true}, the caller is responsible for creating and inserting a +// token with the correct type to produce this behavior.

+// +// This func returns true if single-token insertion is a viable recovery +// strategy for the current mismatched input. +func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { + currentSymbolType := recognizer.GetTokenStream().LA(1) + // if current token is consistent with what could come after current + // ATN state, then we know we're missing a token error recovery + // is free to conjure up and insert the missing token + atn := recognizer.GetInterpreter().atn + currentState := atn.states[recognizer.GetState()] + next := currentState.GetTransitions()[0].getTarget() + expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) + if expectingAtLL2.contains(currentSymbolType) { + d.ReportMissingToken(recognizer) + return true + } + + return false +} + +// SingleTokenDeletion implements the single-token deletion inline error recovery +// strategy. It is called by [RecoverInline] to attempt to recover +// from mismatched input. If this method returns nil, the parser and error +// handler state will not have changed. If this method returns non-nil, +// recognizer will not be in error recovery mode since the +// returned token was a successful Match. +// +// If the single-token deletion is successful, this method calls +// [ReportUnwantedToken] to Report the error, followed by +// [Consume] to actually “delete” the extraneous token. Then, +// before returning, [ReportMatch] is called to signal a successful +// Match. +// +// The func returns the successfully Matched [Token] instance if single-token +// deletion successfully recovers from the mismatched input, otherwise nil. +func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { + NextTokenType := recognizer.GetTokenStream().LA(2) + expecting := d.GetExpectedTokens(recognizer) + if expecting.contains(NextTokenType) { + d.ReportUnwantedToken(recognizer) + // print("recoverFromMisMatchedToken deleting " \ + // + str(recognizer.GetTokenStream().LT(1)) \ + // + " since " + str(recognizer.GetTokenStream().LT(2)) \ + // + " is what we want", file=sys.stderr) + recognizer.Consume() // simply delete extra token + // we want to return the token we're actually Matching + MatchedSymbol := recognizer.GetCurrentToken() + d.ReportMatch(recognizer) // we know current token is correct + return MatchedSymbol + } + + return nil +} + +// GetMissingSymbol conjures up a missing token during error recovery. +// +// The recognizer attempts to recover from single missing +// symbols. But, actions might refer to that missing symbol. +// For example: +// +// x=ID {f($x)}. +// +// The action clearly assumes +// that there has been an identifier Matched previously and that +// $x points at that token. If that token is missing, but +// the next token in the stream is what we want we assume that +// this token is missing, and we keep going. Because we +// have to return some token to replace the missing token, +// we have to conjure one up. This method gives the user control +// over the tokens returned for missing tokens. Mostly, +// you will want to create something special for identifier +// tokens. For literals such as '{' and ',', the default +// action in the parser or tree parser works. It simply creates +// a [CommonToken] of the appropriate type. The text will be the token name. +// If you need to change which tokens must be created by the lexer, +// override this method to create the appropriate tokens. 
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { + currentSymbol := recognizer.GetCurrentToken() + expecting := d.GetExpectedTokens(recognizer) + expectedTokenType := expecting.first() + var tokenText string + + if expectedTokenType == TokenEOF { + tokenText = "" + } else { + ln := recognizer.GetLiteralNames() + if expectedTokenType > 0 && expectedTokenType < len(ln) { + tokenText = "" + } else { + tokenText = "" // TODO: matches the JS impl + } + } + current := currentSymbol + lookback := recognizer.GetTokenStream().LT(-1) + if current.GetTokenType() == TokenEOF && lookback != nil { + current = lookback + } + + tf := recognizer.GetTokenFactory() + + return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn()) +} + +func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet { + return recognizer.GetExpectedTokens() +} + +// GetTokenErrorDisplay determines how a token should be displayed in an error message. +// The default is to display just the text, but during development you might +// want to have a lot of information spit out. Override this func in that case +// to use t.String() (which, for [CommonToken], dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a new type. +func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string { + if t == nil { + return "" + } + s := t.GetText() + if s == "" { + if t.GetTokenType() == TokenEOF { + s = "" + } else { + s = "<" + strconv.Itoa(t.GetTokenType()) + ">" + } + } + return d.escapeWSAndQuote(s) +} + +func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + return "'" + s + "'" +} + +// GetErrorRecoverySet computes the error recovery set for the current rule. During +// rule invocation, the parser pushes the set of tokens that can +// follow that rule reference on the stack. This amounts to +// computing FIRST of what follows the rule reference in the +// enclosing rule. See LinearApproximator.FIRST(). +// +// This local follow set only includes tokens +// from within the rule i.e., the FIRST computation done by +// ANTLR stops at the end of a rule. +// +// # Example +// +// When you find a "no viable alt exception", the input is not +// consistent with any of the alternatives for rule r. The best +// thing to do is to consume tokens until you see something that +// can legally follow a call to r or any rule that called r. +// You don't want the exact set of viable next tokens because the +// input might just be missing a token--you might consume the +// rest of the input looking for one of the missing tokens. +// +// Consider the grammar: +// +// a : '[' b ']' +// | '(' b ')' +// ; +// +// b : c '^' INT +// ; +// +// c : ID +// | INT +// ; +// +// At each rule invocation, the set of tokens that could follow +// that rule is pushed on a stack. 
Here are the various +// context-sensitive follow sets: +// +// FOLLOW(b1_in_a) = FIRST(']') = ']' +// FOLLOW(b2_in_a) = FIRST(')') = ')' +// FOLLOW(c_in_b) = FIRST('^') = '^' +// +// Upon erroneous input “[]”, the call chain is +// +// a → b → c +// +// and, hence, the follow context stack is: +// +// Depth Follow set Start of rule execution +// 0 a (from main()) +// 1 ']' b +// 2 '^' c +// +// Notice that ')' is not included, because b would have to have +// been called from a different context in rule a for ')' to be +// included. +// +// For error recovery, we cannot consider FOLLOW(c) +// (context-sensitive or otherwise). We need the combined set of +// all context-sensitive FOLLOW sets - the set of all tokens that +// could follow any reference in the call chain. We need to +// reSync to one of those tokens. Note that FOLLOW(c)='^' and if +// we reSync'd to that token, we'd consume until EOF. We need to +// Sync to context-sensitive FOLLOWs for a, b, and c: +// +// {']','^'} +// +// In this case, for input "[]", LA(1) is ']' and in the set, so we would +// not consume anything. After printing an error, rule c would +// return normally. Rule b would not find the required '^' though. +// At this point, it gets a mismatched token error and panics an +// exception (since LA(1) is not in the viable following token +// set). The rule exception handler tries to recover, but finds +// the same recovery set and doesn't consume anything. Rule b +// exits normally returning to rule a. Now it finds the ']' (and +// with the successful Match exits errorRecovery mode). +// +// So, you can see that the parser walks up the call chain looking +// for the token that was a member of the recovery set. +// +// Errors are not generated in errorRecovery mode. +// +// ANTLR's error recovery mechanism is based upon original ideas: +// +// [Algorithms + Data Structures = Programs] by Niklaus Wirth and +// [A note on error recovery in recursive descent parsers]. +// +// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent +// Parsers] +// +// Like Grosch I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead +// during parsing. 
Later, the runtime Sync was improved for loops/sub-rules see [Sync] docs +// +// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905 +// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE +// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip +func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet { + atn := recognizer.GetInterpreter().atn + ctx := recognizer.GetParserRuleContext() + recoverSet := NewIntervalSet() + for ctx != nil && ctx.GetInvokingState() >= 0 { + // compute what follows who invoked us + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + follow := atn.NextTokens(rt.(*RuleTransition).followState, nil) + recoverSet.addSet(follow) + ctx = ctx.GetParent().(ParserRuleContext) + } + recoverSet.removeOne(TokenEpsilon) + return recoverSet +} + +// Consume tokens until one Matches the given token set.// +func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) { + ttype := recognizer.GetTokenStream().LA(1) + for ttype != TokenEOF && !set.contains(ttype) { + recognizer.Consume() + ttype = recognizer.GetTokenStream().LA(1) + } +} + +// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors +// by immediately canceling the parse operation with a +// [ParseCancellationException]. The implementation ensures that the +// [ParserRuleContext//exception] field is set for all parse tree nodes +// that were not completed prior to encountering the error. +// +// This error strategy is useful in the following scenarios. +// +// - Two-stage parsing: This error strategy allows the first +// stage of two-stage parsing to immediately terminate if an error is +// encountered, and immediately fall back to the second stage. In addition to +// avoiding wasted work by attempting to recover from errors here, the empty +// implementation of [BailErrorStrategy.Sync] improves the performance of +// the first stage. +// +// - Silent validation: When syntax errors are not being +// Reported or logged, and the parse result is simply ignored if errors occur, +// the [BailErrorStrategy] avoids wasting work on recovering from errors +// when the result will be ignored either way. +// +// myparser.SetErrorHandler(NewBailErrorStrategy()) +// +// See also: [Parser.SetErrorHandler(ANTLRErrorStrategy)] +type BailErrorStrategy struct { + *DefaultErrorStrategy +} + +var _ ErrorStrategy = &BailErrorStrategy{} + +//goland:noinspection GoUnusedExportedFunction +func NewBailErrorStrategy() *BailErrorStrategy { + + b := new(BailErrorStrategy) + + b.DefaultErrorStrategy = NewDefaultErrorStrategy() + + return b +} + +// Recover Instead of recovering from exception e, re-panic it wrapped +// in a [ParseCancellationException] so it is not caught by the +// rule func catches. Use Exception.GetCause() to get the +// original [RecognitionException]. 
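The BailErrorStrategy comment above only hints at how a caller opts in. Below is a minimal sketch of wiring it up from application code; MyLexer, MyParser, the parsing package, and the Document start rule are placeholders for ANTLR-generated code and are not part of this change. The Recover implementation itself continues below.

```go
// Sketch only: MyLexer, MyParser and the parsing package stand in for
// ANTLR-generated code and are assumptions, not part of this change.
package main

import (
	"github.com/antlr4-go/antlr/v4"

	"example.com/app/parsing" // hypothetical package holding the generated lexer/parser
)

func parseStrictly(input string) {
	is := antlr.NewInputStream(input)
	lexer := parsing.NewMyLexer(is)
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	parser := parsing.NewMyParser(tokens)

	// Fail fast on the first syntax error instead of attempting recovery,
	// as described for BailErrorStrategy above.
	parser.SetErrorHandler(antlr.NewBailErrorStrategy())

	parser.Document() // hypothetical start rule of the grammar
}
```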
+func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { + context := recognizer.GetParserRuleContext() + for context != nil { + context.SetException(e) + if parent, ok := context.GetParent().(ParserRuleContext); ok { + context = parent + } else { + context = nil + } + } + recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly +} + +// RecoverInline makes sure we don't attempt to recover inline if the parser +// successfully recovers, it won't panic an exception. +func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { + b.Recover(recognizer, NewInputMisMatchException(recognizer)) + + return nil +} + +// Sync makes sure we don't attempt to recover from problems in sub-rules. +func (b *BailErrorStrategy) Sync(_ Parser) { +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/errors.go b/vendor/github.com/antlr4-go/antlr/v4/errors.go new file mode 100644 index 00000000..8f0f2f60 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/errors.go @@ -0,0 +1,259 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just +// 3 kinds of errors: prediction errors, failed predicate errors, and +// mismatched input errors. In each case, the parser knows where it is +// in the input, where it is in the ATN, the rule invocation stack, +// and what kind of problem occurred. + +type RecognitionException interface { + GetOffendingToken() Token + GetMessage() string + GetInputStream() IntStream +} + +type BaseRecognitionException struct { + message string + recognizer Recognizer + offendingToken Token + offendingState int + ctx RuleContext + input IntStream +} + +func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { + + // todo + // Error.call(this) + // + // if (!!Error.captureStackTrace) { + // Error.captureStackTrace(this, RecognitionException) + // } else { + // stack := NewError().stack + // } + // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int + + t := new(BaseRecognitionException) + + t.message = message + t.recognizer = recognizer + t.input = input + t.ctx = ctx + + // The current Token when an error occurred. Since not all streams + // support accessing symbols by index, we have to track the {@link Token} + // instance itself. + // + t.offendingToken = nil + + // Get the ATN state number the parser was in at the time the error + // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the + // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match. + // + t.offendingState = -1 + if t.recognizer != nil { + t.offendingState = t.recognizer.GetState() + } + + return t +} + +func (b *BaseRecognitionException) GetMessage() string { + return b.message +} + +func (b *BaseRecognitionException) GetOffendingToken() Token { + return b.offendingToken +} + +func (b *BaseRecognitionException) GetInputStream() IntStream { + return b.input +} + +//

+// If the state number is not known, this method returns -1.

+ +// getExpectedTokens gets the set of input symbols which could potentially follow the +// previously Matched symbol at the time this exception was raised. +// +// If the set of expected tokens is not known and could not be computed, +// this method returns nil. +// +// The func returns the set of token types that could potentially follow the current +// state in the {ATN}, or nil if the information is not available. + +func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { + if b.recognizer != nil { + return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) + } + + return nil +} + +func (b *BaseRecognitionException) String() string { + return b.message +} + +type LexerNoViableAltException struct { + *BaseRecognitionException + + startIndex int + deadEndConfigs *ATNConfigSet +} + +func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException { + + l := new(LexerNoViableAltException) + + l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) + + l.startIndex = startIndex + l.deadEndConfigs = deadEndConfigs + + return l +} + +func (l *LexerNoViableAltException) String() string { + symbol := "" + if l.startIndex >= 0 && l.startIndex < l.input.Size() { + symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) + } + return "LexerNoViableAltException" + symbol +} + +type NoViableAltException struct { + *BaseRecognitionException + + startToken Token + offendingToken Token + ctx ParserRuleContext + deadEndConfigs *ATNConfigSet +} + +// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths +// to take based upon the remaining input. It tracks the starting token +// of the offending input and also knows where the parser was +// in the various paths when the error. +// +// Reported by [ReportNoViableAlternative] +func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { + + if ctx == nil { + ctx = recognizer.GetParserRuleContext() + } + + if offendingToken == nil { + offendingToken = recognizer.GetCurrentToken() + } + + if startToken == nil { + startToken = recognizer.GetCurrentToken() + } + + if input == nil { + input = recognizer.GetInputStream().(TokenStream) + } + + n := new(NoViableAltException) + n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) + + // Which configurations did we try at input.Index() that couldn't Match + // input.LT(1) + n.deadEndConfigs = deadEndConfigs + + // The token object at the start index the input stream might + // not be buffering tokens so get a reference to it. + // + // At the time the error occurred, of course the stream needs to keep a + // buffer of all the tokens, but later we might not have access to those. + n.startToken = startToken + n.offendingToken = offendingToken + + return n +} + +type InputMisMatchException struct { + *BaseRecognitionException +} + +// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as +// when the current input does not Match the expected token. 
+func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { + + i := new(InputMisMatchException) + i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + i.offendingToken = recognizer.GetCurrentToken() + + return i + +} + +// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates +// occurs when normally parsing the alternative just like Matching a token. +// Disambiguating predicate evaluation occurs when we test a predicate during +// prediction. +type FailedPredicateException struct { + *BaseRecognitionException + + ruleIndex int + predicateIndex int + predicate string +} + +//goland:noinspection GoUnusedExportedFunction +func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { + + f := new(FailedPredicateException) + + f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + trans := s.GetTransitions()[0] + if trans2, ok := trans.(*PredicateTransition); ok { + f.ruleIndex = trans2.ruleIndex + f.predicateIndex = trans2.predIndex + } else { + f.ruleIndex = 0 + f.predicateIndex = 0 + } + f.predicate = predicate + f.offendingToken = recognizer.GetCurrentToken() + + return f +} + +func (f *FailedPredicateException) formatMessage(predicate, message string) string { + if message != "" { + return message + } + + return "failed predicate: {" + predicate + "}?" +} + +type ParseCancellationException struct { +} + +func (p ParseCancellationException) GetOffendingToken() Token { + //TODO implement me + panic("implement me") +} + +func (p ParseCancellationException) GetMessage() string { + //TODO implement me + panic("implement me") +} + +func (p ParseCancellationException) GetInputStream() IntStream { + //TODO implement me + panic("implement me") +} + +func NewParseCancellationException() *ParseCancellationException { + // Error.call(this) + // Error.captureStackTrace(this, ParseCancellationException) + return new(ParseCancellationException) +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/file_stream.go b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go new file mode 100644 index 00000000..5f65f809 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go @@ -0,0 +1,67 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bufio" + "os" +) + +// This is an InputStream that is loaded from a file all at once +// when you construct the object. 
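The comment above describes FileStream as an input stream that is read entirely into memory at construction time. A small usage sketch based only on the constructor and methods added in this file (the input path is a placeholder); the FileStream type definition continues below.

```go
package main

import (
	"fmt"
	"log"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// "input.expr" is a placeholder path. NewFileStream reads the whole
	// file into a rune buffer before returning, as the comment above notes.
	fs, err := antlr.NewFileStream("input.expr")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fs.GetSourceName(), fs.Size()) // file name and length in runes
	// The stream would normally be handed to a generated lexer, e.g.
	// lexer := parsing.NewMyLexer(fs), where NewMyLexer is hypothetical.
}
```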
+ +type FileStream struct { + InputStream + filename string +} + +//goland:noinspection GoUnusedExportedFunction +func NewFileStream(fileName string) (*FileStream, error) { + + f, err := os.Open(fileName) + if err != nil { + return nil, err + } + + defer func(f *os.File) { + errF := f.Close() + if errF != nil { + } + }(f) + + reader := bufio.NewReader(f) + fInfo, err := f.Stat() + if err != nil { + return nil, err + } + + fs := &FileStream{ + InputStream: InputStream{ + index: 0, + name: fileName, + }, + filename: fileName, + } + + // Pre-build the buffer and read runes efficiently + // + fs.data = make([]rune, 0, fInfo.Size()) + for { + r, _, err := reader.ReadRune() + if err != nil { + break + } + fs.data = append(fs.data, r) + } + fs.size = len(fs.data) // Size in runes + + // All done. + // + return fs, nil +} + +func (f *FileStream) GetSourceName() string { + return f.filename +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go new file mode 100644 index 00000000..b737fe85 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go @@ -0,0 +1,157 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bufio" + "io" +) + +type InputStream struct { + name string + index int + data []rune + size int +} + +// NewIoStream creates a new input stream from the given io.Reader reader. +// Note that the reader is read completely into memory and so it must actually +// have a stopping point - you cannot pass in a reader on an open-ended source such +// as a socket for instance. +func NewIoStream(reader io.Reader) *InputStream { + + rReader := bufio.NewReader(reader) + + is := &InputStream{ + name: "", + index: 0, + } + + // Pre-build the buffer and read runes reasonably efficiently given that + // we don't exactly know how big the input is. 
+ // + is.data = make([]rune, 0, 512) + for { + r, _, err := rReader.ReadRune() + if err != nil { + break + } + is.data = append(is.data, r) + } + is.size = len(is.data) // number of runes + return is +} + +// NewInputStream creates a new input stream from the given string +func NewInputStream(data string) *InputStream { + + is := &InputStream{ + name: "", + index: 0, + data: []rune(data), // This is actually the most efficient way + } + is.size = len(is.data) // number of runes, but we could also use len(data), which is efficient too + return is +} + +func (is *InputStream) reset() { + is.index = 0 +} + +// Consume moves the input pointer to the next character in the input stream +func (is *InputStream) Consume() { + if is.index >= is.size { + // assert is.LA(1) == TokenEOF + panic("cannot consume EOF") + } + is.index++ +} + +// LA returns the character at the given offset from the start of the input stream +func (is *InputStream) LA(offset int) int { + + if offset == 0 { + return 0 // nil + } + if offset < 0 { + offset++ // e.g., translate LA(-1) to use offset=0 + } + pos := is.index + offset - 1 + + if pos < 0 || pos >= is.size { // invalid + return TokenEOF + } + + return int(is.data[pos]) +} + +// LT returns the character at the given offset from the start of the input stream +func (is *InputStream) LT(offset int) int { + return is.LA(offset) +} + +// Index returns the current offset in to the input stream +func (is *InputStream) Index() int { + return is.index +} + +// Size returns the total number of characters in the input stream +func (is *InputStream) Size() int { + return is.size +} + +// Mark does nothing here as we have entire buffer +func (is *InputStream) Mark() int { + return -1 +} + +// Release does nothing here as we have entire buffer +func (is *InputStream) Release(_ int) { +} + +// Seek the input point to the provided index offset +func (is *InputStream) Seek(index int) { + if index <= is.index { + is.index = index // just jump don't update stream state (line,...) 
+ return + } + // seek forward + is.index = intMin(index, is.size) +} + +// GetText returns the text from the input stream from the start to the stop index +func (is *InputStream) GetText(start int, stop int) string { + if stop >= is.size { + stop = is.size - 1 + } + if start >= is.size { + return "" + } + + return string(is.data[start : stop+1]) +} + +// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last +// character of the stop token +func (is *InputStream) GetTextFromTokens(start, stop Token) string { + if start != nil && stop != nil { + return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) + } + + return "" +} + +func (is *InputStream) GetTextFromInterval(i Interval) string { + return is.GetText(i.Start, i.Stop) +} + +func (*InputStream) GetSourceName() string { + return "" +} + +// String returns the entire input stream as a string +func (is *InputStream) String() string { + return string(is.data) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go b/vendor/github.com/antlr4-go/antlr/v4/int_stream.go similarity index 100% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go rename to vendor/github.com/antlr4-go/antlr/v4/int_stream.go diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go similarity index 82% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go rename to vendor/github.com/antlr4-go/antlr/v4/interval_set.go index c1e155e8..cc506606 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go +++ b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go @@ -14,20 +14,21 @@ type Interval struct { Stop int } -/* stop is not included! */ -func NewInterval(start, stop int) *Interval { - i := new(Interval) - - i.Start = start - i.Stop = stop - return i +// NewInterval creates a new interval with the given start and stop values. +func NewInterval(start, stop int) Interval { + return Interval{ + Start: start, + Stop: stop, + } } -func (i *Interval) Contains(item int) bool { +// Contains returns true if the given item is contained within the interval. +func (i Interval) Contains(item int) bool { return item >= i.Start && item < i.Stop } -func (i *Interval) String() string { +// String generates a string representation of the interval. +func (i Interval) String() string { if i.Start == i.Stop-1 { return strconv.Itoa(i.Start) } @@ -35,15 +36,18 @@ func (i *Interval) String() string { return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) } -func (i *Interval) length() int { +// Length returns the length of the interval. +func (i Interval) Length() int { return i.Stop - i.Start } +// IntervalSet represents a collection of [Intervals], which may be read-only. type IntervalSet struct { - intervals []*Interval + intervals []Interval readOnly bool } +// NewIntervalSet creates a new empty, writable, interval set. 
func NewIntervalSet() *IntervalSet { i := new(IntervalSet) @@ -54,6 +58,20 @@ func NewIntervalSet() *IntervalSet { return i } +func (i *IntervalSet) Equals(other *IntervalSet) bool { + if len(i.intervals) != len(other.intervals) { + return false + } + + for k, v := range i.intervals { + if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop { + return false + } + } + + return true +} + func (i *IntervalSet) first() int { if len(i.intervals) == 0 { return TokenInvalidType @@ -70,16 +88,16 @@ func (i *IntervalSet) addRange(l, h int) { i.addInterval(NewInterval(l, h+1)) } -func (i *IntervalSet) addInterval(v *Interval) { +func (i *IntervalSet) addInterval(v Interval) { if i.intervals == nil { - i.intervals = make([]*Interval, 0) + i.intervals = make([]Interval, 0) i.intervals = append(i.intervals, v) } else { // find insert pos for k, interval := range i.intervals { // distinct range -> insert if v.Stop < interval.Start { - i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...) return } else if v.Stop == interval.Start { i.intervals[k].Start = v.Start @@ -139,16 +157,16 @@ func (i *IntervalSet) contains(item int) bool { } func (i *IntervalSet) length() int { - len := 0 + iLen := 0 for _, v := range i.intervals { - len += v.length() + iLen += v.Length() } - return len + return iLen } -func (i *IntervalSet) removeRange(v *Interval) { +func (i *IntervalSet) removeRange(v Interval) { if v.Start == v.Stop-1 { i.removeOne(v.Start) } else if i.intervals != nil { @@ -162,7 +180,7 @@ func (i *IntervalSet) removeRange(v *Interval) { i.intervals[k] = NewInterval(ni.Start, v.Start) x := NewInterval(v.Stop, ni.Stop) // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...) return } else if v.Start <= ni.Start && v.Stop >= ni.Stop { // i.intervals.splice(k, 1) @@ -199,7 +217,7 @@ func (i *IntervalSet) removeOne(v int) { x := NewInterval(ki.Start, v) ki.Start = v + 1 // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) + i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...) return } } @@ -223,7 +241,7 @@ func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []strin return i.toIndexString() } -func (i *IntervalSet) GetIntervals() []*Interval { +func (i *IntervalSet) GetIntervals() []Interval { return i.intervals } diff --git a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go new file mode 100644 index 00000000..ceccd96d --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go @@ -0,0 +1,685 @@ +package antlr + +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +import ( + "container/list" + "runtime/debug" + "sort" + "sync" +) + +// Collectable is an interface that a struct should implement if it is to be +// usable as a key in these collections. 
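The interval_set.go hunk above turns Interval into a value type whose Stop bound is exclusive. A short, runnable illustration of that semantics, using only exported functions from that hunk (the jcollect.go additions continue below):

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// Interval is now a value type and its Stop bound is exclusive,
	// so NewInterval(3, 6) covers the values 3, 4 and 5.
	iv := antlr.NewInterval(3, 6)
	fmt.Println(iv.Contains(5)) // true
	fmt.Println(iv.Contains(6)) // false
	fmt.Println(iv.String())    // "3..5"
	fmt.Println(iv.Length())    // 3
}
```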
+type Collectable[T any] interface { + Hash() int + Equals(other Collectable[T]) bool +} + +type Comparator[T any] interface { + Hash1(o T) int + Equals2(T, T) bool +} + +type CollectionSource int +type CollectionDescriptor struct { + SybolicName string + Description string +} + +const ( + UnknownCollection CollectionSource = iota + ATNConfigLookupCollection + ATNStateCollection + DFAStateCollection + ATNConfigCollection + PredictionContextCollection + SemanticContextCollection + ClosureBusyCollection + PredictionVisitedCollection + MergeCacheCollection + PredictionContextCacheCollection + AltSetCollection + ReachSetCollection +) + +var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{ + UnknownCollection: { + SybolicName: "UnknownCollection", + Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.", + }, + ATNConfigCollection: { + SybolicName: "ATNConfigCollection", + Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." + + "For instance, it is used to store the results of the closure() operation in the ATN.", + }, + ATNConfigLookupCollection: { + SybolicName: "ATNConfigLookupCollection", + Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." + + "This is used to prevent duplicating equivalent states in an ATNConfigurationSet.", + }, + ATNStateCollection: { + SybolicName: "ATNStateCollection", + Description: "ATNState collection. This is used to store the states of the ATN.", + }, + DFAStateCollection: { + SybolicName: "DFAStateCollection", + Description: "DFAState collection. This is used to store the states of the DFA.", + }, + PredictionContextCollection: { + SybolicName: "PredictionContextCollection", + Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.", + }, + SemanticContextCollection: { + SybolicName: "SemanticContextCollection", + Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.", + }, + ClosureBusyCollection: { + SybolicName: "ClosureBusyCollection", + Description: "ClosureBusy collection. This is used to check and prevent infinite recursion right recursive rules." + + "It stores ATNConfigs that are currently being processed in the closure() operation.", + }, + PredictionVisitedCollection: { + SybolicName: "PredictionVisitedCollection", + Description: "A map that records whether we have visited a particular context when searching through cached entries.", + }, + MergeCacheCollection: { + SybolicName: "MergeCacheCollection", + Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.", + }, + PredictionContextCacheCollection: { + SybolicName: "PredictionContextCacheCollection", + Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.", + }, + AltSetCollection: { + SybolicName: "AltSetCollection", + Description: "Used to eliminate duplicate alternatives in an ATN config set.", + }, + ReachSetCollection: { + SybolicName: "ReachSetCollection", + Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.", + }, +} + +// JStore implements a container that allows the use of a struct to calculate the key +// for a collection of values akin to map. 
This is not meant to be a full-blown HashMap but just +// serve the needs of the ANTLR Go runtime. +// +// For ease of porting the logic of the runtime from the master target (Java), this collection +// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() +// function as the key. The values are stored in a standard go map which internally is a form of hashmap +// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with +// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't +// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and +// we understand the requirements, then this is fine - this is not a general purpose collection. +type JStore[T any, C Comparator[T]] struct { + store map[int][]T + len int + comparator Comparator[T] + stats *JStatRec +} + +func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] { + + if comparator == nil { + panic("comparator cannot be nil") + } + + s := &JStore[T, C]{ + store: make(map[int][]T, 1), + comparator: comparator, + } + if collectStats { + s.stats = &JStatRec{ + Source: cType, + Description: desc, + } + + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + s.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(s.stats) + } + return s +} + +// Put will store given value in the collection. Note that the key for storage is generated from +// the value itself - this is specifically because that is what ANTLR needs - this would not be useful +// as any kind of general collection. +// +// If the key has a hash conflict, then the value will be added to the slice of values associated with the +// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is +// tested by calling the equals() method on the key. +// +// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true +// +// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. +func (s *JStore[T, C]) Put(value T) (v T, exists bool) { + + if collectStats { + s.stats.Puts++ + } + kh := s.comparator.Hash1(value) + + var hClash bool + for _, v1 := range s.store[kh] { + hClash = true + if s.comparator.Equals2(value, v1) { + if collectStats { + s.stats.PutHits++ + s.stats.PutHashConflicts++ + } + return v1, true + } + if collectStats { + s.stats.PutMisses++ + } + } + if collectStats && hClash { + s.stats.PutHashConflicts++ + } + s.store[kh] = append(s.store[kh], value) + + if collectStats { + if len(s.store[kh]) > s.stats.MaxSlotSize { + s.stats.MaxSlotSize = len(s.store[kh]) + } + } + s.len++ + if collectStats { + s.stats.CurSize = s.len + if s.len > s.stats.MaxSize { + s.stats.MaxSize = s.len + } + } + return value, false +} + +// Get will return the value associated with the key - the type of the key is the same type as the value +// which would not generally be useful, but this is a specific thing for ANTLR where the key is +// generated using the object we are going to store. 
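The JStore comment above describes bucketing values by a caller-supplied hash and resolving collisions with an equality scan inside each bucket. Here is a self-contained sketch of that scheme, deliberately not using the runtime's own types and omitting the statistics plumbing; the JStore Get implementation continues below.

```go
package main

import "fmt"

// key is an arbitrary comparable type used only for this sketch.
type key struct{ a, b int }

// hash plays the role of Comparator.Hash1 in the runtime.
func hash(k key) int { return k.a*31 + k.b }

// store buckets values by hash and falls back to an equality scan within
// a bucket, mirroring the Put behaviour described in the comment above.
type store struct {
	buckets map[int][]key
}

// put returns the existing value and true if an equal value is already
// present, otherwise it stores the value and returns it with false.
func (s *store) put(k key) (key, bool) {
	h := hash(k)
	for _, existing := range s.buckets[h] {
		if existing == k { // hash collision resolved by equality, not identity
			return existing, true
		}
	}
	s.buckets[h] = append(s.buckets[h], k)
	return k, false
}

func main() {
	s := &store{buckets: map[int][]key{}}
	fmt.Println(s.put(key{1, 2})) // {1 2} false: newly stored
	fmt.Println(s.put(key{1, 2})) // {1 2} true: already present
}
```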
+func (s *JStore[T, C]) Get(key T) (T, bool) { + if collectStats { + s.stats.Gets++ + } + kh := s.comparator.Hash1(key) + var hClash bool + for _, v := range s.store[kh] { + hClash = true + if s.comparator.Equals2(key, v) { + if collectStats { + s.stats.GetHits++ + s.stats.GetHashConflicts++ + } + return v, true + } + if collectStats { + s.stats.GetMisses++ + } + } + if collectStats { + if hClash { + s.stats.GetHashConflicts++ + } + s.stats.GetNoEnt++ + } + return key, false +} + +// Contains returns true if the given key is present in the store +func (s *JStore[T, C]) Contains(key T) bool { + _, present := s.Get(key) + return present +} + +func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { + vs := make([]T, 0, len(s.store)) + for _, v := range s.store { + vs = append(vs, v...) + } + sort.Slice(vs, func(i, j int) bool { + return less(vs[i], vs[j]) + }) + + return vs +} + +func (s *JStore[T, C]) Each(f func(T) bool) { + for _, e := range s.store { + for _, v := range e { + f(v) + } + } +} + +func (s *JStore[T, C]) Len() int { + return s.len +} + +func (s *JStore[T, C]) Values() []T { + vs := make([]T, 0, len(s.store)) + for _, e := range s.store { + vs = append(vs, e...) + } + return vs +} + +type entry[K, V any] struct { + key K + val V +} + +type JMap[K, V any, C Comparator[K]] struct { + store map[int][]*entry[K, V] + len int + comparator Comparator[K] + stats *JStatRec +} + +func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] { + m := &JMap[K, V, C]{ + store: make(map[int][]*entry[K, V], 1), + comparator: comparator, + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) { + if collectStats { + m.stats.Puts++ + } + kh := m.comparator.Hash1(key) + + var hClash bool + for _, e := range m.store[kh] { + hClash = true + if m.comparator.Equals2(e.key, key) { + if collectStats { + m.stats.PutHits++ + m.stats.PutHashConflicts++ + } + return e.val, true + } + if collectStats { + m.stats.PutMisses++ + } + } + if collectStats { + if hClash { + m.stats.PutHashConflicts++ + } + } + m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) + if collectStats { + if len(m.store[kh]) > m.stats.MaxSlotSize { + m.stats.MaxSlotSize = len(m.store[kh]) + } + } + m.len++ + if collectStats { + m.stats.CurSize = m.len + if m.len > m.stats.MaxSize { + m.stats.MaxSize = m.len + } + } + return val, false +} + +func (m *JMap[K, V, C]) Values() []V { + vs := make([]V, 0, len(m.store)) + for _, e := range m.store { + for _, v := range e { + vs = append(vs, v.val) + } + } + return vs +} + +func (m *JMap[K, V, C]) Get(key K) (V, bool) { + if collectStats { + m.stats.Gets++ + } + var none V + kh := m.comparator.Hash1(key) + var hClash bool + for _, e := range m.store[kh] { + hClash = true + if m.comparator.Equals2(e.key, key) { + if collectStats { + m.stats.GetHits++ + m.stats.GetHashConflicts++ + } + return e.val, true + } + if collectStats { + m.stats.GetMisses++ + } + } + if collectStats { + if hClash { + m.stats.GetHashConflicts++ + } + m.stats.GetNoEnt++ + } + return none, false +} + +func (m *JMap[K, V, C]) Len() int { + return m.len +} + +func (m *JMap[K, V, C]) Delete(key K) { + kh := m.comparator.Hash1(key) + for i, e := range m.store[kh] { + if 
m.comparator.Equals2(e.key, key) { + m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) + m.len-- + return + } + } +} + +func (m *JMap[K, V, C]) Clear() { + m.store = make(map[int][]*entry[K, V]) +} + +type JPCMap struct { + store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]] + size int + stats *JStatRec +} + +func NewJPCMap(cType CollectionSource, desc string) *JPCMap { + m := &JPCMap{ + store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc), + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Gets++ + } + // Do we have a map stored by k1? + // + m2, present := pcm.store.Get(k1) + if present { + if collectStats { + pcm.stats.GetHits++ + } + // We found a map of values corresponding to k1, so now we need to look up k2 in that map + // + return m2.Get(k2) + } + if collectStats { + pcm.stats.GetMisses++ + } + return nil, false +} + +func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) { + + if collectStats { + pcm.stats.Puts++ + } + // First does a map already exist for k1? + // + if m2, present := pcm.store.Get(k1); present { + if collectStats { + pcm.stats.PutHits++ + } + _, present = m2.Put(k2, v) + if !present { + pcm.size++ + if collectStats { + pcm.stats.CurSize = pcm.size + if pcm.size > pcm.stats.MaxSize { + pcm.stats.MaxSize = pcm.size + } + } + } + } else { + // No map found for k1, so we create it, add in our value, then store is + // + if collectStats { + pcm.stats.PutMisses++ + m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry") + } else { + m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry") + } + + m2.Put(k2, v) + pcm.store.Put(k1, m2) + pcm.size++ + } +} + +type JPCMap2 struct { + store map[int][]JPCEntry + size int + stats *JStatRec +} + +type JPCEntry struct { + k1, k2, v *PredictionContext +} + +func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 { + m := &JPCMap2{ + store: make(map[int][]JPCEntry, 1000), + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func dHash(k1, k2 *PredictionContext) int { + return k1.cachedHash*31 + k2.cachedHash +} + +func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Gets++ + } + + h := dHash(k1, k2) + var hClash bool + for _, e := range pcm.store[h] { + hClash = true + if e.k1.Equals(k1) && e.k2.Equals(k2) { + if collectStats { + pcm.stats.GetHits++ + pcm.stats.GetHashConflicts++ + } + return e.v, true + } + if collectStats { + pcm.stats.GetMisses++ + } + } + if collectStats { + if hClash { + pcm.stats.GetHashConflicts++ + } + pcm.stats.GetNoEnt++ + 
} + return nil, false +} + +func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Puts++ + } + h := dHash(k1, k2) + var hClash bool + for _, e := range pcm.store[h] { + hClash = true + if e.k1.Equals(k1) && e.k2.Equals(k2) { + if collectStats { + pcm.stats.PutHits++ + pcm.stats.PutHashConflicts++ + } + return e.v, true + } + if collectStats { + pcm.stats.PutMisses++ + } + } + if collectStats { + if hClash { + pcm.stats.PutHashConflicts++ + } + } + pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v}) + pcm.size++ + if collectStats { + pcm.stats.CurSize = pcm.size + if pcm.size > pcm.stats.MaxSize { + pcm.stats.MaxSize = pcm.size + } + } + return nil, false +} + +type VisitEntry struct { + k *PredictionContext + v *PredictionContext +} +type VisitRecord struct { + store map[*PredictionContext]*PredictionContext + len int + stats *JStatRec +} + +type VisitList struct { + cache *list.List + lock sync.RWMutex +} + +var visitListPool = VisitList{ + cache: list.New(), + lock: sync.RWMutex{}, +} + +// NewVisitRecord returns a new VisitRecord instance from the pool if available. +// Note that this "map" uses a pointer as a key because we are emulating the behavior of +// IdentityHashMap in Java, which uses the `==` operator to compare whether the keys are equal, +// which means is the key the same reference to an object rather than is it .equals() to another +// object. +func NewVisitRecord() *VisitRecord { + visitListPool.lock.Lock() + el := visitListPool.cache.Front() + defer visitListPool.lock.Unlock() + var vr *VisitRecord + if el == nil { + vr = &VisitRecord{ + store: make(map[*PredictionContext]*PredictionContext), + } + if collectStats { + vr.stats = &JStatRec{ + Source: PredictionContextCacheCollection, + Description: "VisitRecord", + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + vr.stats.CreateStack = debug.Stack() + } + } + } else { + vr = el.Value.(*VisitRecord) + visitListPool.cache.Remove(el) + vr.store = make(map[*PredictionContext]*PredictionContext) + } + if collectStats { + Statistics.AddJStatRec(vr.stats) + } + return vr +} + +func (vr *VisitRecord) Release() { + vr.len = 0 + vr.store = nil + if collectStats { + vr.stats.MaxSize = 0 + vr.stats.CurSize = 0 + vr.stats.Gets = 0 + vr.stats.GetHits = 0 + vr.stats.GetMisses = 0 + vr.stats.GetHashConflicts = 0 + vr.stats.GetNoEnt = 0 + vr.stats.Puts = 0 + vr.stats.PutHits = 0 + vr.stats.PutMisses = 0 + vr.stats.PutHashConflicts = 0 + vr.stats.MaxSlotSize = 0 + } + visitListPool.lock.Lock() + visitListPool.cache.PushBack(vr) + visitListPool.lock.Unlock() +} + +func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) { + if collectStats { + vr.stats.Gets++ + } + v := vr.store[k] + if v != nil { + if collectStats { + vr.stats.GetHits++ + } + return v, true + } + if collectStats { + vr.stats.GetNoEnt++ + } + return nil, false +} + +func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) { + if collectStats { + vr.stats.Puts++ + } + vr.store[k] = v + vr.len++ + if collectStats { + vr.stats.CurSize = vr.len + if vr.len > vr.stats.MaxSize { + vr.stats.MaxSize = vr.len + } + } + return v, false +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go b/vendor/github.com/antlr4-go/antlr/v4/lexer.go similarity index 78% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go rename to vendor/github.com/antlr4-go/antlr/v4/lexer.go index 
6533f051..3c7896a9 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer.go @@ -69,7 +69,7 @@ func NewBaseLexer(input CharStream) *BaseLexer { // create a single token. NextToken will return l object after // Matching lexer rule(s). If you subclass to allow multiple token // emissions, then set l to the last token to be Matched or - // something nonnil so that the auto token emit mechanism will not + // something non nil so that the auto token emit mechanism will not // emit another token. lexer.token = nil @@ -111,6 +111,7 @@ const ( LexerSkip = -3 ) +//goland:noinspection GoUnusedConst const ( LexerDefaultTokenChannel = TokenDefaultChannel LexerHidden = TokenHiddenChannel @@ -118,7 +119,7 @@ const ( LexerMaxCharValue = 0x10FFFF ) -func (b *BaseLexer) reset() { +func (b *BaseLexer) Reset() { // wack Lexer state variables if b.input != nil { b.input.Seek(0) // rewind the input @@ -176,7 +177,7 @@ func (b *BaseLexer) safeMatch() (ret int) { return b.Interpreter.Match(b.input, b.mode) } -// Return a token from l source i.e., Match a token on the char stream. +// NextToken returns a token from the lexer input source i.e., Match a token on the source char stream. func (b *BaseLexer) NextToken() Token { if b.input == nil { panic("NextToken requires a non-nil input stream.") @@ -205,9 +206,8 @@ func (b *BaseLexer) NextToken() Token { continueOuter := false for { b.thetype = TokenInvalidType - ttype := LexerSkip - ttype = b.safeMatch() + ttype := b.safeMatch() if b.input.LA(1) == TokenEOF { b.hitEOF = true @@ -234,12 +234,11 @@ func (b *BaseLexer) NextToken() Token { } } -// Instruct the lexer to Skip creating a token for current lexer rule -// and look for another token. NextToken() knows to keep looking when -// a lexer rule finishes with token set to SKIPTOKEN. Recall that +// Skip instructs the lexer to Skip creating a token for current lexer rule +// and look for another token. [NextToken] knows to keep looking when +// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that // if token==nil at end of any token rule, it creates one for you // and emits it. -// / func (b *BaseLexer) Skip() { b.thetype = LexerSkip } @@ -248,23 +247,29 @@ func (b *BaseLexer) More() { b.thetype = LexerMore } +// SetMode changes the lexer to a new mode. The lexer will use this mode from hereon in and the rules for that mode +// will be in force. func (b *BaseLexer) SetMode(m int) { b.mode = m } +// PushMode saves the current lexer mode so that it can be restored later. See [PopMode], then sets the +// current lexer mode to the supplied mode m. func (b *BaseLexer) PushMode(m int) { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("pushMode " + strconv.Itoa(m)) } b.modeStack.Push(b.mode) b.mode = m } +// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to +// return to. 
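The mode-stack methods above are normally driven by generated lexer actions; a minimal sketch, with a hypothetical mode constant:

```go
// stringMode is a hypothetical lexer mode used only for this sketch.
const stringMode = 1

// enterString switches the lexer into string mode, remembering the current mode.
func enterString(lex *BaseLexer) {
	lex.PushMode(stringMode)
}

// leaveString returns to whatever mode was active before enterString;
// PopMode panics if nothing was pushed.
func leaveString(lex *BaseLexer) {
	lex.PopMode()
}
```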
func (b *BaseLexer) PopMode() int { if len(b.modeStack) == 0 { panic("Empty Stack") } - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1])) } i, _ := b.modeStack.Pop() @@ -280,7 +285,7 @@ func (b *BaseLexer) inputStream() CharStream { func (b *BaseLexer) SetInputStream(input CharStream) { b.input = nil b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} - b.reset() + b.Reset() b.input = input b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} } @@ -289,20 +294,19 @@ func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair { return b.tokenFactorySourcePair } -// By default does not support multiple emits per NextToken invocation -// for efficiency reasons. Subclass and override l method, NextToken, -// and GetToken (to push tokens into a list and pull from that list -// rather than a single variable as l implementation does). -// / +// EmitToken by default does not support multiple emits per [NextToken] invocation +// for efficiency reasons. Subclass and override this func, [NextToken], +// and [GetToken] (to push tokens into a list and pull from that list +// rather than a single variable as this implementation does). func (b *BaseLexer) EmitToken(token Token) { b.token = token } -// The standard method called to automatically emit a token at the +// Emit is the standard method called to automatically emit a token at the // outermost lexical rule. The token object should point into the // char buffer start..stop. If there is a text override in 'text', -// use that to set the token's text. Override l method to emit -// custom Token objects or provide a Newfactory. +// use that to set the token's text. Override this method to emit +// custom [Token] objects or provide a new factory. // / func (b *BaseLexer) Emit() Token { t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn) @@ -310,6 +314,7 @@ func (b *BaseLexer) Emit() Token { return t } +// EmitEOF emits an EOF token. By default, this is the last token emitted func (b *BaseLexer) EmitEOF() Token { cpos := b.GetCharPositionInLine() lpos := b.GetLine() @@ -318,6 +323,7 @@ func (b *BaseLexer) EmitEOF() Token { return eof } +// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned. func (b *BaseLexer) GetCharPositionInLine() int { return b.Interpreter.GetCharPositionInLine() } @@ -334,13 +340,12 @@ func (b *BaseLexer) SetType(t int) { b.thetype = t } -// What is the index of the current character of lookahead?/// +// GetCharIndex returns the index of the current character of lookahead func (b *BaseLexer) GetCharIndex() int { return b.input.Index() } -// Return the text Matched so far for the current token or any text override. -// Set the complete text of l token it wipes any previous changes to the text. +// GetText returns the text Matched so far for the current token or any text override. func (b *BaseLexer) GetText() string { if b.text != "" { return b.text @@ -349,17 +354,20 @@ func (b *BaseLexer) GetText() string { return b.Interpreter.GetText(b.input) } +// SetText sets the complete text of this token; it wipes any previous changes to the text. func (b *BaseLexer) SetText(text string) { b.text = text } +// GetATN returns the ATN used by the lexer. 
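The EmitToken comment above describes queuing tokens so a single NextToken call can yield several emissions. A sketch of that pattern follows; the wrapper type is hypothetical, and the real runtime routes overridden methods through the lexer's Virt field rather than relying on plain embedding:

```go
// multiEmitLexer is an illustrative wrapper that drains a token queue before
// asking the embedded lexer to match more input.
type multiEmitLexer struct {
	*BaseLexer
	pending []Token
}

func (m *multiEmitLexer) NextToken() Token {
	if len(m.pending) == 0 {
		// Match once; a real subclass would push any extra tokens produced
		// by the rule onto m.pending here.
		m.pending = append(m.pending, m.BaseLexer.NextToken())
	}
	t := m.pending[0]
	m.pending = m.pending[1:]
	return t
}
```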
func (b *BaseLexer) GetATN() *ATN { return b.Interpreter.ATN() } -// Return a list of all Token objects in input char stream. -// Forces load of all tokens. Does not include EOF token. -// / +// GetAllTokens returns a list of all [Token] objects in input char stream. +// Forces a load of all tokens that can be made from the input char stream. +// +// Does not include EOF token. func (b *BaseLexer) GetAllTokens() []Token { vl := b.Virt tokens := make([]Token, 0) @@ -398,11 +406,13 @@ func (b *BaseLexer) getCharErrorDisplay(c rune) string { return "'" + b.getErrorDisplayForChar(c) + "'" } -// Lexers can normally Match any char in it's vocabulary after Matching -// a token, so do the easy thing and just kill a character and hope +// Recover can normally Match any char in its vocabulary after Matching +// a token, so here we do the easy thing and just kill a character and hope // it all works out. You can instead use the rule invocation stack // to do sophisticated error recovery if you are in a fragment rule. -// / +// +// In general, lexers should not need to recover and should have rules that cover any eventuality, such as +// a character that makes no sense to the recognizer. func (b *BaseLexer) Recover(re RecognitionException) { if b.input.LA(1) != TokenEOF { if _, ok := re.(*LexerNoViableAltException); ok { diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go similarity index 78% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go rename to vendor/github.com/antlr4-go/antlr/v4/lexer_action.go index 111656c2..eaa7393e 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go @@ -7,14 +7,29 @@ package antlr import "strconv" const ( - LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action. - LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action. - LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action. - LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action. - LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action. - LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action. - LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action. - LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action. + // LexerActionTypeChannel represents a [LexerChannelAction] action. + LexerActionTypeChannel = 0 + + // LexerActionTypeCustom represents a [LexerCustomAction] action. + LexerActionTypeCustom = 1 + + // LexerActionTypeMode represents a [LexerModeAction] action. + LexerActionTypeMode = 2 + + // LexerActionTypeMore represents a [LexerMoreAction] action. + LexerActionTypeMore = 3 + + // LexerActionTypePopMode represents a [LexerPopModeAction] action. + LexerActionTypePopMode = 4 + + // LexerActionTypePushMode represents a [LexerPushModeAction] action. + LexerActionTypePushMode = 5 + + // LexerActionTypeSkip represents a [LexerSkipAction] action. + LexerActionTypeSkip = 6 + + // LexerActionTypeType represents a [LexerTypeAction] action. 
+ LexerActionTypeType = 7 ) type LexerAction interface { @@ -39,7 +54,7 @@ func NewBaseLexerAction(action int) *BaseLexerAction { return la } -func (b *BaseLexerAction) execute(lexer Lexer) { +func (b *BaseLexerAction) execute(_ Lexer) { panic("Not implemented") } @@ -52,17 +67,19 @@ func (b *BaseLexerAction) getIsPositionDependent() bool { } func (b *BaseLexerAction) Hash() int { - return b.actionType + h := murmurInit(0) + h = murmurUpdate(h, b.actionType) + return murmurFinish(h, 1) } func (b *BaseLexerAction) Equals(other LexerAction) bool { - return b == other + return b.actionType == other.getActionType() } -// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}. +// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip]. // -//

The {@code Skip} command does not have any parameters, so l action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.

+// The Skip command does not have any parameters, so this action is +// implemented as a singleton instance exposed by the [LexerSkipActionINSTANCE]. type LexerSkipAction struct { *BaseLexerAction } @@ -73,17 +90,22 @@ func NewLexerSkipAction() *LexerSkipAction { return la } -// Provides a singleton instance of l parameterless lexer action. +// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action. var LexerSkipActionINSTANCE = NewLexerSkipAction() func (l *LexerSkipAction) execute(lexer Lexer) { lexer.Skip() } +// String returns a string representation of the current [LexerSkipAction]. func (l *LexerSkipAction) String() string { return "skip" } +func (b *LexerSkipAction) Equals(other LexerAction) bool { + return other.getActionType() == LexerActionTypeSkip +} + // Implements the {@code type} lexer action by calling {@link Lexer//setType} // // with the assigned type. @@ -125,11 +147,10 @@ func (l *LexerTypeAction) String() string { return "actionType(" + strconv.Itoa(l.thetype) + ")" } -// Implements the {@code pushMode} lexer action by calling -// {@link Lexer//pushMode} with the assigned mode. +// LexerPushModeAction implements the pushMode lexer action by calling +// [Lexer.pushMode] with the assigned mode. type LexerPushModeAction struct { *BaseLexerAction - mode int } @@ -169,10 +190,10 @@ func (l *LexerPushModeAction) String() string { return "pushMode(" + strconv.Itoa(l.mode) + ")" } -// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. +// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode]. // -//

The {@code popMode} command does not have any parameters, so l action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.

+// The popMode command does not have any parameters, so this action is +// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE] type LexerPopModeAction struct { *BaseLexerAction } @@ -224,11 +245,10 @@ func (l *LexerMoreAction) String() string { return "more" } -// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with +// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with // the assigned mode. type LexerModeAction struct { *BaseLexerAction - mode int } @@ -322,16 +342,19 @@ func (l *LexerCustomAction) Equals(other LexerAction) bool { } } -// Implements the {@code channel} lexer action by calling -// {@link Lexer//setChannel} with the assigned channel. -// Constructs a New{@code channel} action with the specified channel value. -// @param channel The channel value to pass to {@link Lexer//setChannel}. +// LexerChannelAction implements the channel lexer action by calling +// [Lexer.setChannel] with the assigned channel. +// +// Constructs a new channel action with the specified channel value. type LexerChannelAction struct { *BaseLexerAction - channel int } +// NewLexerChannelAction creates a channel lexer action by calling +// [Lexer.setChannel] with the assigned channel. +// +// Constructs a new channel action with the specified channel value. func NewLexerChannelAction(channel int) *LexerChannelAction { l := new(LexerChannelAction) l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) @@ -375,25 +398,22 @@ func (l *LexerChannelAction) String() string { // lexer actions, see {@link LexerActionExecutor//append} and // {@link LexerActionExecutor//fixOffsetBeforeMatch}.

-// Constructs a Newindexed custom action by associating a character offset -// with a {@link LexerAction}. -// -//

Note: This class is only required for lexer actions for which -// {@link LexerAction//isPositionDependent} returns {@code true}.

-// -// @param offset The offset into the input {@link CharStream}, relative to -// the token start index, at which the specified lexer action should be -// executed. -// @param action The lexer action to execute at a particular offset in the -// input {@link CharStream}. type LexerIndexedCustomAction struct { *BaseLexerAction - offset int lexerAction LexerAction isPositionDependent bool } +// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset +// with a [LexerAction]. +// +// Note: This class is only required for lexer actions for which +// [LexerAction.isPositionDependent] returns true. +// +// The offset points into the input [CharStream], relative to +// the token start index, at which the specified lexerAction should be +// executed. func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { l := new(LexerIndexedCustomAction) diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go new file mode 100644 index 00000000..dfc28c32 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go @@ -0,0 +1,173 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "golang.org/x/exp/slices" + +// Represents an executor for a sequence of lexer actions which traversed during +// the Matching operation of a lexer rule (token). +// +//

The executor tracks position information for position-dependent lexer actions +// efficiently, ensuring that actions appearing only at the end of the rule do +// not cause bloating of the {@link DFA} created for the lexer.

+ +type LexerActionExecutor struct { + lexerActions []LexerAction + cachedHash int +} + +func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { + + if lexerActions == nil { + lexerActions = make([]LexerAction, 0) + } + + l := new(LexerActionExecutor) + + l.lexerActions = lexerActions + + // Caches the result of {@link //hashCode} since the hash code is an element + // of the performance-critical {@link ATNConfig//hashCode} operation. + l.cachedHash = murmurInit(0) + for _, a := range lexerActions { + l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) + } + l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions)) + + return l +} + +// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for +// the input [LexerActionExecutor] followed by a specified +// [LexerAction]. +// TODO: This does not match the Java code +func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { + if lexerActionExecutor == nil { + return NewLexerActionExecutor([]LexerAction{lexerAction}) + } + + return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) +} + +// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset +// for position-dependent lexer actions. +// +// Normally, when the executor encounters lexer actions where +// [LexerAction.isPositionDependent] returns true, it calls +// [IntStream.Seek] on the input [CharStream] to set the input +// position to the end of the current token. This behavior provides +// for efficient [DFA] representation of lexer actions which appear at the end +// of a lexer rule, even when the lexer rule Matches a variable number of +// characters. +// +// Prior to traversing a Match transition in the [ATN], the current offset +// from the token start index is assigned to all position-dependent lexer +// actions which have not already been assigned a fixed offset. By storing +// the offsets relative to the token start index, the [DFA] representation of +// lexer actions which appear in the middle of tokens remains efficient due +// to sharing among tokens of the same Length, regardless of their absolute +// position in the input stream. +// +// If the current executor already has offsets assigned to all +// position-dependent lexer actions, the method returns this instance. +// +// The offset is assigned to all position-dependent +// lexer actions which do not already have offsets assigned. +// +// The func returns a [LexerActionExecutor] that stores input stream offsets +// for all position-dependent lexer actions. +func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { + var updatedLexerActions []LexerAction + for i := 0; i < len(l.lexerActions); i++ { + _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) + if l.lexerActions[i].getIsPositionDependent() && !ok { + if updatedLexerActions == nil { + updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions)) + updatedLexerActions = append(updatedLexerActions, l.lexerActions...) + } + updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) + } + } + if updatedLexerActions == nil { + return l + } + + return NewLexerActionExecutor(updatedLexerActions) +} + +// Execute the actions encapsulated by l executor within the context of a +// particular {@link Lexer}. +// +//

This method calls {@link IntStream//seek} to set the position of the +// {@code input} {@link CharStream} prior to calling +// {@link LexerAction//execute} on a position-dependent action. Before the +// method returns, the input position will be restored to the same position +// it was in when the method was invoked.

+// +// @param lexer The lexer instance. +// @param input The input stream which is the source for the current token. +// When l method is called, the current {@link IntStream//index} for +// {@code input} should be the start of the following token, i.e. 1 +// character past the end of the current token. +// @param startIndex The token start index. This value may be passed to +// {@link IntStream//seek} to set the {@code input} position to the beginning +// of the token. +// / +func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { + requiresSeek := false + stopIndex := input.Index() + + defer func() { + if requiresSeek { + input.Seek(stopIndex) + } + }() + + for i := 0; i < len(l.lexerActions); i++ { + lexerAction := l.lexerActions[i] + if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { + offset := la.offset + input.Seek(startIndex + offset) + lexerAction = la.lexerAction + requiresSeek = (startIndex + offset) != stopIndex + } else if lexerAction.getIsPositionDependent() { + input.Seek(stopIndex) + requiresSeek = false + } + lexerAction.execute(lexer) + } +} + +func (l *LexerActionExecutor) Hash() int { + if l == nil { + // TODO: Why is this here? l should not be nil + return 61 + } + + // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode + return l.cachedHash +} + +func (l *LexerActionExecutor) Equals(other interface{}) bool { + if l == other { + return true + } + othert, ok := other.(*LexerActionExecutor) + if !ok { + return false + } + if othert == nil { + return false + } + if l.cachedHash != othert.cachedHash { + return false + } + if len(l.lexerActions) != len(othert.lexerActions) { + return false + } + return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool { + return i.Equals(j) + }) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go similarity index 80% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go rename to vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go index c573b752..fe938b02 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go @@ -10,10 +10,8 @@ import ( "strings" ) +//goland:noinspection GoUnusedGlobalVariable var ( - LexerATNSimulatorDebug = false - LexerATNSimulatorDFADebug = false - LexerATNSimulatorMinDFAEdge = 0 LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN @@ -32,11 +30,11 @@ type ILexerATNSimulator interface { } type LexerATNSimulator struct { - *BaseATNSimulator + BaseATNSimulator recog Lexer predictionMode int - mergeCache DoubleDict + mergeCache *JPCMap2 startIndex int Line int CharPositionInLine int @@ -46,27 +44,35 @@ type LexerATNSimulator struct { } func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { - l := new(LexerATNSimulator) - - l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + l := &LexerATNSimulator{ + BaseATNSimulator: BaseATNSimulator{ + atn: atn, + sharedContextCache: sharedContextCache, + }, + } l.decisionToDFA = decisionToDFA l.recog = recog + // The current token's starting index into the character stream. 
// Shared across DFA to ATN simulation in case the ATN fails and the // DFA did not have a previous accept state. In l case, we use the // ATN-generated exception object. l.startIndex = -1 - // line number 1..n within the input/// + + // line number 1..n within the input l.Line = 1 + // The index of the character relative to the beginning of the line - // 0..n-1/// + // 0..n-1 l.CharPositionInLine = 0 + l.mode = LexerDefaultMode + // Used during DFA/ATN exec to record the most recent accept configuration // info l.prevAccept = NewSimState() - // done + return l } @@ -114,7 +120,7 @@ func (l *LexerATNSimulator) reset() { func (l *LexerATNSimulator) MatchATN(input CharStream) int { startState := l.atn.modeToStartState[l.mode] - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) } oldMode := l.mode @@ -126,7 +132,7 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int { predict := l.execATN(input, next) - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) } return predict @@ -134,18 +140,18 @@ func (l *LexerATNSimulator) MatchATN(input CharStream) int { func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("start state closure=" + ds0.configs.String()) } if ds0.isAcceptState { - // allow zero-length tokens + // allow zero-Length tokens l.captureSimState(l.prevAccept, input, ds0) } t := input.LA(1) s := ds0 // s is current/from DFA state for { // while more work - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("execATN loop starting closure: " + s.configs.String()) } @@ -188,7 +194,7 @@ func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { } } t = input.LA(1) - s = target // flip current DFA target becomes Newsrc/from state + s = target // flip current DFA target becomes new src/from state } return l.failOrAccept(l.prevAccept, input, s.configs, t) @@ -214,43 +220,39 @@ func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState return nil } target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge) - if LexerATNSimulatorDebug && target != nil { + if runtimeConfig.lexerATNSimulatorDebug && target != nil { fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) } return target } -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. +// computeTargetState computes a target state for an edge in the [DFA], and attempt to add the +// computed state and corresponding edge to the [DFA]. // -// @param input The input stream -// @param s The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, l method -// returns {@link //ERROR}. +// The func returns the computed target [DFA] state for the given input symbol t. +// If this does not lead to a valid [DFA] state, this method +// returns ATNSimulatorError. 
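Taken together, getExistingTargetState and computeTargetState implement the usual "cached DFA edge first, ATN closure only on a miss" step. A package-internal sketch of that step (illustrative only, not the runtime's code):

```go
// nextDFAState returns the DFA state reached from s on input symbol t,
// reusing a previously recorded edge when one exists.
func nextDFAState(l *LexerATNSimulator, input CharStream, s *DFAState, t int) *DFAState {
	if target := l.getExistingTargetState(s, t); target != nil {
		return target // cache hit: no ATN simulation needed
	}
	return l.computeTargetState(input, s, t) // cache miss: compute via closure and record the edge
}
```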
func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { reach := NewOrderedATNConfigSet() // if we don't find an existing DFA state // Fill reach starting from closure, following t transitions - l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t) + l.getReachableConfigSet(input, s.configs, reach, t) if len(reach.configs) == 0 { // we got nowhere on t from s if !reach.hasSemanticContext { // we got nowhere on t, don't panic out l knowledge it'd - // cause a failover from DFA later. + // cause a fail-over from DFA later. l.addDFAEdge(s, t, ATNSimulatorError, nil) } // stop when we can't Match any more char return ATNSimulatorError } // Add an edge from s to target DFA found/created for reach - return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet) + return l.addDFAEdge(s, t, nil, reach) } -func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int { +func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int { if l.prevAccept.dfaState != nil { lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) @@ -265,34 +267,35 @@ func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) } -// Given a starting configuration set, figure out all ATN configurations -// we can reach upon input {@code t}. Parameter {@code reach} is a return -// parameter. -func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) { +// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations +// we can reach upon input t. +// +// Parameter reach is a return parameter. 
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) { // l is used to Skip processing for configs which have a lower priority - // than a config that already reached an accept state for the same rule + // than a runtimeConfig that already reached an accept state for the same rule SkipAlt := ATNInvalidAltNumber - for _, cfg := range closure.GetItems() { - currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt) - if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { + for _, cfg := range closure.configs { + currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt + if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision { continue } - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { - fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true)) + fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) } for _, trans := range cfg.GetState().GetTransitions() { target := l.getReachableTarget(trans, t) if target != nil { - lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor + lexerActionExecutor := cfg.lexerActionExecutor if lexerActionExecutor != nil { lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) } - treatEOFAsEpsilon := (t == TokenEOF) - config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) + treatEOFAsEpsilon := t == TokenEOF + config := NewLexerATNConfig3(cfg, target, lexerActionExecutor) if l.closure(input, config, reach, currentAltReachedAcceptState, true, treatEOFAsEpsilon) { // any remaining configs for l alt have a lower priority @@ -305,7 +308,7 @@ func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNC } func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Printf("ACTION %v\n", lexerActionExecutor) } // seek to after last char in token @@ -325,7 +328,7 @@ func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState return nil } -func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet { +func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet { configs := NewOrderedATNConfigSet() for i := 0; i < len(p.GetTransitions()); i++ { target := p.GetTransitions()[i].getTarget() @@ -336,25 +339,24 @@ func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *Ord return configs } -// Since the alternatives within any lexer decision are ordered by -// preference, l method stops pursuing the closure as soon as an accept +// closure since the alternatives within any lexer decision are ordered by +// preference, this method stops pursuing the closure as soon as an accept // state is reached. After the first accept state is reached by depth-first -// search from {@code config}, all other (potentially reachable) states for -// l rule would have a lower priority. +// search from runtimeConfig, all other (potentially reachable) states for +// this rule would have a lower priority. // -// @return {@code true} if an accept state is reached, otherwise -// {@code false}. -func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, +// The func returns true if an accept state is reached. 
+func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { - if LexerATNSimulatorDebug { - fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")") + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") } _, ok := config.state.(*RuleStopState) if ok { - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { if l.recog != nil { fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) } else { @@ -401,10 +403,10 @@ func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, co } // side-effect: can alter configs.hasSemanticContext -func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition, - configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig { +func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition, + configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig { - var cfg *LexerATNConfig + var cfg *ATNConfig if trans.getSerializationType() == TransitionRULE { @@ -435,10 +437,10 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC pt := trans.(*PredicateTransition) - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) } - configs.SetHasSemanticContext(true) + configs.hasSemanticContext = true if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { cfg = NewLexerATNConfig4(config, trans.getTarget()) } @@ -449,7 +451,7 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC // TODO: if the entry rule is invoked recursively, some // actions may be executed during the recursive call. The // problem can appear when hasEmptyPath() is true but - // isEmpty() is false. In l case, the config needs to be + // isEmpty() is false. In this case, the config needs to be // split into two contexts - one with just the empty path // and another with everything but the empty path. // Unfortunately, the current algorithm does not allow @@ -476,26 +478,18 @@ func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNC return cfg } -// Evaluate a predicate specified in the lexer. +// evaluatePredicate eEvaluates a predicate specified in the lexer. // -//

If {@code speculative} is {@code true}, l method was called before -// {@link //consume} for the Matched character. This method should call -// {@link //consume} before evaluating the predicate to ensure position -// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine}, -// and {@link Lexer//getcolumn}, properly reflect the current -// lexer state. This method should restore {@code input} and the simulator -// to the original state before returning (i.e. undo the actions made by the -// call to {@link //consume}.

+// If speculative is true, this method was called before +// [consume] for the Matched character. This method should call +// [consume] before evaluating the predicate to ensure position +// sensitive values, including [GetText], [GetLine], +// and [GetColumn], properly reflect the current +// lexer state. This method should restore input and the simulator +// to the original state before returning, i.e. undo the actions made by the +// call to [Consume]. // -// @param input The input stream. -// @param ruleIndex The rule containing the predicate. -// @param predIndex The index of the predicate within the rule. -// @param speculative {@code true} if the current index in {@code input} is -// one character before the predicate's location. -// -// @return {@code true} if the specified predicate evaluates to -// {@code true}. -// / +// The func returns true if the specified predicate evaluates to true. func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { // assume true if no recognizer was provided if l.recog == nil { @@ -527,7 +521,7 @@ func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream settings.dfaState = dfaState } -func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState { +func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState { if to == nil && cfgs != nil { // leading to l call, ATNConfigSet.hasSemanticContext is used as a // marker indicating dynamic predicate evaluation makes l edge @@ -539,10 +533,9 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // TJP notes: next time through the DFA, we see a pred again and eval. // If that gets us to a previously created (but dangling) DFA // state, we can continue in pure DFA mode from there. - // / - suppressEdge := cfgs.HasSemanticContext() - cfgs.SetHasSemanticContext(false) - + // + suppressEdge := cfgs.hasSemanticContext + cfgs.hasSemanticContext = false to = l.addDFAState(cfgs, true) if suppressEdge { @@ -554,7 +547,7 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // Only track edges within the DFA bounds return to } - if LexerATNSimulatorDebug { + if runtimeConfig.lexerATNSimulatorDebug { fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) } l.atn.edgeMu.Lock() @@ -572,13 +565,12 @@ func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfg // configurations already. This method also detects the first // configuration containing an ATN rule stop state. Later, when // traversing the DFA, we will know which rule to accept. 
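The evaluatePredicate contract above (consume, evaluate, then restore the stream and lexer position) can be pictured with the following package-internal sketch; the helper and the placeholder predicate are illustrative, and it assumes the simulator's Consume method and the CharStream Mark/Release/Seek calls behave as in the upstream runtime:

```go
// speculativeEval shows the save/consume/restore protocol around a
// position-sensitive predicate. Field names follow the diff, but this
// helper is not part of the runtime.
func speculativeEval(l *LexerATNSimulator, input CharStream, pred func() bool) bool {
	savedColumn := l.CharPositionInLine
	savedLine := l.Line
	index := input.Index()
	marker := input.Mark()

	defer func() {
		// Undo the Consume below so the caller observes the original position.
		l.CharPositionInLine = savedColumn
		l.Line = savedLine
		input.Seek(index)
		input.Release(marker)
	}()

	l.Consume(input)
	return pred()
}
```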
-func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState { +func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState { proposed := NewDFAState(-1, configs) - var firstConfigWithRuleStopState ATNConfig - - for _, cfg := range configs.GetItems() { + var firstConfigWithRuleStopState *ATNConfig + for _, cfg := range configs.configs { _, ok := cfg.GetState().(*RuleStopState) if ok { @@ -588,14 +580,14 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) } if firstConfigWithRuleStopState != nil { proposed.isAcceptState = true - proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor + proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) } dfa := l.decisionToDFA[l.mode] l.atn.stateMu.Lock() defer l.atn.stateMu.Unlock() - existing, present := dfa.states.Get(proposed) + existing, present := dfa.Get(proposed) if present { // This state was already present, so just return it. @@ -605,10 +597,11 @@ func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) // We need to add the new state // - proposed.stateNumber = dfa.states.Len() - configs.SetReadOnly(true) + proposed.stateNumber = dfa.Len() + configs.readOnly = true + configs.configLookup = nil // Not needed now proposed.configs = configs - dfa.states.Put(proposed) + dfa.Put(proposed) } if !suppressEdge { dfa.setS0(proposed) @@ -620,7 +613,7 @@ func (l *LexerATNSimulator) getDFA(mode int) *DFA { return l.decisionToDFA[mode] } -// Get the text Matched so far for the current token. +// GetText returns the text [Match]ed so far for the current token. func (l *LexerATNSimulator) GetText(input CharStream) string { // index is first lookahead char, don't include. return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) diff --git a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go new file mode 100644 index 00000000..4955ac87 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go @@ -0,0 +1,218 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type LL1Analyzer struct { + atn *ATN +} + +func NewLL1Analyzer(atn *ATN) *LL1Analyzer { + la := new(LL1Analyzer) + la.atn = atn + return la +} + +const ( + // LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit + // a predicate during analysis if + // + // seeThruPreds==false + LL1AnalyzerHitPred = TokenInvalidType +) + +// * +// Calculates the SLL(1) expected lookahead set for each outgoing transition +// of an {@link ATNState}. The returned array has one element for each +// outgoing transition in {@code s}. If the closure from transition +// i leads to a semantic predicate before Matching a symbol, the +// element at index i of the result will be {@code nil}. +// +// @param s the ATN state +// @return the expected symbols for each outgoing transition of {@code s}. 
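The exported entry point for the analyzer below is Look; a small sketch of how a caller might compute the expected-token set for a state (the helper name is hypothetical):

```go
// expectedAfter returns the set of tokens that can follow state s in the
// given rule context, using the LL(1) analyzer described here.
func expectedAfter(atn *ATN, s ATNState, ctx RuleContext) *IntervalSet {
	la := NewLL1Analyzer(atn)
	return la.Look(s, nil, ctx) // nil stopState: stop only at rule-end states
}
```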
+func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { + if s == nil { + return nil + } + count := len(s.GetTransitions()) + look := make([]*IntervalSet, count) + for alt := 0; alt < count; alt++ { + + look[alt] = NewIntervalSet() + lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy") + la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false) + + // Wipe out lookahead for la alternative if we found nothing, + // or we had a predicate when we !seeThruPreds + if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { + look[alt] = nil + } + } + return look +} + +// Look computes the set of tokens that can follow s in the [ATN] in the +// specified ctx. +// +// If ctx is nil and the end of the rule containing +// s is reached, [EPSILON] is added to the result set. +// +// If ctx is not nil and the end of the outermost rule is +// reached, [EOF] is added to the result set. +// +// Parameter s the ATN state, and stopState is the ATN state to stop at. This can be a +// [BlockEndState] to detect epsilon paths through a closure. +// +// Parameter ctx is the complete parser context, or nil if the context +// should be ignored +// +// The func returns the set of tokens that can follow s in the [ATN] in the +// specified ctx. +func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { + r := NewIntervalSet() + var lookContext *PredictionContext + if ctx != nil { + lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) + } + la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"), + NewBitSet(), true, true) + return r +} + +//* +// Compute set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// +//

If {@code ctx} is {@code nil} and {@code stopState} or the end of the +// rule containing {@code s} is reached, {@link Token//EPSILON} is added to +// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is +// {@code true} and {@code stopState} or the end of the outermost rule is +// reached, {@link Token//EOF} is added to the result set.

+// +// @param s the ATN state. +// @param stopState the ATN state to stop at. This can be a +// {@link BlockEndState} to detect epsilon paths through a closure. +// @param ctx The outer context, or {@code nil} if the outer context should +// not be used. +// @param look The result lookahead set. +// @param lookBusy A set used for preventing epsilon closures in the ATN +// from causing a stack overflow. Outside code should pass +// {@code NewSet} for la argument. +// @param calledRuleStack A set used for preventing left recursion in the +// ATN from causing a stack overflow. Outside code should pass +// {@code NewBitSet()} for la argument. +// @param seeThruPreds {@code true} to true semantic predicates as +// implicitly {@code true} and "see through them", otherwise {@code false} +// to treat semantic predicates as opaque and add {@link //HitPred} to the +// result if one is encountered. +// @param addEOF Add {@link Token//EOF} to the result if the end of the +// outermost context is reached. This parameter has no effect if {@code ctx} +// is {@code nil}. + +func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], + calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { + + returnState := la.atn.states[ctx.getReturnState(i)] + la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} + +func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { + + c := NewATNConfig6(s, 0, ctx) + + if lookBusy.Contains(c) { + return + } + + _, present := lookBusy.Put(c) + if present { + return + + } + if s == stopState { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + } + + _, ok := s.(*RuleStopState) + + if ok { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + + if ctx.pcType != PredictionContextEmpty { + removed := calledRuleStack.contains(s.GetRuleIndex()) + defer func() { + if removed { + calledRuleStack.add(s.GetRuleIndex()) + } + }() + calledRuleStack.remove(s.GetRuleIndex()) + // run thru all possible stack tops in ctx + for i := 0; i < ctx.length(); i++ { + returnState := la.atn.states[ctx.getReturnState(i)] + la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i) + } + return + } + } + + n := len(s.GetTransitions()) + + for i := 0; i < n; i++ { + t := s.GetTransitions()[i] + + if t1, ok := t.(*RuleTransition); ok { + if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { + continue + } + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) + } else if t2, ok := t.(AbstractPredicateTransition); ok { + if seeThruPreds { + la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else { + look.addOne(LL1AnalyzerHitPred) + } + } else if t.getIsEpsilon() { + la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else if _, ok := t.(*WildcardTransition); ok { + look.addRange(TokenMinUserTokenType, la.atn.maxTokenType) + } else { + set := t.getLabel() + if set != nil { + if _, ok := 
t.(*NotSetTransition); ok { + set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType) + } + look.addSet(set) + } + } + } +} + +func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], + calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + + defer func() { + calledRuleStack.remove(t1.getTarget().GetRuleIndex()) + }() + + calledRuleStack.add(t1.getTarget().GetRuleIndex()) + la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go new file mode 100644 index 00000000..923c7b52 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go @@ -0,0 +1,47 @@ +//go:build !antlr.stats + +package antlr + +// This file is compiled when the build configuration antlr.stats is not enabled. +// which then allows the compiler to optimize out all the code that is not used. +const collectStats = false + +// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled. +type goRunStats struct { +} + +var Statistics = &goRunStats{} + +func (s *goRunStats) AddJStatRec(_ *JStatRec) { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) CollectionAnomalies() { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) Reset() { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) Report(dir string, prefix string) error { + // Do nothing - compiler will optimize this out (hopefully) + return nil +} + +func (s *goRunStats) Analyze() { + // Do nothing - compiler will optimize this out (hopefully) +} + +type statsOption func(*goRunStats) error + +func (s *goRunStats) Configure(options ...statsOption) error { + // Do nothing - compiler will optimize this out (hopefully) + return nil +} + +func WithTopN(topN int) statsOption { + return func(s *goRunStats) error { + return nil + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go b/vendor/github.com/antlr4-go/antlr/v4/parser.go similarity index 80% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go rename to vendor/github.com/antlr4-go/antlr/v4/parser.go index d26bf063..fb57ac15 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go +++ b/vendor/github.com/antlr4-go/antlr/v4/parser.go @@ -48,8 +48,10 @@ type BaseParser struct { _SyntaxErrors int } -// p.is all the parsing support code essentially most of it is error -// recovery stuff.// +// NewBaseParser contains all the parsing support code to embed in parsers. Essentially most of it is error +// recovery stuff. +// +//goland:noinspection GoUnusedExportedFunction func NewBaseParser(input TokenStream) *BaseParser { p := new(BaseParser) @@ -58,39 +60,46 @@ func NewBaseParser(input TokenStream) *BaseParser { // The input stream. p.input = nil + // The error handling strategy for the parser. The default value is a new // instance of {@link DefaultErrorStrategy}. p.errHandler = NewDefaultErrorStrategy() p.precedenceStack = make([]int, 0) p.precedenceStack.Push(0) - // The {@link ParserRuleContext} object for the currently executing rule. + + // The ParserRuleContext object for the currently executing rule. 
// p.is always non-nil during the parsing process. p.ctx = nil - // Specifies whether or not the parser should construct a parse tree during + + // Specifies whether the parser should construct a parse tree during // the parsing process. The default value is {@code true}. p.BuildParseTrees = true - // When {@link //setTrace}{@code (true)} is called, a reference to the - // {@link TraceListener} is stored here so it can be easily removed in a - // later call to {@link //setTrace}{@code (false)}. The listener itself is + + // When setTrace(true) is called, a reference to the + // TraceListener is stored here, so it can be easily removed in a + // later call to setTrace(false). The listener itself is // implemented as a parser listener so p.field is not directly used by // other parser methods. p.tracer = nil - // The list of {@link ParseTreeListener} listeners registered to receive + + // The list of ParseTreeListener listeners registered to receive // events during the parse. p.parseListeners = nil + // The number of syntax errors Reported during parsing. p.value is - // incremented each time {@link //NotifyErrorListeners} is called. + // incremented each time NotifyErrorListeners is called. p._SyntaxErrors = 0 p.SetInputStream(input) return p } -// p.field maps from the serialized ATN string to the deserialized {@link -// ATN} with +// This field maps from the serialized ATN string to the deserialized [ATN] with // bypass alternatives. // -// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() +// [ATNDeserializationOptions.isGenerateRuleBypassTransitions] +// +//goland:noinspection GoUnusedGlobalVariable var bypassAltsAtnCache = make(map[string]int) // reset the parser's state// @@ -143,10 +152,13 @@ func (p *BaseParser) Match(ttype int) Token { p.Consume() } else { t = p.errHandler.RecoverInline(p) + if p.HasError() { + return nil + } if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol + + // we must have conjured up a new token during single token + // insertion if it's not the current symbol p.ctx.AddErrorNode(t) } } @@ -178,9 +190,8 @@ func (p *BaseParser) MatchWildcard() Token { } else { t = p.errHandler.RecoverInline(p) if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol + // we must have conjured up a new token during single token + // insertion if it's not the current symbol p.ctx.AddErrorNode(t) } } @@ -202,33 +213,27 @@ func (p *BaseParser) GetParseListeners() []ParseTreeListener { return p.parseListeners } -// Registers {@code listener} to receive events during the parsing process. +// AddParseListener registers listener to receive events during the parsing process. // -//

-// <p>To support output-preserving grammar transformations (including but not
+// limited to left-recursion removal, automated left-factoring, and
+// optimized code generation), calls to listener methods during the parse
+// may differ substantially from calls made by
-// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
+// [ParseTreeWalker.DEFAULT] used after the parse is complete. In
// particular, rule entry and exit events may occur in a different order
// during the parse than after the parser. In addition, calls to certain
-// rule entry methods may be omitted.</p>
-//
-// <p>With the following specific exceptions, calls to listener events are
-// deterministic, i.e. for identical input the calls to listener
-// methods will be the same.</p>
-//
-// <ul>
-// <li>Alterations to the grammar used to generate code may change the
-// behavior of the listener calls.</li>
-// <li>Alterations to the command line options passed to ANTLR 4 when
-// generating the parser may change the behavior of the listener calls.</li>
-// <li>Changing the version of the ANTLR Tool used to generate the parser
-// may change the behavior of the listener calls.</li>
-// </ul>
+// rule entry methods may be omitted. // -// @param listener the listener to add +// With the following specific exceptions, calls to listener events are +// deterministic, i.e. for identical input the calls to listener +// methods will be the same. // -// @panics nilPointerException if {@code} listener is {@code nil} +// - Alterations to the grammar used to generate code may change the +// behavior of the listener calls. +// - Alterations to the command line options passed to ANTLR 4 when +// generating the parser may change the behavior of the listener calls. +// - Changing the version of the ANTLR Tool used to generate the parser +// may change the behavior of the listener calls. func (p *BaseParser) AddParseListener(listener ParseTreeListener) { if listener == nil { panic("listener") @@ -239,11 +244,10 @@ func (p *BaseParser) AddParseListener(listener ParseTreeListener) { p.parseListeners = append(p.parseListeners, listener) } -// Remove {@code listener} from the list of parse listeners. +// RemoveParseListener removes listener from the list of parse listeners. // -//

-// <p>If {@code listener} is {@code nil} or has not been added as a parse
-// listener, p.method does nothing.</p>

-// @param listener the listener to remove +// If listener is nil or has not been added as a parse +// listener, this func does nothing. func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { if p.parseListeners != nil { @@ -274,7 +278,7 @@ func (p *BaseParser) removeParseListeners() { p.parseListeners = nil } -// Notify any parse listeners of an enter rule event. +// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event. func (p *BaseParser) TriggerEnterRuleEvent() { if p.parseListeners != nil { ctx := p.ctx @@ -285,9 +289,7 @@ func (p *BaseParser) TriggerEnterRuleEvent() { } } -// Notify any parse listeners of an exit rule event. -// -// @see //addParseListener +// TriggerExitRuleEvent notifies any parse listeners of an exit rule event. func (p *BaseParser) TriggerExitRuleEvent() { if p.parseListeners != nil { // reverse order walk of listeners @@ -314,19 +316,16 @@ func (p *BaseParser) GetTokenFactory() TokenFactory { return p.input.GetTokenSource().GetTokenFactory() } -// Tell our token source and error strategy about a Newway to create tokens.// +// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens. func (p *BaseParser) setTokenFactory(factory TokenFactory) { p.input.GetTokenSource().setTokenFactory(factory) } -// The ATN with bypass alternatives is expensive to create so we create it +// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it // lazily. -// -// @panics UnsupportedOperationException if the current parser does not -// implement the {@link //getSerializedATN()} method. func (p *BaseParser) GetATNWithBypassAlts() { - // TODO + // TODO - Implement this? panic("Not implemented!") // serializedAtn := p.getSerializedATN() @@ -354,6 +353,7 @@ func (p *BaseParser) GetATNWithBypassAlts() { // String id = m.Get("ID") // +//goland:noinspection GoUnusedParameter func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { panic("NewParseTreePatternMatcher not implemented!") @@ -386,14 +386,16 @@ func (p *BaseParser) GetTokenStream() TokenStream { return p.input } -// Set the token stream and reset the parser.// +// SetTokenStream installs input as the token stream and resets the parser. func (p *BaseParser) SetTokenStream(input TokenStream) { p.input = nil p.reset() p.input = input } -// Match needs to return the current input symbol, which gets put +// GetCurrentToken returns the current token at LT(1). +// +// [Match] needs to return the current input symbol, which gets put // into the label for the associated token ref e.g., x=ID. 
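// Editor's sketch, not part of the vendored diff: how the AddParseListener /
// RemoveParseListener API documented above is typically used to observe rule entry
// and exit while the parse is still running (as opposed to walking the finished tree
// afterwards). The import path matches this vendor directory; the package name, the
// listener type, and the start callback are illustrative only, and the
// ParseTreeListener method set is assumed to match the upstream antlr4-go runtime.
package listenerexample

import "github.com/antlr4-go/antlr/v4"

// ruleCounter implements antlr.ParseTreeListener and counts rule entries on the fly.
type ruleCounter struct{ entered int }

func (r *ruleCounter) VisitTerminal(node antlr.TerminalNode)      {}
func (r *ruleCounter) VisitErrorNode(node antlr.ErrorNode)        {}
func (r *ruleCounter) ExitEveryRule(ctx antlr.ParserRuleContext)  {}
func (r *ruleCounter) EnterEveryRule(ctx antlr.ParserRuleContext) { r.entered++ }

// countRules registers the listener for the duration of one parse. As the comment
// above notes, event order during the parse may differ from a post-parse tree walk.
func countRules(p *antlr.BaseParser, start func()) int {
	r := &ruleCounter{}
	p.AddParseListener(r)
	defer p.RemoveParseListener(r)
	start() // invoke the grammar's entry rule on the concrete generated parser
	return r.entered
}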
func (p *BaseParser) GetCurrentToken() Token { return p.input.LT(1) @@ -446,7 +448,7 @@ func (p *BaseParser) addContextToParseTree() { } } -func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { +func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) { p.SetState(state) p.ctx = localctx p.ctx.SetStart(p.input.LT(1)) @@ -474,7 +476,7 @@ func (p *BaseParser) ExitRule() { func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { localctx.SetAltNumber(altNum) - // if we have Newlocalctx, make sure we replace existing ctx + // if we have a new localctx, make sure we replace existing ctx // that is previous child of parse tree if p.BuildParseTrees && p.ctx != localctx { if p.ctx.GetParent() != nil { @@ -498,7 +500,7 @@ func (p *BaseParser) GetPrecedence() int { return p.precedenceStack[len(p.precedenceStack)-1] } -func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { +func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) { p.SetState(state) p.precedenceStack.Push(precedence) p.ctx = localctx @@ -512,7 +514,7 @@ func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleI // // Like {@link //EnterRule} but for recursive rules. -func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { +func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) { previous := p.ctx previous.SetParent(localctx) previous.SetInvokingState(state) @@ -530,7 +532,7 @@ func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, } func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { - p.precedenceStack.Pop() + _, _ = p.precedenceStack.Pop() p.ctx.SetStop(p.input.LT(-1)) retCtx := p.ctx // save current ctx (return value) // unroll so ctx is as it was before call to recursive method @@ -561,29 +563,22 @@ func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { return nil } -func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { +func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool { return precedence >= p.precedenceStack[len(p.precedenceStack)-1] } +//goland:noinspection GoUnusedParameter func (p *BaseParser) inContext(context ParserRuleContext) bool { // TODO: useful in parser? return false } -// -// Checks whether or not {@code symbol} can follow the current state in the -// ATN. The behavior of p.method is equivalent to the following, but is +// IsExpectedToken checks whether symbol can follow the current state in the +// {ATN}. The behavior of p.method is equivalent to the following, but is // implemented such that the complete context-sensitive follow set does not // need to be explicitly constructed. // -//
-// <pre>
-// return getExpectedTokens().contains(symbol)
-// </pre>
-// -// @param symbol the symbol type to check -// @return {@code true} if {@code symbol} can follow the current state in -// the ATN, otherwise {@code false}. - +// return getExpectedTokens().contains(symbol) func (p *BaseParser) IsExpectedToken(symbol int) bool { atn := p.Interpreter.atn ctx := p.ctx @@ -611,11 +606,9 @@ func (p *BaseParser) IsExpectedToken(symbol int) bool { return false } -// Computes the set of input symbols which could follow the current parser -// state and context, as given by {@link //GetState} and {@link //GetContext}, +// GetExpectedTokens and returns the set of input symbols which could follow the current parser +// state and context, as given by [GetState] and [GetContext], // respectively. -// -// @see ATN//getExpectedTokens(int, RuleContext) func (p *BaseParser) GetExpectedTokens() *IntervalSet { return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) } @@ -626,7 +619,7 @@ func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { return atn.NextTokens(s, nil) } -// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// +// GetRuleIndex get a rule's index (i.e., RULE_ruleName field) or -1 if not found. func (p *BaseParser) GetRuleIndex(ruleName string) int { var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] if ok { @@ -636,13 +629,10 @@ func (p *BaseParser) GetRuleIndex(ruleName string) int { return -1 } -// Return List<String> of the rule names in your parser instance +// GetRuleInvocationStack returns a list of the rule names in your parser instance // leading up to a call to the current rule. You could override if // you want more details such as the file/line info of where // in the ATN a rule is invoked. -// -// this very useful for error messages. - func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { if c == nil { c = p.ctx @@ -668,16 +658,16 @@ func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { return stack } -// For debugging and other purposes.// +// GetDFAStrings returns a list of all DFA states used for debugging purposes func (p *BaseParser) GetDFAStrings() string { return fmt.Sprint(p.Interpreter.decisionToDFA) } -// For debugging and other purposes.// +// DumpDFA prints the whole of the DFA for debugging func (p *BaseParser) DumpDFA() { seenOne := false for _, dfa := range p.Interpreter.decisionToDFA { - if dfa.states.Len() > 0 { + if dfa.Len() > 0 { if seenOne { fmt.Println() } @@ -692,8 +682,10 @@ func (p *BaseParser) GetSourceName() string { return p.GrammarFileName } -// During a parse is sometimes useful to listen in on the rule entry and exit -// events as well as token Matches. p.is for quick and dirty debugging. +// SetTrace installs a trace listener for the parse. +// +// During a parse it is sometimes useful to listen in on the rule entry and exit +// events as well as token Matches. This is for quick and dirty debugging. func (p *BaseParser) SetTrace(trace *TraceListener) { if trace == nil { p.RemoveParseListener(p.tracer) diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go new file mode 100644 index 00000000..ae286969 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go @@ -0,0 +1,1668 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
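// Editor's sketch, not part of the vendored diff: the simulator added below
// implements both SLL and full-LL prediction, and a parser selects between them
// through its exported Interpreter field. A common pattern is to try the cheaper SLL
// mode first and re-parse in full LL mode only when SLL reports a syntax error. The
// Interpreter field, SetPredictionMode, SetTokenStream, and the PredictionMode*
// constants all appear in this diff; the package name and the ok-callback (which
// would normally be backed by an error listener or a bailing error strategy) are
// illustrative only.
package predictionexample

import "github.com/antlr4-go/antlr/v4"

// parseSLLFirst runs start() under SLL prediction and, if it reports failure,
// rewinds the token stream and retries under full LL prediction.
func parseSLLFirst(p *antlr.BaseParser, tokens antlr.TokenStream, start func() (ok bool)) {
	p.Interpreter.SetPredictionMode(antlr.PredictionModeSLL)
	if start() {
		return // SLL succeeded; a full LL pass would produce the same parse
	}

	// SetTokenStream resets the parser (see parser.go above) before the LL retry.
	p.SetTokenStream(tokens)
	p.Interpreter.SetPredictionMode(antlr.PredictionModeLL)
	start()
}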
+ +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +var () + +// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over +// a standard JStore so that we can use Lazy instantiation of the JStore, mostly +// to avoid polluting the stats module with a ton of JStore instances with nothing in them. +type ClosureBusy struct { + bMap *JStore[*ATNConfig, Comparator[*ATNConfig]] + desc string +} + +// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules +func NewClosureBusy(desc string) *ClosureBusy { + return &ClosureBusy{ + desc: desc, + } +} + +func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) { + if c.bMap == nil { + c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc) + } + return c.bMap.Put(config) +} + +type ParserATNSimulator struct { + BaseATNSimulator + + parser Parser + predictionMode int + input TokenStream + startIndex int + dfa *DFA + mergeCache *JPCMap + outerContext ParserRuleContext +} + +//goland:noinspection GoUnusedExportedFunction +func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { + + p := &ParserATNSimulator{ + BaseATNSimulator: BaseATNSimulator{ + atn: atn, + sharedContextCache: sharedContextCache, + }, + } + + p.parser = parser + p.decisionToDFA = decisionToDFA + // SLL, LL, or LL + exact ambig detection?// + p.predictionMode = PredictionModeLL + // LAME globals to avoid parameters!!!!! I need these down deep in predTransition + p.input = nil + p.startIndex = 0 + p.outerContext = nil + p.dfa = nil + // Each prediction operation uses a cache for merge of prediction contexts. + // Don't keep around as it wastes huge amounts of memory. [JPCMap] + // isn't Synchronized, but we're ok since two threads shouldn't reuse same + // parser/atn-simulator object because it can only handle one input at a time. + // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid + // the merge if we ever see a and b again. Note that (b,a) -> c should + // also be examined during cache lookup. + // + p.mergeCache = nil + + return p +} + +func (p *ParserATNSimulator) GetPredictionMode() int { + return p.predictionMode +} + +func (p *ParserATNSimulator) SetPredictionMode(v int) { + p.predictionMode = v +} + +func (p *ParserATNSimulator) reset() { +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + + strconv.Itoa(input.LT(1).GetColumn())) + } + p.input = input + p.startIndex = input.Index() + p.outerContext = outerContext + + dfa := p.decisionToDFA[decision] + p.dfa = dfa + m := input.Mark() + index := input.Index() + + defer func() { + p.dfa = nil + p.mergeCache = nil // whack cache after each prediction + // Do not attempt to run a GC now that we're done with the cache as makes the + // GC overhead terrible for badly formed grammars and has little effect on well formed + // grammars. + // I have made some extra effort to try and reduce memory pressure by reusing allocations when + // possible. However, it can only have a limited effect. 
The real solution is to encourage grammar + // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect + // what is happening at runtime, along with using the error listener to report ambiguities. + + input.Seek(index) + input.Release(m) + }() + + // Now we are certain to have a specific decision's DFA + // But, do we still need an initial state? + var s0 *DFAState + p.atn.stateMu.RLock() + if dfa.getPrecedenceDfa() { + p.atn.edgeMu.RLock() + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. + s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) + p.atn.edgeMu.RUnlock() + } else { + // the start state for a "regular" DFA is just s0 + s0 = dfa.getS0() + } + p.atn.stateMu.RUnlock() + + if s0 == nil { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) + } + fullCtx := false + s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx) + + p.atn.stateMu.Lock() + if dfa.getPrecedenceDfa() { + // If p is a precedence DFA, we use applyPrecedenceFilter + // to convert the computed start state to a precedence start + // state. We then use DFA.setPrecedenceStartState to set the + // appropriate start state for the precedence level rather + // than simply setting DFA.s0. + // + dfa.s0.configs = s0Closure + s0Closure = p.applyPrecedenceFilter(s0Closure) + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + p.atn.edgeMu.Lock() + dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) + p.atn.edgeMu.Unlock() + } else { + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + dfa.setS0(s0) + } + p.atn.stateMu.Unlock() + } + + alt, re := p.execATN(dfa, s0, input, index, outerContext) + parser.SetError(re) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) + } + return alt + +} + +// execATN performs ATN simulation to compute a predicted alternative based +// upon the remaining input, but also updates the DFA cache to avoid +// having to traverse the ATN again for the same input sequence. +// +// There are some key conditions we're looking for after computing a new +// set of ATN configs (proposed DFA state): +// +// - If the set is empty, there is no viable alternative for current symbol +// - Does the state uniquely predict an alternative? +// - Does the state have a conflict that would prevent us from +// putting it on the work list? +// +// We also have some key operations to do: +// +// - Add an edge from previous DFA state to potentially NewDFA state, D, +// - Upon current symbol but only if adding to work list, which means in all +// cases except no viable alternative (and possibly non-greedy decisions?) 
+// - Collecting predicates and adding semantic context to DFA accept states +// - adding rule context to context-sensitive DFA accept states +// - Consuming an input symbol +// - Reporting a conflict +// - Reporting an ambiguity +// - Reporting a context sensitivity +// - Reporting insufficient predicates +// +// Cover these cases: +// +// - dead end +// - single alt +// - single alt + predicates +// - conflict +// - conflict + predicates +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) { + + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + + ", DFA state " + s0.String() + + ", LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) + } + + previousD := s0 + + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("s0 = " + s0.String()) + } + t := input.LA(1) + for { // for more work + D := p.getExistingTargetState(previousD, t) + if D == nil { + D = p.computeTargetState(dfa, previousD, t) + } + if D == ATNSimulatorError { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for SLL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. + e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) + if alt != ATNInvalidAltNumber { + return alt, nil + } + p.parser.SetError(e) + return ATNInvalidAltNumber, e + } + if D.requiresFullContext && p.predictionMode != PredictionModeSLL { + // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + conflictingAlts := D.configs.conflictingAlts + if D.predicates != nil { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("DFA state has preds in DFA sim LL fail-over") + } + conflictIndex := input.Index() + if conflictIndex != startIndex { + input.Seek(startIndex) + } + conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) + if conflictingAlts.length() == 1 { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("Full LL avoided") + } + return conflictingAlts.minValue(), nil + } + if conflictIndex != startIndex { + // restore the index so Reporting the fallback to full + // context occurs with the index at the correct spot + input.Seek(conflictIndex) + } + } + if runtimeConfig.parserATNSimulatorDFADebug { + fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) + } + fullCtx := true + s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) + p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) + alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) + return alt, re + } + if D.isAcceptState { + if D.predicates == nil { + return D.prediction, nil + } + stopIndex := input.Index() + input.Seek(startIndex) + alts := p.evalSemanticContext(D.predicates, 
outerContext, true) + + switch alts.length() { + case 0: + return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex) + case 1: + return alts.minValue(), nil + default: + // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported. + p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) + return alts.minValue(), nil + } + } + previousD = D + + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } +} + +// Get an existing target state for an edge in the DFA. If the target state +// for the edge has not yet been computed or is otherwise not available, +// p method returns {@code nil}. +// +// @param previousD The current DFA state +// @param t The next input symbol +// @return The existing target DFA state for the given input symbol +// {@code t}, or {@code nil} if the target state for p edge is not +// already cached + +func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { + if t+1 < 0 { + return nil + } + + p.atn.edgeMu.RLock() + defer p.atn.edgeMu.RUnlock() + edges := previousD.getEdges() + if edges == nil || t+1 >= len(edges) { + return nil + } + return previousD.getIthEdge(t + 1) +} + +// Compute a target state for an edge in the DFA, and attempt to add the +// computed state and corresponding edge to the DFA. +// +// @param dfa The DFA +// @param previousD The current DFA state +// @param t The next input symbol +// +// @return The computed target DFA state for the given input symbol +// {@code t}. If {@code t} does not lead to a valid DFA state, p method +// returns {@link //ERROR}. +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { + reach := p.computeReachSet(previousD.configs, t, false) + + if reach == nil { + p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) + return ATNSimulatorError + } + // create new target state we'll add to DFA after it's complete + D := NewDFAState(-1, reach) + + predictedAlt := p.getUniqueAlt(reach) + + if runtimeConfig.parserATNSimulatorDebug { + altSubSets := PredictionModegetConflictingAltSubsets(reach) + fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + + ", previous=" + previousD.configs.String() + + ", configs=" + reach.String() + + ", predict=" + strconv.Itoa(predictedAlt) + + ", allSubsetsConflict=" + + fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + + ", conflictingAlts=" + p.getConflictingAlts(reach).String()) + } + if predictedAlt != ATNInvalidAltNumber { + // NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = true + D.configs.uniqueAlt = predictedAlt + D.setPrediction(predictedAlt) + } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { + // MORE THAN ONE VIABLE ALTERNATIVE + D.configs.conflictingAlts = p.getConflictingAlts(reach) + D.requiresFullContext = true + // in SLL-only mode, we will stop at p state and return the minimum alt + D.isAcceptState = true + D.setPrediction(D.configs.conflictingAlts.minValue()) + } + if D.isAcceptState && D.configs.hasSemanticContext { + p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) + if D.predicates != nil { + D.setPrediction(ATNInvalidAltNumber) + } + } + // all adds to dfa are done after we've created full D state + D = p.addDFAEdge(dfa, previousD, t, D) + return D +} + +func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { + // We need to test all predicates, even in 
DFA states that + // uniquely predict alternative. + nalts := len(decisionState.GetTransitions()) + // Update DFA so reach becomes accept state with (predicate,alt) + // pairs if preds found for conflicting alts + altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) + altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) + if altToPred != nil { + dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) + dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds + } else { + // There are preds in configs but they might go away + // when OR'd together like {p}? || NONE == NONE. If neither + // alt has preds, resolve to min alt + dfaState.setPrediction(altsToCollectPredsFrom.minValue()) + } +} + +// comes back with reach.uniqueAlt set to a valid alt +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) { + + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("execATNWithFullContext " + s0.String()) + } + + fullCtx := true + foundExactAmbig := false + var reach *ATNConfigSet + previous := s0 + input.Seek(startIndex) + t := input.LA(1) + predictedAlt := -1 + + for { // for more work + reach = p.computeReachSet(previous, t, fullCtx) + if reach == nil { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for LL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) + if alt != ATNInvalidAltNumber { + return alt, nil + } + return alt, p.noViableAlt(input, outerContext, previous, startIndex) + } + altSubSets := PredictionModegetConflictingAltSubsets(reach) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + + strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + + fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) + } + reach.uniqueAlt = p.getUniqueAlt(reach) + // unique prediction? + if reach.uniqueAlt != ATNInvalidAltNumber { + predictedAlt = reach.uniqueAlt + break + } + if p.predictionMode != PredictionModeLLExactAmbigDetection { + predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) + if predictedAlt != ATNInvalidAltNumber { + break + } + } else { + // In exact ambiguity mode, we never try to terminate early. + // Just keeps scarfing until we know what the conflict is + if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { + foundExactAmbig = true + predictedAlt = PredictionModegetSingleViableAlt(altSubSets) + break + } + // else there are multiple non-conflicting subsets or + // we're not sure what the ambiguity is yet. + // So, keep going. 
+ } + previous = reach + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } + // If the configuration set uniquely predicts an alternative, + // without conflict, then we know that it's a full LL decision + // not SLL. + if reach.uniqueAlt != ATNInvalidAltNumber { + p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) + return predictedAlt, nil + } + // We do not check predicates here because we have checked them + // on-the-fly when doing full context prediction. + + // + // In non-exact ambiguity detection mode, we might actually be able to + // detect an exact ambiguity, but I'm not going to spend the cycles + // needed to check. We only emit ambiguity warnings in exact ambiguity + // mode. + // + // For example, we might know that we have conflicting configurations. + // But, that does not mean that there is no way forward without a + // conflict. It's possible to have non-conflicting alt subsets as in: + // + // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + // + // from + // + // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + // + // In p case, (17,1,[5 $]) indicates there is some next sequence that + // would resolve p without conflict to alternative 1. Any other viable + // next sequence, however, is associated with a conflict. We stop + // looking for input because no amount of further lookahead will alter + // the fact that we should predict alternative 1. We just can't say for + // sure that there is an ambiguity without looking further. + + p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach) + + return predictedAlt, nil +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet { + if p.mergeCache == nil { + p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()") + } + intermediate := NewATNConfigSet(fullCtx) + + // Configurations already in a rule stop state indicate reaching the end + // of the decision rule (local context) or end of the start rule (full + // context). Once reached, these configurations are never updated by a + // closure operation, so they are handled separately for the performance + // advantage of having a smaller intermediate set when calling closure. + // + // For full-context reach operations, separate handling is required to + // ensure that the alternative Matching the longest overall sequence is + // chosen when multiple such configurations can Match the input. + + var skippedStopStates []*ATNConfig + + // First figure out where we can reach on input t + for _, c := range closure.configs { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) + } + + if _, ok := c.GetState().(*RuleStopState); ok { + if fullCtx || t == TokenEOF { + skippedStopStates = append(skippedStopStates, c) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("added " + c.String() + " to SkippedStopStates") + } + } + continue + } + + for _, trans := range c.GetState().GetTransitions() { + target := p.getReachableTarget(trans, t) + if target != nil { + cfg := NewATNConfig4(c, target) + intermediate.Add(cfg, p.mergeCache) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("added " + cfg.String() + " to intermediate") + } + } + } + } + + // Now figure out where the reach operation can take us... 
+ var reach *ATNConfigSet + + // This block optimizes the reach operation for intermediate sets which + // trivially indicate a termination state for the overall + // AdaptivePredict operation. + // + // The conditions assume that intermediate + // contains all configurations relevant to the reach set, but p + // condition is not true when one or more configurations have been + // withheld in SkippedStopStates, or when the current symbol is EOF. + // + if skippedStopStates == nil && t != TokenEOF { + if len(intermediate.configs) == 1 { + // Don't pursue the closure if there is just one state. + // It can only have one alternative just add to result + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } + } + // If the reach set could not be trivially determined, perform a closure + // operation on the intermediate set to compute its initial value. + // + if reach == nil { + reach = NewATNConfigSet(fullCtx) + closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy") + treatEOFAsEpsilon := t == TokenEOF + amount := len(intermediate.configs) + for k := 0; k < amount; k++ { + p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) + } + } + if t == TokenEOF { + // After consuming EOF no additional input is possible, so we are + // only interested in configurations which reached the end of the + // decision rule (local context) or end of the start rule (full + // context). Update reach to contain only these configurations. This + // handles both explicit EOF transitions in the grammar and implicit + // EOF transitions following the end of the decision or start rule. + // + // When reach==intermediate, no closure operation was performed. In + // p case, removeAllConfigsNotInRuleStopState needs to check for + // reachable rule stop states as well as configurations already in + // a rule stop state. + // + // This is handled before the configurations in SkippedStopStates, + // because any configurations potentially added from that list are + // already guaranteed to meet this condition whether it's + // required. + // + reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate)) + } + // If SkippedStopStates!=nil, then it contains at least one + // configuration. For full-context reach operations, these + // configurations reached the end of the start rule, in which case we + // only add them back to reach if no configuration during the current + // closure operation reached such a state. This ensures AdaptivePredict + // chooses an alternative Matching the longest overall sequence when + // multiple alternatives are viable. + // + if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { + for l := 0; l < len(skippedStopStates); l++ { + reach.Add(skippedStopStates[l], p.mergeCache) + } + } + + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String()) + } + + if len(reach.configs) == 0 { + return nil + } + + return reach +} + +// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from +// configs which are in a [RuleStopState]. 
If all +// configurations in configs are already in a rule stop state, this +// method simply returns configs. +// +// When lookToEndOfRule is true, this method uses +// [ATN].[NextTokens] for each configuration in configs which is +// not already in a rule stop state to see if a rule stop state is reachable +// from the configuration via epsilon-only transitions. +// +// When lookToEndOfRule is true, this method checks for rule stop states +// reachable by epsilon-only transitions from each configuration in +// configs. +// +// The func returns configs if all configurations in configs are in a +// rule stop state, otherwise it returns a new configuration set containing only +// the configurations from configs which are in a rule stop state +func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet { + if PredictionModeallConfigsInRuleStopStates(configs) { + return configs + } + result := NewATNConfigSet(configs.fullCtx) + for _, config := range configs.configs { + if _, ok := config.GetState().(*RuleStopState); ok { + result.Add(config, p.mergeCache) + continue + } + if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { + NextTokens := p.atn.NextTokens(config.GetState(), nil) + if NextTokens.contains(TokenEpsilon) { + endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] + result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache) + } + } + } + return result +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet { + // always at least the implicit call to start rule + initialContext := predictionContextFromRuleContext(p.atn, ctx) + configs := NewATNConfigSet(fullCtx) + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("computeStartState from ATN state " + a.String() + + " initialContext=" + initialContext.String()) + } + + for i := 0; i < len(a.GetTransitions()); i++ { + target := a.GetTransitions()[i].getTarget() + c := NewATNConfig6(target, i+1, initialContext) + closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy") + p.closure(c, configs, closureBusy, true, fullCtx, false) + } + return configs +} + +// applyPrecedenceFilter transforms the start state computed by +// [computeStartState] to the special start state used by a +// precedence [DFA] for a particular precedence value. The transformation +// process applies the following changes to the start state's configuration +// set. +// +// 1. Evaluate the precedence predicates for each configuration using +// [SemanticContext].evalPrecedence. +// 2. Remove all configurations which predict an alternative greater than +// 1, for which another configuration that predicts alternative 1 is in the +// same ATN state with the same prediction context. +// +// Transformation 2 is valid for the following reasons: +// +// - The closure block cannot contain any epsilon transitions which bypass +// the body of the closure, so all states reachable via alternative 1 are +// part of the precedence alternatives of the transformed left-recursive +// rule. 
+// - The "primary" portion of a left recursive rule cannot contain an +// epsilon transition, so the only way an alternative other than 1 can exist +// in a state that is also reachable via alternative 1 is by nesting calls +// to the left-recursive rule, with the outer calls not being at the +// preferred precedence level. +// +// The prediction context must be considered by this filter to address +// situations like the following: +// +// grammar TA +// prog: statement* EOF +// statement: letterA | statement letterA 'b' +// letterA: 'a' +// +// In the above grammar, the [ATN] state immediately before the token +// reference 'a' in letterA is reachable from the left edge +// of both the primary and closure blocks of the left-recursive rule +// statement. The prediction context associated with each of these +// configurations distinguishes between them, and prevents the alternative +// which stepped out to prog, and then back in to statement +// from being eliminated by the filter. +// +// The func returns the transformed configuration set representing the start state +// for a precedence [DFA] at a particular precedence level (determined by +// calling [Parser].getPrecedence). +func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet { + + statesFromAlt1 := make(map[int]*PredictionContext) + configSet := NewATNConfigSet(configs.fullCtx) + + for _, config := range configs.configs { + // handle alt 1 first + if config.GetAlt() != 1 { + continue + } + updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) + if updatedContext == nil { + // the configuration was eliminated + continue + } + statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() + if updatedContext != config.GetSemanticContext() { + configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache) + } else { + configSet.Add(config, p.mergeCache) + } + } + for _, config := range configs.configs { + + if config.GetAlt() == 1 { + // already handled + continue + } + // In the future, p elimination step could be updated to also + // filter the prediction context for alternatives predicting alt>1 + // (basically a graph subtraction algorithm). 
+ if !config.getPrecedenceFilterSuppressed() { + context := statesFromAlt1[config.GetState().GetStateNumber()] + if context != nil && context.Equals(config.GetContext()) { + // eliminated + continue + } + } + configSet.Add(config, p.mergeCache) + } + return configSet +} + +func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { + if trans.Matches(ttype, 0, p.atn.maxTokenType) { + return trans.getTarget() + } + + return nil +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext { + + altToPred := make([]SemanticContext, nalts+1) + for _, c := range configs.configs { + if ambigAlts.contains(c.GetAlt()) { + altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) + } + } + nPredAlts := 0 + for i := 1; i <= nalts; i++ { + pred := altToPred[i] + if pred == nil { + altToPred[i] = SemanticContextNone + } else if pred != SemanticContextNone { + nPredAlts++ + } + } + // unambiguous alts are nil in altToPred + if nPredAlts == 0 { + altToPred = nil + } + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) + } + return altToPred +} + +func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { + pairs := make([]*PredPrediction, 0) + containsPredicate := false + for i := 1; i < len(altToPred); i++ { + pred := altToPred[i] + // un-predicated is indicated by SemanticContextNONE + if ambigAlts != nil && ambigAlts.contains(i) { + pairs = append(pairs, NewPredPrediction(pred, i)) + } + if pred != SemanticContextNone { + containsPredicate = true + } + } + if !containsPredicate { + return nil + } + return pairs +} + +// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by +// choosing an alternative rather than panic a NoViableAltException in particular prediction scenarios where the +// Error state was reached during [ATN] simulation. +// +// The default implementation of this method uses the following +// algorithm to identify an [ATN] configuration which successfully parsed the +// decision entry rule. Choosing such an alternative ensures that the +// [ParserRuleContext] returned by the calling rule will be complete +// and valid, and the syntax error will be Reported later at a more +// localized location. +// +// - If a syntactically valid path or paths reach the end of the decision rule, and +// they are semantically valid if predicated, return the min associated alt. +// - Else, if a semantically invalid but syntactically valid path exist +// or paths exist, return the minimum associated alt. +// - Otherwise, return [ATNInvalidAltNumber]. +// +// In some scenarios, the algorithm described above could predict an +// alternative which will result in a [FailedPredicateException] in +// the parser. Specifically, this could occur if the only configuration +// capable of successfully parsing to the end of the decision rule is +// blocked by a semantic predicate. By choosing this alternative within +// [AdaptivePredict] instead of panic a [NoViableAltException], the resulting +// [FailedPredicateException] in the parser will identify the specific +// predicate which is preventing the parser from successfully parsing the +// decision rule, which helps developers identify and correct logic errors +// in semantic predicates. 
+// +// pass in the configs holding ATN configurations which were valid immediately before +// the ERROR state was reached, outerContext as the initial parser context from the paper +// or the parser stack at the instant before prediction commences. +// +// Teh func returns the value to return from [AdaptivePredict], or +// [ATNInvalidAltNumber] if a suitable alternative was not +// identified and [AdaptivePredict] should report an error instead. +func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int { + cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) + semValidConfigs := cfgs[0] + semInvalidConfigs := cfgs[1] + alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) + if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists + return alt + } + // Is there a syntactically valid path with a failed pred? + if len(semInvalidConfigs.configs) > 0 { + alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) + if alt != ATNInvalidAltNumber { // syntactically viable path exists + return alt + } + } + return ATNInvalidAltNumber +} + +func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int { + alts := NewIntervalSet() + + for _, c := range configs.configs { + _, ok := c.GetState().(*RuleStopState) + + if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { + alts.addOne(c.GetAlt()) + } + } + if alts.length() == 0 { + return ATNInvalidAltNumber + } + + return alts.first() +} + +// Walk the list of configurations and split them according to +// those that have preds evaluating to true/false. If no pred, assume +// true pred and include in succeeded set. Returns Pair of sets. +// +// Create a NewSet so as not to alter the incoming parameter. +// +// Assumption: the input stream has been restored to the starting point +// prediction, which is where predicates need to evaluate. + +type ATNConfigSetPair struct { + item0, item1 *ATNConfigSet +} + +func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet { + succeeded := NewATNConfigSet(configs.fullCtx) + failed := NewATNConfigSet(configs.fullCtx) + + for _, c := range configs.configs { + if c.GetSemanticContext() != SemanticContextNone { + predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) + if predicateEvaluationResult { + succeeded.Add(c, nil) + } else { + failed.Add(c, nil) + } + } else { + succeeded.Add(c, nil) + } + } + return []*ATNConfigSet{succeeded, failed} +} + +// evalSemanticContext looks through a list of predicate/alt pairs, returning alts for the +// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an +// un-predicated runtimeConfig which behaves as "always true." If !complete +// then we stop at the first predicate that evaluates to true. This +// includes pairs with nil predicates. 
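// Editor's sketch, not part of the vendored diff: the ReportAmbiguity and
// ReportAttemptingFullContext calls made by the simulator above are silent with the
// default listener; attaching a diagnostic error listener and forcing exact
// ambiguity detection makes them visible, which helps when debugging grammars whose
// alternatives or predicates conflict. The Interpreter field and the
// PredictionModeLLExactAmbigDetection constant appear in this diff;
// NewDiagnosticErrorListener and AddErrorListener are assumed to be available as in
// the upstream antlr4-go runtime, and the package name is illustrative only.
package diagnosticsexample

import "github.com/antlr4-go/antlr/v4"

// enableAmbiguityDiagnostics makes the parser report every exact ambiguity it finds.
func enableAmbiguityDiagnostics(p *antlr.BaseParser) {
	p.AddErrorListener(antlr.NewDiagnosticErrorListener(true))
	p.Interpreter.SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection)
}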
+// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { + predictions := NewBitSet() + for i := 0; i < len(predPredictions); i++ { + pair := predPredictions[i] + if pair.pred == SemanticContextNone { + predictions.add(pair.alt) + if !complete { + break + } + continue + } + + predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug { + fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) + } + if predicateEvaluationResult { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug { + fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) + } + predictions.add(pair.alt) + if !complete { + break + } + } + } + return predictions +} + +func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { + initialDepth := 0 + p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, + fullCtx, initialDepth, treatEOFAsEpsilon) +} + +func (p *ParserATNSimulator) closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("closure(" + config.String() + ")") + } + + var stack []*ATNConfig + visited := make(map[*ATNConfig]bool) + + stack = append(stack, config) + + for len(stack) > 0 { + currConfig := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + if _, ok := visited[currConfig]; ok { + continue + } + visited[currConfig] = true + + if _, ok := currConfig.GetState().(*RuleStopState); ok { + // We hit rule end. If we have context info, use it + // run thru all possible stack tops in ctx + if !currConfig.GetContext().isEmpty() { + for i := 0; i < currConfig.GetContext().length(); i++ { + if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { + if fullCtx { + nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY) + configs.Add(nb, p.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex())) + } + p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } + continue + } + returnState := p.atn.states[currConfig.GetContext().getReturnState(i)] + newContext := currConfig.GetContext().GetParent(i) // "pop" return state + + c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. 
+ c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext()) + + stack = append(stack, c) + } + continue + } else if fullCtx { + // reached end of start rule + configs.Add(currConfig, p.mergeCache) + continue + } else { + // else if we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex())) + } + } + } + + p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("closure(" + config.String() + ")") + } + + if _, ok := config.GetState().(*RuleStopState); ok { + // We hit rule end. If we have context info, use it + // run thru all possible stack tops in ctx + if !config.GetContext().isEmpty() { + for i := 0; i < config.GetContext().length(); i++ { + if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { + if fullCtx { + nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY) + configs.Add(nb, p.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } + continue + } + returnState := p.atn.states[config.GetContext().getReturnState(i)] + newContext := config.GetContext().GetParent(i) // "pop" return state + + c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. + c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) + p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) + } + return + } else if fullCtx { + // reached end of start rule + configs.Add(config, p.mergeCache) + return + } else { + // else if we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + } + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) +} + +// Do the actual work of walking epsilon edges +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + state := config.GetState() + // optimization + if !state.GetEpsilonOnlyTransitions() { + configs.Add(config, p.mergeCache) + // make sure to not return here, because EOF transitions can act as + // both epsilon transitions and non-epsilon transitions. 
+ } + for i := 0; i < len(state.GetTransitions()); i++ { + if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) { + continue + } + + t := state.GetTransitions()[i] + _, ok := t.(*ActionTransition) + continueCollecting := collectPredicates && !ok + c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) + if c != nil { + newDepth := depth + + if _, ok := config.GetState().(*RuleStopState); ok { + // target fell off end of rule mark resulting c as having dipped into outer context + // We can't get here if incoming config was rule stop and we had context + // track how far we dip into outer context. Might + // come in handy and we avoid evaluating context dependent + // preds if this is > 0. + + if p.dfa != nil && p.dfa.getPrecedenceDfa() { + if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { + c.setPrecedenceFilterSuppressed(true) + } + } + + c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) + + _, present := closureBusy.Put(c) + if present { + // avoid infinite recursion for right-recursive rules + continue + } + + configs.dipsIntoOuterContext = true // TODO: can remove? only care when we add to set per middle of this method + newDepth-- + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("dips into outer ctx: " + c.String()) + } + } else { + + if !t.getIsEpsilon() { + _, present := closureBusy.Put(c) + if present { + // avoid infinite recursion for EOF* and EOF+ + continue + } + } + if _, ok := t.(*RuleTransition); ok { + // latch when newDepth goes negative - once we step out of the entry context we can't return + if newDepth >= 0 { + newDepth++ + } + } + } + p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) + } + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool { + if !runtimeConfig.lRLoopEntryBranchOpt { + return false + } + + _p := config.GetState() + + // First check to see if we are in StarLoopEntryState generated during + // left-recursion elimination. For efficiency, also check if + // the context has an empty stack case. If so, it would mean + // global FOLLOW so we can't perform optimization + if _p.GetStateType() != ATNStateStarLoopEntry { + return false + } + startLoop, ok := _p.(*StarLoopEntryState) + if !ok { + return false + } + if !startLoop.precedenceRuleDecision || + config.GetContext().isEmpty() || + config.GetContext().hasEmptyPath() { + return false + } + + // Require all return states to return back to the same rule + // that p is in. + numCtxs := config.GetContext().length() + for i := 0; i < numCtxs; i++ { + returnState := p.atn.states[config.GetContext().getReturnState(i)] + if returnState.GetRuleIndex() != _p.GetRuleIndex() { + return false + } + } + x := _p.GetTransitions()[0].getTarget() + decisionStartState := x.(BlockStartState) + blockEndStateNum := decisionStartState.getEndState().stateNumber + blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState) + + // Verify that the top of each stack context leads to loop entry/exit + // state through epsilon edges and w/o leaving rule. 
+ + for i := 0; i < numCtxs; i++ { // for each stack context + returnStateNumber := config.GetContext().getReturnState(i) + returnState := p.atn.states[returnStateNumber] + + // all states must have single outgoing epsilon edge + if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() { + return false + } + + // Look for prefix op case like 'not expr', (' type ')' expr + returnStateTarget := returnState.GetTransitions()[0].getTarget() + if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p { + continue + } + + // Look for 'expr op expr' or case where expr's return state is block end + // of (...)* internal block; the block end points to loop back + // which points to p but we don't need to check that + if returnState == blockEndState { + continue + } + + // Look for ternary expr ? expr : expr. The return state points at block end, + // which points at loop entry state + if returnStateTarget == blockEndState { + continue + } + + // Look for complex prefix 'between expr and expr' case where 2nd expr's + // return state points at block end state of (...)* internal block + if returnStateTarget.GetStateType() == ATNStateBlockEnd && + len(returnStateTarget.GetTransitions()) == 1 && + returnStateTarget.GetTransitions()[0].getIsEpsilon() && + returnStateTarget.GetTransitions()[0].getTarget() == _p { + continue + } + + // anything else ain't conforming + return false + } + + return true +} + +func (p *ParserATNSimulator) getRuleName(index int) string { + if p.parser != nil && index >= 0 { + return p.parser.GetRuleNames()[index] + } + var sb strings.Builder + sb.Grow(32) + + sb.WriteString("') + return sb.String() +} + +func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) *ATNConfig { + + switch t.getSerializationType() { + case TransitionRULE: + return p.ruleTransition(config, t.(*RuleTransition)) + case TransitionPRECEDENCE: + return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx) + case TransitionPREDICATE: + return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx) + case TransitionACTION: + return p.actionTransition(config, t.(*ActionTransition)) + case TransitionEPSILON: + return NewATNConfig4(config, t.getTarget()) + case TransitionATOM, TransitionRANGE, TransitionSET: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if treatEOFAsEpsilon { + if t.Matches(TokenEOF, 0, 1) { + return NewATNConfig4(config, t.getTarget()) + } + } + return nil + default: + return nil + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) actionTransition(config *ATNConfig, t *ActionTransition) *ATNConfig { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) + } + return NewATNConfig4(config, t.getTarget()) +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) precedenceTransition(config *ATNConfig, + pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig { + + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + + strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") + if p.parser != nil { + fmt.Println("context surrounding pred is " + 
fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) + } + } + var c *ATNConfig + if collectPredicates && inContext { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the runtimeConfig sets. It also obviates the need to test predicates + // later during conflict resolution. + currentPosition := p.input.Index() + p.input.Seek(p.startIndex) + predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) + p.input.Seek(currentPosition) + if predSucceeds { + c = NewATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) + c = NewATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewATNConfig4(config, pt.getTarget()) + } + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("runtimeConfig from pred transition=" + c.String()) + } + return c +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig { + + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + + ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) + if p.parser != nil { + fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) + } + } + var c *ATNConfig + if collectPredicates && (!pt.isCtxDependent || inContext) { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. + currentPosition := p.input.Index() + p.input.Seek(p.startIndex) + predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) + p.input.Seek(currentPosition) + if predSucceeds { + c = NewATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) + c = NewATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewATNConfig4(config, pt.getTarget()) + } + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("config from pred transition=" + c.String()) + } + return c +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ruleTransition(config *ATNConfig, t *RuleTransition) *ATNConfig { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) + } + returnState := t.followState + newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) + return NewATNConfig1(config, t.getTarget(), newContext) +} + +func (p *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet { + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModeGetAlts(altsets) +} + +// getConflictingAltsOrUniqueAlt Sam pointed out a problem with the previous definition, v3, of +// ambiguous states. If we have another state associated with conflicting +// alternatives, we should keep going. For example, the following grammar +// +// s : (ID | ID ID?) 
; +// +// When the [ATN] simulation reaches the state before ;, it has a [DFA] +// state that looks like: +// +// [12|1|[], 6|2|[], 12|2|[]]. +// +// Naturally +// +// 12|1|[] and 12|2|[] +// +// conflict, but we cannot stop processing this node +// because alternative to has another way to continue, via +// +// [6|2|[]]. +// +// The key is that we have a single state that has config's only associated +// with a single alternative, 2, and crucially the state transitions +// among the configurations are all non-epsilon transitions. That means +// we don't consider any conflicts that include alternative 2. So, we +// ignore the conflict between alts 1 and 2. We ignore a set of +// conflicting alts when there is an intersection with an alternative +// associated with a single alt state in the state config-list map. +// +// It's also the case that we might have two conflicting configurations but +// also a 3rd non-conflicting configuration for a different alternative: +// +// [1|1|[], 1|2|[], 8|3|[]]. +// +// This can come about from grammar: +// +// a : A | A | A B +// +// After Matching input A, we reach the stop state for rule A, state 1. +// State 8 is the state right before B. Clearly alternatives 1 and 2 +// conflict and no amount of further lookahead will separate the two. +// However, alternative 3 will be able to continue, so we do not +// stop working on this state. +// +// In the previous example, we're concerned +// with states associated with the conflicting alternatives. Here alt +// 3 is not associated with the conflicting configs, but since we can continue +// looking for input reasonably, I don't declare the state done. We +// ignore a set of conflicting alts when we have an alternative +// that we still need to pursue. +func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet { + var conflictingAlts *BitSet + if configs.uniqueAlt != ATNInvalidAltNumber { + conflictingAlts = NewBitSet() + conflictingAlts.add(configs.uniqueAlt) + } else { + conflictingAlts = configs.conflictingAlts + } + return conflictingAlts +} + +func (p *ParserATNSimulator) GetTokenName(t int) string { + if t == TokenEOF { + return "EOF" + } + + if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) { + return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" + } + + if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetSymbolicNames()) { + return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">" + } + + return strconv.Itoa(t) +} + +func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { + return p.GetTokenName(input.LA(1)) +} + +// Used for debugging in [AdaptivePredict] around [execATN], but I cut +// it out for clarity now that alg. works well. We can leave this +// "dead" code for a bit. 
+func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) { + + panic("Not implemented") + + // fmt.Println("dead end configs: ") + // var decs = nvae.deadEndConfigs + // + // for i:=0; i0) { + // var t = c.state.GetTransitions()[0] + // if t2, ok := t.(*AtomTransition); ok { + // trans = "Atom "+ p.GetTokenName(t2.label) + // } else if t3, ok := t.(SetTransition); ok { + // _, ok := t.(*NotSetTransition) + // + // var s string + // if (ok){ + // s = "~" + // } + // + // trans = s + "Set " + t3.set + // } + // } + // fmt.Errorf(c.String(p.parser, true) + ":" + trans) + // } +} + +func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException { + return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext) +} + +func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int { + alt := ATNInvalidAltNumber + for _, c := range configs.configs { + if alt == ATNInvalidAltNumber { + alt = c.GetAlt() // found first alt + } else if c.GetAlt() != alt { + return ATNInvalidAltNumber + } + } + return alt +} + +// Add an edge to the DFA, if possible. This method calls +// {@link //addDFAState} to ensure the {@code to} state is present in the +// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the +// range of edges that can be represented in the DFA tables, p method +// returns without adding the edge to the DFA. +// +//

+// If {@code to} is {@code nil}, p method returns {@code nil}.
+// Otherwise, p method returns the {@link DFAState} returned by calling
+// {@link //addDFAState} for the {@code to} state.
+// +// @param dfa The DFA +// @param from The source state for the edge +// @param t The input symbol +// @param to The target state for the edge +// +// @return If {@code to} is {@code nil}, p method returns {@code nil} +// otherwise p method returns the result of calling {@link //addDFAState} +// on {@code to} +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) + } + if to == nil { + return nil + } + p.atn.stateMu.Lock() + to = p.addDFAState(dfa, to) // used existing if possible not incoming + p.atn.stateMu.Unlock() + if from == nil || t < -1 || t > p.atn.maxTokenType { + return to + } + p.atn.edgeMu.Lock() + if from.getEdges() == nil { + from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1)) + } + from.setIthEdge(t+1, to) // connect + p.atn.edgeMu.Unlock() + + if runtimeConfig.parserATNSimulatorDebug { + var names []string + if p.parser != nil { + names = p.parser.GetLiteralNames() + } + + fmt.Println("DFA=\n" + dfa.String(names, nil)) + } + return to +} + +// addDFAState adds state D to the [DFA] if it is not already present, and returns +// the actual instance stored in the [DFA]. If a state equivalent to D +// is already in the [DFA], the existing state is returned. Otherwise, this +// method returns D after adding it to the [DFA]. +// +// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and +// does not change the DFA. +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { + if d == ATNSimulatorError { + return d + } + + existing, present := dfa.Get(d) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Print("addDFAState " + d.String() + " exists") + } + return existing + } + + // The state will be added if not already there or we will be given back the existing state struct + // if it is present. 
+ // + d.stateNumber = dfa.Len() + if !d.configs.readOnly { + d.configs.OptimizeConfigs(&p.BaseATNSimulator) + d.configs.readOnly = true + d.configs.configLookup = nil + } + dfa.Put(d) + + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("addDFAState new " + d.String()) + } + + return d +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs) + } +} + +// ReportAmbiguity reports and ambiguity in the parse, which shows that the parser will explore a different route. +// +// If context-sensitive parsing, we know it's an ambiguity not a conflict or error, but we can report it to the developer +// so that they can see that this is happening and can take action if they want to. 
+// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int, + exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go similarity index 77% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go rename to vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go index 1c8cee74..c249bc13 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go +++ b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go @@ -31,7 +31,9 @@ type ParserRuleContext interface { } type BaseParserRuleContext struct { - *BaseRuleContext + parentCtx RuleContext + invokingState int + RuleIndex int start, stop Token exception RecognitionException @@ -40,8 +42,22 @@ type BaseParserRuleContext struct { func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { prc := new(BaseParserRuleContext) + InitBaseParserRuleContext(prc, parent, invokingStateNumber) + return prc +} + +func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) { + // What context invoked b rule? + prc.parentCtx = parent - prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) + // What state invoked the rule associated with b context? + // The "return address" is the followState of invokingState + // If parent is nil, b should be -1. + if parent == nil { + prc.invokingState = -1 + } else { + prc.invokingState = invokingStateNumber + } prc.RuleIndex = -1 // * If we are debugging or building a parse tree for a Visitor, @@ -56,8 +72,6 @@ func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) // The exception that forced prc rule to return. If the rule successfully // completed, prc is {@code nil}. prc.exception = nil - - return prc } func (prc *BaseParserRuleContext) SetException(e RecognitionException) { @@ -90,14 +104,15 @@ func (prc *BaseParserRuleContext) GetText() string { return s } -// Double dispatch methods for listeners -func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { +// EnterRule is called when any rule is entered. +func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) { } -func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { +// ExitRule is called when any rule is exited. +func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) { } -// * Does not set parent link other add methods do that/// +// * Does not set parent link other add methods do that func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { if prc.children == nil { prc.children = make([]Tree, 0) @@ -120,10 +135,9 @@ func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { return child } -// * Used by EnterOuterAlt to toss out a RuleContext previously added as -// we entered a rule. 
If we have // label, we will need to remove -// generic ruleContext object. -// / +// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as +// we entered a rule. If we have a label, we will need to remove +// the generic ruleContext object. func (prc *BaseParserRuleContext) RemoveLastChild() { if prc.children != nil && len(prc.children) > 0 { prc.children = prc.children[0 : len(prc.children)-1] @@ -293,7 +307,7 @@ func (prc *BaseParserRuleContext) GetChildCount() int { return len(prc.children) } -func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { +func (prc *BaseParserRuleContext) GetSourceInterval() Interval { if prc.start == nil || prc.stop == nil { return TreeInvalidInterval } @@ -340,6 +354,50 @@ func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) s return s } +func (prc *BaseParserRuleContext) SetParent(v Tree) { + if v == nil { + prc.parentCtx = nil + } else { + prc.parentCtx = v.(RuleContext) + } +} + +func (prc *BaseParserRuleContext) GetInvokingState() int { + return prc.invokingState +} + +func (prc *BaseParserRuleContext) SetInvokingState(t int) { + prc.invokingState = t +} + +func (prc *BaseParserRuleContext) GetRuleIndex() int { + return prc.RuleIndex +} + +func (prc *BaseParserRuleContext) GetAltNumber() int { + return ATNInvalidAltNumber +} + +func (prc *BaseParserRuleContext) SetAltNumber(_ int) {} + +// IsEmpty returns true if the context of b is empty. +// +// A context is empty if there is no invoking state, meaning nobody calls +// current context. +func (prc *BaseParserRuleContext) IsEmpty() bool { + return prc.invokingState == -1 +} + +// GetParent returns the combined text of all child nodes. This method only considers +// tokens which have been added to the parse tree. +// +// Since tokens on hidden channels (e.g. whitespace or comments) are not +// added to the parse trees, they will not appear in the output of this +// method. +func (prc *BaseParserRuleContext) GetParent() Tree { + return prc.parentCtx +} + var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1) type InterpreterRuleContext interface { @@ -350,6 +408,7 @@ type BaseInterpreterRuleContext struct { *BaseParserRuleContext } +//goland:noinspection GoUnusedExportedFunction func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { prc := new(BaseInterpreterRuleContext) diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go new file mode 100644 index 00000000..c1b80cc1 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go @@ -0,0 +1,727 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "fmt" + "golang.org/x/exp/slices" + "strconv" +) + +var _emptyPredictionContextHash int + +func init() { + _emptyPredictionContextHash = murmurInit(1) + _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) +} + +func calculateEmptyHash() int { + return _emptyPredictionContextHash +} + +const ( + // BasePredictionContextEmptyReturnState represents {@code $} in an array in full context mode, $ + // doesn't mean wildcard: + // + // $ + x = [$,x] + // + // Here, + // + // $ = EmptyReturnState + BasePredictionContextEmptyReturnState = 0x7FFFFFFF +) + +// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here +// +//goland:noinspection GoUnusedGlobalVariable +var ( + BasePredictionContextglobalNodeCount = 1 + BasePredictionContextid = BasePredictionContextglobalNodeCount +) + +const ( + PredictionContextEmpty = iota + PredictionContextSingleton + PredictionContextArray +) + +// PredictionContext is a go idiomatic implementation of PredictionContext that does not rty to +// emulate inheritance from Java, and can be used without an interface definition. An interface +// is not required because no user code will ever need to implement this interface. +type PredictionContext struct { + cachedHash int + pcType int + parentCtx *PredictionContext + returnState int + parents []*PredictionContext + returnStates []int +} + +func NewEmptyPredictionContext() *PredictionContext { + nep := &PredictionContext{} + nep.cachedHash = calculateEmptyHash() + nep.pcType = PredictionContextEmpty + nep.returnState = BasePredictionContextEmptyReturnState + return nep +} + +func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext { + pc := &PredictionContext{} + pc.pcType = PredictionContextSingleton + pc.returnState = returnState + pc.parentCtx = parent + if parent != nil { + pc.cachedHash = calculateHash(parent, returnState) + } else { + pc.cachedHash = calculateEmptyHash() + } + return pc +} + +func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext { + if returnState == BasePredictionContextEmptyReturnState && parent == nil { + // someone can pass in the bits of an array ctx that mean $ + return BasePredictionContextEMPTY + } + return NewBaseSingletonPredictionContext(parent, returnState) +} + +func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext { + // Parent can be nil only if full ctx mode and we make an array + // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using + // nil parent and + // returnState == {@link //EmptyReturnState}. 
+ hash := murmurInit(1) + for _, parent := range parents { + hash = murmurUpdate(hash, parent.Hash()) + } + for _, returnState := range returnStates { + hash = murmurUpdate(hash, returnState) + } + hash = murmurFinish(hash, len(parents)<<1) + + nec := &PredictionContext{} + nec.cachedHash = hash + nec.pcType = PredictionContextArray + nec.parents = parents + nec.returnStates = returnStates + return nec +} + +func (p *PredictionContext) Hash() int { + return p.cachedHash +} + +func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool { + switch p.pcType { + case PredictionContextEmpty: + otherP := other.(*PredictionContext) + return other == nil || otherP == nil || otherP.isEmpty() + case PredictionContextSingleton: + return p.SingletonEquals(other) + case PredictionContextArray: + return p.ArrayEquals(other) + } + return false +} + +func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool { + if o == nil { + return false + } + other := o.(*PredictionContext) + if other == nil || other.pcType != PredictionContextArray { + return false + } + if p.cachedHash != other.Hash() { + return false // can't be same if hash is different + } + + // Must compare the actual array elements and not just the array address + // + return slices.Equal(p.returnStates, other.returnStates) && + slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool { + return x.Equals(y) + }) +} + +func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool { + if other == nil { + return false + } + otherP := other.(*PredictionContext) + if otherP == nil { + return false + } + + if p.cachedHash != otherP.Hash() { + return false // Can't be same if hash is different + } + + if p.returnState != otherP.getReturnState(0) { + return false + } + + // Both parents must be nil if one is + if p.parentCtx == nil { + return otherP.parentCtx == nil + } + + return p.parentCtx.Equals(otherP.parentCtx) +} + +func (p *PredictionContext) GetParent(i int) *PredictionContext { + switch p.pcType { + case PredictionContextEmpty: + return nil + case PredictionContextSingleton: + return p.parentCtx + case PredictionContextArray: + return p.parents[i] + } + return nil +} + +func (p *PredictionContext) getReturnState(i int) int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates[i] + default: + return p.returnState + } +} + +func (p *PredictionContext) GetReturnStates() []int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates + default: + return []int{p.returnState} + } +} + +func (p *PredictionContext) length() int { + switch p.pcType { + case PredictionContextArray: + return len(p.returnStates) + default: + return 1 + } +} + +func (p *PredictionContext) hasEmptyPath() bool { + switch p.pcType { + case PredictionContextSingleton: + return p.returnState == BasePredictionContextEmptyReturnState + } + return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState +} + +func (p *PredictionContext) String() string { + switch p.pcType { + case PredictionContextEmpty: + return "$" + case PredictionContextSingleton: + var up string + + if p.parentCtx == nil { + up = "" + } else { + up = p.parentCtx.String() + } + + if len(up) == 0 { + if p.returnState == BasePredictionContextEmptyReturnState { + return "$" + } + + return strconv.Itoa(p.returnState) + } + + return strconv.Itoa(p.returnState) + " " + up + case PredictionContextArray: + if p.isEmpty() { + return "[]" + } + + s := "[" + for i := 0; i < 
len(p.returnStates); i++ { + if i > 0 { + s = s + ", " + } + if p.returnStates[i] == BasePredictionContextEmptyReturnState { + s = s + "$" + continue + } + s = s + strconv.Itoa(p.returnStates[i]) + if !p.parents[i].isEmpty() { + s = s + " " + p.parents[i].String() + } else { + s = s + "nil" + } + } + return s + "]" + + default: + return "unknown" + } +} + +func (p *PredictionContext) isEmpty() bool { + switch p.pcType { + case PredictionContextEmpty: + return true + case PredictionContextArray: + // since EmptyReturnState can only appear in the last position, we + // don't need to verify that size==1 + return p.returnStates[0] == BasePredictionContextEmptyReturnState + default: + return false + } +} + +func (p *PredictionContext) Type() int { + return p.pcType +} + +func calculateHash(parent *PredictionContext, returnState int) int { + h := murmurInit(1) + h = murmurUpdate(h, parent.Hash()) + h = murmurUpdate(h, returnState) + return murmurFinish(h, 2) +} + +// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. +// Return {@link //EMPTY} if {@code outerContext} is empty or nil. +// / +func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + // if we are in RuleContext of start rule, s, then BasePredictionContext + // is EMPTY. Nobody called us. (if we are empty, return empty) + if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { + return BasePredictionContextEMPTY + } + // If we have a parent, convert it to a BasePredictionContext graph + parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) + state := a.states[outerContext.GetInvokingState()] + transition := state.GetTransitions()[0] + + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) +} + +func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + + // Share same graph if both same + // + if a == b || a.Equals(b) { + return a + } + + if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton { + return mergeSingletons(a, b, rootIsWildcard, mergeCache) + } + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as wildcard + if rootIsWildcard { + if a.isEmpty() { + return a + } + if b.isEmpty() { + return b + } + } + + // Convert either Singleton or Empty to arrays, so that we can merge them + // + ara := convertToArray(a) + arb := convertToArray(b) + return mergeArrays(ara, arb, rootIsWildcard, mergeCache) +} + +func convertToArray(pc *PredictionContext) *PredictionContext { + switch pc.Type() { + case PredictionContextEmpty: + return NewArrayPredictionContext([]*PredictionContext{}, []int{}) + case PredictionContextSingleton: + return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)}) + default: + // Already an array + } + return pc +} + +// mergeSingletons merges two Singleton [PredictionContext] instances. +// +// Stack tops equal, parents merge is same return left graph. +//

+// Same stack top, parents differ merge parents giving array node, then
+// remainders of those graphs. A new root node is created to point to the
+// merged parents.
+//
+// Different stack tops pointing to same parent. Make array node for the
+// root where both element in the root point to the same (original)
+// parent.
+//
+// Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// @param mergeCache +// / +func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + if mergeCache != nil { + previous, present := mergeCache.Get(a, b) + if present { + return previous + } + previous, present = mergeCache.Get(b, a) + if present { + return previous + } + } + + rootMerge := mergeRoot(a, b, rootIsWildcard) + if rootMerge != nil { + if mergeCache != nil { + mergeCache.Put(a, b, rootMerge) + } + return rootMerge + } + if a.returnState == b.returnState { + parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) + // if parent is same as existing a or b parent or reduced to a parent, + // return it + if parent.Equals(a.parentCtx) { + return a // ax + bx = ax, if a=b + } + if parent.Equals(b.parentCtx) { + return b // ax + bx = bx, if a=b + } + // else: ax + ay = a'[x,y] + // merge parents x and y, giving array node with x,y then remainders + // of those graphs. dup a, a' points at merged array. + // New joined parent so create a new singleton pointing to it, a' + spc := SingletonBasePredictionContextCreate(parent, a.returnState) + if mergeCache != nil { + mergeCache.Put(a, b, spc) + } + return spc + } + // a != b payloads differ + // see if we can collapse parents due to $+x parents if local ctx + var singleParent *PredictionContext + if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) { // ax + + // bx = + // [a,b]x + singleParent = a.parentCtx + } + if singleParent != nil { // parents are same + // sort payloads and use same parent + payloads := []int{a.returnState, b.returnState} + if a.returnState > b.returnState { + payloads[0] = b.returnState + payloads[1] = a.returnState + } + parents := []*PredictionContext{singleParent, singleParent} + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.Put(a, b, apc) + } + return apc + } + // parents differ and can't merge them. Just pack together + // into array can't merge. + // ax + by = [ax,by] + payloads := []int{a.returnState, b.returnState} + parents := []*PredictionContext{a.parentCtx, b.parentCtx} + if a.returnState > b.returnState { // sort by payload + payloads[0] = b.returnState + payloads[1] = a.returnState + parents = []*PredictionContext{b.parentCtx, a.parentCtx} + } + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.Put(a, b, apc) + } + return apc +} + +// Handle case where at least one of {@code a} or {@code b} is +// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used +// to represent {@link //EMPTY}. +// +//

+// Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
+//
+// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+// {@code //EMPTY} return left graph.
+//
+// Special case of last merge if local context.
+//
+// Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+// Must keep all contexts {@link //EMPTY} in array is a special value (and
+// nil parent).
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// / +func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext { + if rootIsWildcard { + if a.pcType == PredictionContextEmpty { + return BasePredictionContextEMPTY // // + b =// + } + if b.pcType == PredictionContextEmpty { + return BasePredictionContextEMPTY // a +// =// + } + } else { + if a.isEmpty() && b.isEmpty() { + return BasePredictionContextEMPTY // $ + $ = $ + } else if a.isEmpty() { // $ + x = [$,x] + payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []*PredictionContext{b.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } else if b.isEmpty() { // x + $ = [$,x] ($ is always first if present) + payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []*PredictionContext{a.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } + } + return nil +} + +// Merge two {@link ArrayBasePredictionContext} instances. +// +//

+// Different tops, different parents.
+//
+// Shared top, same parents.
+//
+// Shared top, different parents.
+//
+// Shared top, all shared parents.
+//
+// Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
+// +//goland:noinspection GoBoolExpressions +func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + if mergeCache != nil { + previous, present := mergeCache.Get(a, b) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") + } + return previous + } + previous, present = mergeCache.Get(b, a) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") + } + return previous + } + } + // merge sorted payloads a + b => M + i := 0 // walks a + j := 0 // walks b + k := 0 // walks target M array + + mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) + mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates)) + // walk and merge to yield mergedParents, mergedReturnStates + for i < len(a.returnStates) && j < len(b.returnStates) { + aParent := a.parents[i] + bParent := b.parents[j] + if a.returnStates[i] == b.returnStates[j] { + // same payload (stack tops are equal), must yield merged singleton + payload := a.returnStates[i] + // $+$ = $ + bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil + axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax + // -> + // ax + if bothDollars || axAX { + mergedParents[k] = aParent // choose left + mergedReturnStates[k] = payload + } else { // ax+ay -> a'[x,y] + mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) + mergedParents[k] = mergedParent + mergedReturnStates[k] = payload + } + i++ // hop over left one as usual + j++ // but also Skip one in right side since we merge + } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M + mergedParents[k] = aParent + mergedReturnStates[k] = a.returnStates[i] + i++ + } else { // b > a, copy b[j] to M + mergedParents[k] = bParent + mergedReturnStates[k] = b.returnStates[j] + j++ + } + k++ + } + // copy over any payloads remaining in either array + if i < len(a.returnStates) { + for p := i; p < len(a.returnStates); p++ { + mergedParents[k] = a.parents[p] + mergedReturnStates[k] = a.returnStates[p] + k++ + } + } else { + for p := j; p < len(b.returnStates); p++ { + mergedParents[k] = b.parents[p] + mergedReturnStates[k] = b.returnStates[p] + k++ + } + } + // trim merged if we combined a few that had same stack tops + if k < len(mergedParents) { // write index < last position trim + if k == 1 { // for just one merged element, return singleton top + pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) + if mergeCache != nil { + mergeCache.Put(a, b, pc) + } + return pc + } + mergedParents = mergedParents[0:k] + mergedReturnStates = mergedReturnStates[0:k] + } + + M := NewArrayPredictionContext(mergedParents, mergedReturnStates) + + // if we created same array as a or b, return that instead + // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation + if M.Equals(a) { + if mergeCache != nil { + mergeCache.Put(a, b, a) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a") + } + return a + } + if M.Equals(b) { + if mergeCache != nil { + mergeCache.Put(a, b, b) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b") + } + 
return b + } + combineCommonParents(&mergedParents) + + if mergeCache != nil { + mergeCache.Put(a, b, M) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String()) + } + return M +} + +// Make pass over all M parents and merge any Equals() ones. +// Note that we pass a pointer to the slice as we want to modify it in place. +// +//goland:noinspection GoUnusedFunction +func combineCommonParents(parents *[]*PredictionContext) { + uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext") + + for p := 0; p < len(*parents); p++ { + parent := (*parents)[p] + _, _ = uniqueParents.Put(parent) + } + for q := 0; q < len(*parents); q++ { + pc, _ := uniqueParents.Get((*parents)[q]) + (*parents)[q] = pc + } +} + +func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext { + if context.isEmpty() { + return context + } + existing, present := visited.Get(context) + if present { + return existing + } + + existing, present = contextCache.Get(context) + if present { + visited.Put(context, existing) + return existing + } + changed := false + parents := make([]*PredictionContext, context.length()) + for i := 0; i < len(parents); i++ { + parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) + if changed || !parent.Equals(context.GetParent(i)) { + if !changed { + parents = make([]*PredictionContext, context.length()) + for j := 0; j < context.length(); j++ { + parents[j] = context.GetParent(j) + } + changed = true + } + parents[i] = parent + } + } + if !changed { + contextCache.add(context) + visited.Put(context, context) + return context + } + var updated *PredictionContext + if len(parents) == 0 { + updated = BasePredictionContextEMPTY + } else if len(parents) == 1 { + updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) + } else { + updated = NewArrayPredictionContext(parents, context.GetReturnStates()) + } + contextCache.add(updated) + visited.Put(updated, updated) + visited.Put(context, updated) + + return updated +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go new file mode 100644 index 00000000..25dfb11e --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go @@ -0,0 +1,48 @@ +package antlr + +var BasePredictionContextEMPTY = &PredictionContext{ + cachedHash: calculateEmptyHash(), + pcType: PredictionContextEmpty, + returnState: BasePredictionContextEmptyReturnState, +} + +// PredictionContextCache is Used to cache [PredictionContext] objects. It is used for the shared +// context cash associated with contexts in DFA states. This cache +// can be used for both lexers and parsers. +type PredictionContextCache struct { + cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]] +} + +func NewPredictionContextCache() *PredictionContextCache { + return &PredictionContextCache{ + cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"), + } +} + +// Add a context to the cache and return it. If the context already exists, +// return that one instead and do not add a new context to the cache. 
+// Protect shared cache from unsafe thread access. +func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext { + if ctx.isEmpty() { + return BasePredictionContextEMPTY + } + + // Put will return the existing entry if it is present (note this is done via Equals, not whether it is + // the same pointer), otherwise it will add the new entry and return that. + // + existing, present := p.cache.Get(ctx) + if present { + return existing + } + p.cache.Put(ctx, ctx) + return ctx +} + +func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) { + pc, exists := p.cache.Get(ctx) + return pc, exists +} + +func (p *PredictionContextCache) length() int { + return p.cache.Len() +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go new file mode 100644 index 00000000..3f85a6a5 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go @@ -0,0 +1,536 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// This enumeration defines the prediction modes available in ANTLR 4 along with +// utility methods for analyzing configuration sets for conflicts and/or +// ambiguities. + +const ( + // PredictionModeSLL represents the SLL(*) prediction mode. + // This prediction mode ignores the current + // parser context when making predictions. This is the fastest prediction + // mode, and provides correct results for many grammars. This prediction + // mode is more powerful than the prediction mode provided by ANTLR 3, but + // may result in syntax errors for grammar and input combinations which are + // not SLL. + // + // When using this prediction mode, the parser will either return a correct + // parse tree (i.e. the same parse tree that would be returned with the + // [PredictionModeLL] prediction mode), or it will Report a syntax error. If a + // syntax error is encountered when using the SLL prediction mode, + // it may be due to either an actual syntax error in the input or indicate + // that the particular combination of grammar and input requires the more + // powerful LL prediction abilities to complete successfully. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeSLL = 0 + + // PredictionModeLL represents the LL(*) prediction mode. + // This prediction mode allows the current parser + // context to be used for resolving SLL conflicts that occur during + // prediction. This is the fastest prediction mode that guarantees correct + // parse results for all combinations of grammars with syntactically correct + // inputs. + // + // When using this prediction mode, the parser will make correct decisions + // for all syntactically-correct grammar and input combinations. However, in + // cases where the grammar is truly ambiguous this prediction mode might not + // report a precise answer for exactly which alternatives are + // ambiguous. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeLL = 1 + + // PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode + // with exact ambiguity detection. 
+ // + // In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode, + // this prediction mode instructs the prediction algorithm to determine the + // complete and exact set of ambiguous alternatives for every ambiguous + // decision encountered while parsing. + // + // This prediction mode may be used for diagnosing ambiguities during + // grammar development. Due to the performance overhead of calculating sets + // of ambiguous alternatives, this prediction mode should be avoided when + // the exact results are not necessary. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeLLExactAmbigDetection = 2 +) + +// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition. +// +// This method computes the SLL prediction termination condition for both of +// the following cases: +// +// - The usual SLL+LL fallback upon SLL conflict +// - Pure SLL without LL fallback +// +// # Combined SLL+LL Parsing +// +// When LL-fallback is enabled upon SLL conflict, correct predictions are +// ensured regardless of how the termination condition is computed by this +// method. Due to the substantially higher cost of LL prediction, the +// prediction should only fall back to LL when the additional lookahead +// cannot lead to a unique SLL prediction. +// +// Assuming combined SLL+LL parsing, an SLL configuration set with only +// conflicting subsets should fall back to full LL, even if the +// configuration sets don't resolve to the same alternative, e.g. +// +// {1,2} and {3,4} +// +// If there is at least one non-conflicting +// configuration, SLL could continue with the hopes that more lookahead will +// resolve via one of those non-conflicting configurations. +// +// Here's the prediction termination rule them: SLL (for SLL+LL parsing) +// stops when it sees only conflicting configuration subsets. In contrast, +// full LL keeps going when there is uncertainty. +// +// # Heuristic +// +// As a heuristic, we stop prediction when we see any conflicting subset +// unless we see a state that only has one alternative associated with it. +// The single-alt-state thing lets prediction continue upon rules like +// (otherwise, it would admit defeat too soon): +// +// [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ; +// +// When the [ATN] simulation reaches the state before ';', it has a +// [DFA] state that looks like: +// +// [12|1|[], 6|2|[], 12|2|[]] +// +// Naturally +// +// 12|1|[] and 12|2|[] +// +// conflict, but we cannot stop processing this node because alternative to has another way to continue, +// via +// +// [6|2|[]] +// +// It also let's us continue for this rule: +// +// [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ; +// +// After Matching input A, we reach the stop state for rule A, state 1. +// State 8 is the state immediately before B. Clearly alternatives 1 and 2 +// conflict and no amount of further lookahead will separate the two. +// However, alternative 3 will be able to continue, and so we do not stop +// working on this state. In the previous example, we're concerned with +// states associated with the conflicting alternatives. Here alt 3 is not +// associated with the conflicting configs, but since we can continue +// looking for input reasonably, don't declare the state done. 
+// +// # Pure SLL Parsing +// +// To handle pure SLL parsing, all we have to do is make sure that we +// combine stack contexts for configurations that differ only by semantic +// predicate. From there, we can do the usual SLL termination heuristic. +// +// # Predicates in SLL+LL Parsing +// +// SLL decisions don't evaluate predicates until after they reach [DFA] stop +// states because they need to create the [DFA] cache that works in all +// semantic situations. In contrast, full LL evaluates predicates collected +// during start state computation, so it can ignore predicates thereafter. +// This means that SLL termination detection can totally ignore semantic +// predicates. +// +// Implementation-wise, [ATNConfigSet] combines stack contexts but not +// semantic predicate contexts, so we might see two configurations like the +// following: +// +// (s, 1, x, {}), (s, 1, x', {p}) +// +// Before testing these configurations against others, we have to merge +// x and x' (without modifying the existing configurations). +// For example, we test (x+x')==x” when looking for conflicts in +// the following configurations: +// +// (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {}) +// +// If the configuration set has predicates (as indicated by +// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of +// the configurations to strip out all the predicates so that a standard +// [ATNConfigSet] will merge everything ignoring predicates. +func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool { + + // Configs in rule stop states indicate reaching the end of the decision + // rule (local context) or end of start rule (full context). If all + // configs meet this condition, then none of the configurations is able + // to Match additional input, so we terminate prediction. + // + if PredictionModeallConfigsInRuleStopStates(configs) { + return true + } + + // pure SLL mode parsing + if mode == PredictionModeSLL { + // Don't bother with combining configs from different semantic + // contexts if we can fail over to full LL costs more time + // since we'll often fail over anyway. + if configs.hasSemanticContext { + // dup configs, tossing out semantic predicates + dup := NewATNConfigSet(false) + for _, c := range configs.configs { + + // NewATNConfig({semanticContext:}, c) + c = NewATNConfig2(c, SemanticContextNone) + dup.Add(c, nil) + } + configs = dup + } + // now we have combined contexts for configs with dissimilar predicates + } + // pure SLL or combined SLL+LL mode parsing + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) +} + +// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a +// [RuleStopState]. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// The func returns true if any configuration in the supplied configs is in a [RuleStopState] +func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool { + for _, c := range configs.configs { + if _, ok := c.GetState().(*RuleStopState); ok { + return true + } + } + return false +} + +// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a +// [RuleStopState]. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). 
+// +// the func returns true if all configurations in configs are in a +// [RuleStopState] +func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool { + + for _, c := range configs.configs { + if _, ok := c.GetState().(*RuleStopState); !ok { + return false + } + } + return true +} + +// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination. +// +// Can we stop looking ahead during [ATN] simulation or is there some +// uncertainty as to which alternative we will ultimately pick, after +// consuming more input? Even if there are partial conflicts, we might know +// that everything is going to resolve to the same minimum alternative. That +// means we can stop since no more lookahead will change that fact. On the +// other hand, there might be multiple conflicts that resolve to different +// minimums. That means we need more look ahead to decide which of those +// alternatives we should predict. +// +// The basic idea is to split the set of configurations 'C', into +// conflicting subsets (s, _, ctx, _) and singleton subsets with +// non-conflicting configurations. Two configurations conflict if they have +// identical [ATNConfig].state and [ATNConfig].context values +// but a different [ATNConfig].alt value, e.g. +// +// (s, i, ctx, _) +// +// and +// +// (s, j, ctx, _) ; for i != j +// +// Reduce these configuration subsets to the set of possible alternatives. +// You can compute the alternative subsets in one pass as follows: +// +// A_s,ctx = {i | (s, i, ctx, _)} +// +// for each configuration in C holding s and ctx fixed. +// +// Or in pseudo-code: +// +// for each configuration c in C: +// map[c] U = c.ATNConfig.alt alt // map hash/equals uses s and x, not alt and not pred +// +// The values in map are the set of +// +// A_s,ctx +// +// sets. +// +// If +// +// |A_s,ctx| = 1 +// +// then there is no conflict associated with s and ctx. +// +// Reduce the subsets to singletons by choosing a minimum of each subset. If +// the union of these alternative subsets is a singleton, then no amount of +// further lookahead will help us. We will always pick that alternative. If, +// however, there is more than one alternative, then we are uncertain which +// alternative to predict and must continue looking for resolution. We may +// or may not discover an ambiguity in the future, even if there are no +// conflicting subsets this round. +// +// The biggest sin is to terminate early because it means we've made a +// decision but were uncertain as to the eventual outcome. We haven't used +// enough lookahead. On the other hand, announcing a conflict too late is no +// big deal; you will still have the conflict. It's just inefficient. It +// might even look until the end of file. +// +// No special consideration for semantic predicates is required because +// predicates are evaluated on-the-fly for full LL prediction, ensuring that +// no configuration contains a semantic context during the termination +// check. +// +// # Conflicting Configs +// +// Two configurations: +// +// (s, i, x) and (s, j, x') +// +// conflict when i != j but x = x'. Because we merge all +// (s, i, _) configurations together, that means that there are at +// most n configurations associated with state s for +// n possible alternatives in the decision. The merged stacks +// complicate the comparison of configuration contexts x and x'. +// +// Sam checks to see if one is a subset of the other by calling +// merge and checking to see if the merged result is either x or x'. 
+// If the x associated with lowest alternative i +// is the superset, then i is the only possible prediction since the +// others resolve to min(i) as well. However, if x is +// associated with j > i then at least one stack configuration for +// j is not in conflict with alternative i. The algorithm +// should keep going, looking for more lookahead due to the uncertainty. +// +// For simplicity, I'm doing an equality check between x and +// x', which lets the algorithm continue to consume lookahead longer +// than necessary. The reason I like the equality is of course the +// simplicity but also because that is the test you need to detect the +// alternatives that are actually in conflict. +// +// # Continue/Stop Rule +// +// Continue if the union of resolved alternative sets from non-conflicting and +// conflicting alternative subsets has more than one alternative. We are +// uncertain about which alternative to predict. +// +// The complete set of alternatives, +// +// [i for (_, i, _)] +// +// tells us which alternatives are still in the running for the amount of input we've +// consumed at this point. The conflicting sets let us to strip away +// configurations that won't lead to more states because we resolve +// conflicts to the configuration with a minimum alternate for the +// conflicting set. +// +// Cases +// +// - no conflicts and more than 1 alternative in set => continue +// - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set +// {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue +// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s”, 1, z) yields non-conflicting set +// {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1 +// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets +// {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2} +// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets +// {1} ∪ {2} = {1,2} => continue +// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets +// {1} ∪ {2} = {1,2} => continue +// - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets +// {1} ∪ {3} = {1,3} => continue +// +// # Exact Ambiguity Detection +// +// If all states report the same conflicting set of alternatives, then we +// know we have the exact ambiguity set: +// +// |A_i| > 1 +// +// and +// +// A_i = A_j ; for all i, j +// +// In other words, we continue examining lookahead until all A_i +// have more than one alternative and all A_i are the same. If +// +// A={{1,2}, {1,3}} +// +// then regular LL prediction would terminate because the resolved set is {1}. +// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and +// two or one and three so we keep going. We can only stop prediction when +// we need exact ambiguity detection when the sets look like: +// +// A={{1,2}} +// +// or +// +// {{1,2},{1,2}}, etc... +func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { + return PredictionModegetSingleViableAlt(altsets) +} + +// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more +// than one alternative. 
+// +// The func returns true if every [BitSet] in altsets has +// [BitSet].cardinality cardinality > 1 +func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { + return !PredictionModehasNonConflictingAltSet(altsets) +} + +// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains +// exactly one alternative. +// +// The func returns true if altsets contains at least one [BitSet] with +// [BitSet].cardinality cardinality 1 +func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() == 1 { + return true + } + } + return false +} + +// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains +// more than one alternative. +// +// The func returns true if altsets contains a [BitSet] with +// [BitSet].cardinality cardinality > 1, otherwise false +func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() > 1 { + return true + } + } + return false +} + +// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent. +// +// The func returns true if every member of altsets is equal to the others. +func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { + var first *BitSet + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if first == nil { + first = alts + } else if alts != first { + return false + } + } + + return true +} + +// PredictionModegetUniqueAlt returns the unique alternative predicted by all alternative subsets in +// altsets. If no such alternative exists, this method returns +// [ATNInvalidAltNumber]. +// +// @param altsets a collection of alternative subsets +func PredictionModegetUniqueAlt(altsets []*BitSet) int { + all := PredictionModeGetAlts(altsets) + if all.length() == 1 { + return all.minValue() + } + + return ATNInvalidAltNumber +} + +// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of +// alternative subsets. This method returns the union of each [BitSet] +// in altsets, being the set of represented alternatives in altsets. +func PredictionModeGetAlts(altsets []*BitSet) *BitSet { + all := NewBitSet() + for _, alts := range altsets { + all.or(alts) + } + return all +} + +// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. +// +// for each configuration c in configs: +// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred +func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet { + configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()") + + for _, c := range configs.configs { + + alts, ok := configToAlts.Get(c) + if !ok { + alts = NewBitSet() + configToAlts.Put(c, alts) + } + alts.add(c.GetAlt()) + } + + return configToAlts.Values() +} + +// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. 
+// +// for each configuration c in configs: +// map[c.ATNConfig.state] U= c.ATNConfig.alt} +func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict { + m := NewAltDict() + + for _, c := range configs.configs { + alts := m.Get(c.GetState().String()) + if alts == nil { + alts = NewBitSet() + m.put(c.GetState().String(), alts) + } + alts.(*BitSet).add(c.GetAlt()) + } + return m +} + +func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool { + values := PredictionModeGetStateToAltMap(configs).values() + for i := 0; i < len(values); i++ { + if values[i].(*BitSet).length() == 1 { + return true + } + } + return false +} + +// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets +// if there is one. +// +// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet +func PredictionModegetSingleViableAlt(altsets []*BitSet) int { + result := ATNInvalidAltNumber + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + minAlt := alts.minValue() + if result == ATNInvalidAltNumber { + result = minAlt + } else if result != minAlt { // more than 1 viable alt + return ATNInvalidAltNumber + } + } + return result +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go new file mode 100644 index 00000000..2e0b504f --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go @@ -0,0 +1,241 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strings" + + "strconv" +) + +type Recognizer interface { + GetLiteralNames() []string + GetSymbolicNames() []string + GetRuleNames() []string + + Sempred(RuleContext, int, int) bool + Precpred(RuleContext, int) bool + + GetState() int + SetState(int) + Action(RuleContext, int, int) + AddErrorListener(ErrorListener) + RemoveErrorListeners() + GetATN() *ATN + GetErrorListenerDispatch() ErrorListener + HasError() bool + GetError() RecognitionException + SetError(RecognitionException) +} + +type BaseRecognizer struct { + listeners []ErrorListener + state int + + RuleNames []string + LiteralNames []string + SymbolicNames []string + GrammarFileName string + SynErr RecognitionException +} + +func NewBaseRecognizer() *BaseRecognizer { + rec := new(BaseRecognizer) + rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} + rec.state = -1 + return rec +} + +//goland:noinspection GoUnusedGlobalVariable +var tokenTypeMapCache = make(map[string]int) + +//goland:noinspection GoUnusedGlobalVariable +var ruleIndexMapCache = make(map[string]int) + +func (b *BaseRecognizer) checkVersion(toolVersion string) { + runtimeVersion := "4.12.0" + if runtimeVersion != toolVersion { + fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) + } +} + +func (b *BaseRecognizer) SetError(err RecognitionException) { + b.SynErr = err +} + +func (b *BaseRecognizer) HasError() bool { + return b.SynErr != nil +} + +func (b *BaseRecognizer) GetError() RecognitionException { + return b.SynErr +} + +func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) { + panic("action not implemented on Recognizer!") +} + +func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { + b.listeners = append(b.listeners, listener) +} 
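The Recognizer interface and BaseRecognizer above are what ANTLR-generated lexers and parsers embed, and AddErrorListener/RemoveErrorListeners are the hooks callers use to redirect error reporting away from the default console listener. A minimal sketch under those assumptions; the collectingListener type, attachCollector helper, and example package are illustrative names, not part of the vendored library:

```go
package example

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// collectingListener gathers syntax errors instead of printing them. Embedding
// DefaultErrorListener keeps no-op implementations of the other ErrorListener
// methods; only SyntaxError is overridden.
type collectingListener struct {
	*antlr.DefaultErrorListener
	msgs []string
}

func (l *collectingListener) SyntaxError(_ antlr.Recognizer, _ interface{}, line, column int, msg string, _ antlr.RecognitionException) {
	l.msgs = append(l.msgs, fmt.Sprintf("%d:%d %s", line, column, msg))
}

// attachCollector swaps the default console listener on any Recognizer
// (a generated lexer or parser) for the collecting listener and returns it.
func attachCollector(r antlr.Recognizer) *collectingListener {
	l := &collectingListener{DefaultErrorListener: antlr.NewDefaultErrorListener()}
	r.RemoveErrorListeners()
	r.AddErrorListener(l)
	return l
}
```

After a parse, l.msgs holds whatever the recognizer reported, in the order the errors occurred.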
+ +func (b *BaseRecognizer) RemoveErrorListeners() { + b.listeners = make([]ErrorListener, 0) +} + +func (b *BaseRecognizer) GetRuleNames() []string { + return b.RuleNames +} + +func (b *BaseRecognizer) GetTokenNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetSymbolicNames() []string { + return b.SymbolicNames +} + +func (b *BaseRecognizer) GetLiteralNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetState() int { + return b.state +} + +func (b *BaseRecognizer) SetState(v int) { + b.state = v +} + +//func (b *Recognizer) GetTokenTypeMap() { +// var tokenNames = b.GetTokenNames() +// if (tokenNames==nil) { +// panic("The current recognizer does not provide a list of token names.") +// } +// var result = tokenTypeMapCache[tokenNames] +// if(result==nil) { +// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) +// result.EOF = TokenEOF +// tokenTypeMapCache[tokenNames] = result +// } +// return result +//} + +// GetRuleIndexMap Get a map from rule names to rule indexes. +// +// Used for XPath and tree pattern compilation. +// +// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed. +func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { + + panic("Method not defined!") + // var ruleNames = b.GetRuleNames() + // if (ruleNames==nil) { + // panic("The current recognizer does not provide a list of rule names.") + // } + // + // var result = ruleIndexMapCache[ruleNames] + // if(result==nil) { + // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) + // ruleIndexMapCache[ruleNames] = result + // } + // return result +} + +// GetTokenType get the token type based upon its name +func (b *BaseRecognizer) GetTokenType(_ string) int { + panic("Method not defined!") + // var ttype = b.GetTokenTypeMap()[tokenName] + // if (ttype !=nil) { + // return ttype + // } else { + // return TokenInvalidType + // } +} + +//func (b *Recognizer) GetTokenTypeMap() map[string]int { +// Vocabulary vocabulary = getVocabulary() +// +// Synchronized (tokenTypeMapCache) { +// Map result = tokenTypeMapCache.Get(vocabulary) +// if (result == null) { +// result = new HashMap() +// for (int i = 0; i < GetATN().maxTokenType; i++) { +// String literalName = vocabulary.getLiteralName(i) +// if (literalName != null) { +// result.put(literalName, i) +// } +// +// String symbolicName = vocabulary.GetSymbolicName(i) +// if (symbolicName != null) { +// result.put(symbolicName, i) +// } +// } +// +// result.put("EOF", Token.EOF) +// result = Collections.unmodifiableMap(result) +// tokenTypeMapCache.put(vocabulary, result) +// } +// +// return result +// } +//} + +// GetErrorHeader returns the error header, normally line/character position information. +// +// Can be overridden in sub structs embedding BaseRecognizer. +func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { + line := e.GetOffendingToken().GetLine() + column := e.GetOffendingToken().GetColumn() + return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) +} + +// GetTokenErrorDisplay shows how a token should be displayed in an error message. +// +// The default is to display just the text, but during development you might +// want to have a lot of information spit out. Override in that case +// to use t.String() (which, for CommonToken, dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a NewJava type. 
+// +// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific +// implementations of [ANTLRErrorStrategy] may provide a similar +// feature when necessary. For example, see [DefaultErrorStrategy].GetTokenErrorDisplay() +func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { + if t == nil { + return "" + } + s := t.GetText() + if s == "" { + if t.GetTokenType() == TokenEOF { + s = "" + } else { + s = "<" + strconv.Itoa(t.GetTokenType()) + ">" + } + } + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + + return "'" + s + "'" +} + +func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { + return NewProxyErrorListener(b.listeners) +} + +// Sempred embedding structs need to override this if there are sempreds or actions +// that the ATN interpreter needs to execute +func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool { + return true +} + +// Precpred embedding structs need to override this if there are preceding predicates +// that the ATN interpreter needs to execute +func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool { + return true +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go new file mode 100644 index 00000000..f2ad0479 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go @@ -0,0 +1,40 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// RuleContext is a record of a single rule invocation. It knows +// which context invoked it, if any. If there is no parent context, then +// naturally the invoking state is not valid. The parent link +// provides a chain upwards from the current rule invocation to the root +// of the invocation tree, forming a stack. +// +// We actually carry no information about the rule associated with this context (except +// when parsing). We keep only the state number of the invoking state from +// the [ATN] submachine that invoked this. Contrast this with the s +// pointer inside [ParserRuleContext] that tracks the current state +// being "executed" for the current rule. +// +// The parent contexts are useful for computing lookahead sets and +// getting error information. +// +// These objects are used during parsing and prediction. +// For the special case of parsers, we use the struct +// [ParserRuleContext], which embeds a RuleContext. +// +// @see ParserRuleContext +type RuleContext interface { + RuleNode + + GetInvokingState() int + SetInvokingState(int) + + GetRuleIndex() int + IsEmpty() bool + + GetAltNumber() int + SetAltNumber(altNumber int) + + String([]string, RuleContext) string +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go similarity index 92% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go rename to vendor/github.com/antlr4-go/antlr/v4/semantic_context.go index a702e99d..68cb9061 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go +++ b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go @@ -9,14 +9,13 @@ import ( "strconv" ) -// A tree structure used to record the semantic context in which -// an ATN configuration is valid. 
It's either a single predicate,
-// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
+// SemanticContext is a tree structure used to record the semantic context in which
 //
-// I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
-// {@link SemanticContext} within the scope of this outer class.
+// an ATN configuration is valid. It's either a single predicate, +// a conjunction p1 && p2, or a sum of products p1 || p2. // - +// I have scoped the AND, OR, and Predicate subclasses of +// [SemanticContext] within the scope of this outer ``class'' type SemanticContext interface { Equals(other Collectable[SemanticContext]) bool Hash() int @@ -80,7 +79,7 @@ func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { var SemanticContextNone = NewPredicate(-1, -1, false) -func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { +func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext { return p } @@ -198,7 +197,7 @@ type AND struct { func NewAND(a, b SemanticContext) *AND { - operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands") if aa, ok := a.(*AND); ok { for _, o := range aa.opnds { operands.Put(o) @@ -230,9 +229,7 @@ func NewAND(a, b SemanticContext) *AND { vs := operands.Values() opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } + copy(opnds, vs) and := new(AND) and.opnds = opnds @@ -316,12 +313,12 @@ func (a *AND) Hash() int { return murmurFinish(h, len(a.opnds)) } -func (a *OR) Hash() int { - h := murmurInit(41) // Init with a value different from AND - for _, op := range a.opnds { +func (o *OR) Hash() int { + h := murmurInit(41) // Init with o value different from AND + for _, op := range o.opnds { h = murmurUpdate(h, op.Hash()) } - return murmurFinish(h, len(a.opnds)) + return murmurFinish(h, len(o.opnds)) } func (a *AND) String() string { @@ -349,7 +346,7 @@ type OR struct { func NewOR(a, b SemanticContext) *OR { - operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands") if aa, ok := a.(*OR); ok { for _, o := range aa.opnds { operands.Put(o) @@ -382,9 +379,7 @@ func NewOR(a, b SemanticContext) *OR { vs := operands.Values() opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } + copy(opnds, vs) o := new(OR) o.opnds = opnds diff --git a/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/vendor/github.com/antlr4-go/antlr/v4/statistics.go new file mode 100644 index 00000000..70c0673a --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/statistics.go @@ -0,0 +1,281 @@ +//go:build antlr.stats + +package antlr + +import ( + "fmt" + "log" + "os" + "path/filepath" + "sort" + "strconv" + "sync" +) + +// This file allows the user to collect statistics about the runtime of the ANTLR runtime. It is not enabled by default +// and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats build tag. +// + +// Tells various components to collect statistics - because it is only true when this file is included, it will +// allow the compiler to completely eliminate all the code that is only used when collecting statistics. +const collectStats = true + +// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run. +// It is exported so that it can be used by others to look for things that are not already looked for in the +// runtime statistics. 
+type goRunStats struct { + + // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created + // during a run. It is exported so that it can be used by others to look for things that are not already looked for + // within this package. + // + jStats []*JStatRec + jStatsLock sync.RWMutex + topN int + topNByMax []*JStatRec + topNByUsed []*JStatRec + unusedCollections map[CollectionSource]int + counts map[CollectionSource]int +} + +const ( + collectionsFile = "collections" +) + +var ( + Statistics = &goRunStats{ + topN: 10, + } +) + +type statsOption func(*goRunStats) error + +// Configure allows the statistics system to be configured as the user wants and override the defaults +func (s *goRunStats) Configure(options ...statsOption) error { + for _, option := range options { + err := option(s) + if err != nil { + return err + } + } + return nil +} + +// WithTopN sets the number of things to list in the report when we are concerned with the top N things. +// +// For example, if you want to see the top 20 collections by size, you can do: +// +// antlr.Statistics.Configure(antlr.WithTopN(20)) +func WithTopN(topN int) statsOption { + return func(s *goRunStats) error { + s.topN = topN + return nil + } +} + +// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user. +// +// The function gathers and analyzes a number of statistics about any particular run of +// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only +// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be +// especially useful in tracking down bugs or performance problems when an ANTLR user could +// supply the output from this package, but cannot supply the grammar file(s) they are using, even +// privately to the maintainers. +// +// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user +// must call this function their selves to analyze the statistics. This is because none of the infrastructure is +// extant unless the calling program is built with the antlr.stats tag like so: +// +// go build -tags antlr.stats . +// +// When a program is built with the antlr.stats tag, the Statistics object is created and available outside +// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the +// [Statistics.Report] function to report the statistics. +// +// Please forward any questions about this package to the ANTLR discussion groups on GitHub or send to them to +// me [Jim Idle] directly at jimi@idle.ws +// +// [Jim Idle]: https:://github.com/jim-idle +func (s *goRunStats) Analyze() { + + // Look for anything that looks strange and record it in our local maps etc for the report to present it + // + s.CollectionAnomalies() + s.TopNCollections() +} + +// TopNCollections looks through all the statistical records and gathers the top ten collections by size. 
+func (s *goRunStats) TopNCollections() { + + // Let's sort the stat records by MaxSize + // + sort.Slice(s.jStats, func(i, j int) bool { + return s.jStats[i].MaxSize > s.jStats[j].MaxSize + }) + + for i := 0; i < len(s.jStats) && i < s.topN; i++ { + s.topNByMax = append(s.topNByMax, s.jStats[i]) + } + + // Sort by the number of times used + // + sort.Slice(s.jStats, func(i, j int) bool { + return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts + }) + for i := 0; i < len(s.jStats) && i < s.topN; i++ { + s.topNByUsed = append(s.topNByUsed, s.jStats[i]) + } +} + +// Report dumps a markdown formatted report of all the statistics collected during a run to the given dir output +// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be +// given a type name such as `anomalies` and a time stamp such as `2021-09-01T12:34:56` and a .md suffix. +func (s *goRunStats) Report(dir string, prefix string) error { + + isDir, err := isDirectory(dir) + switch { + case err != nil: + return err + case !isDir: + return fmt.Errorf("output directory `%s` is not a directory", dir) + } + s.reportCollections(dir, prefix) + + // Clean out any old data in case the user forgets + // + s.Reset() + return nil +} + +func (s *goRunStats) Reset() { + s.jStats = nil + s.topNByUsed = nil + s.topNByMax = nil +} + +func (s *goRunStats) reportCollections(dir, prefix string) { + cname := filepath.Join(dir, ".asciidoctor") + // If the file doesn't exist, create it, or append to the file + f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + _, _ = f.WriteString(`// .asciidoctorconfig +++++ + +++++`) + _ = f.Close() + + fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc") + // If the file doesn't exist, create it, or append to the file + f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + defer func(f *os.File) { + err := f.Close() + if err != nil { + log.Fatal(err) + } + }(f) + _, _ = f.WriteString("= Collections for " + prefix + "\n\n") + + _, _ = f.WriteString("== Summary\n") + + if s.unusedCollections != nil { + _, _ = f.WriteString("=== Unused Collections\n") + _, _ = f.WriteString("Unused collections incur a penalty for allocation that makes them a candidate for either\n") + _, _ = f.WriteString(" removal or optimization. If you are using a collection that is not used, you should\n") + _, _ = f.WriteString(" consider removing it. 
If you are using a collection that is used, but not very often,\n") + _, _ = f.WriteString(" you should consider using lazy initialization to defer the allocation until it is\n") + _, _ = f.WriteString(" actually needed.\n\n") + + _, _ = f.WriteString("\n.Unused collections\n") + _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Type | Count\n") + + for k, v := range s.unusedCollections { + _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n") + } + f.WriteString("|===\n\n") + } + + _, _ = f.WriteString("\n.Summary of Collections\n") + _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Type | Count\n") + for k, v := range s.counts { + _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n") + } + _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n") + _, _ = f.WriteString("|===\n\n") + + _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n") + _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n") + for _, c := range s.topNByMax { + _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n") + _, _ = f.WriteString("| " + c.Description + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n") + _, _ = f.WriteString("\n") + } + _, _ = f.WriteString("|===\n\n") + + _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n") + _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n") + for _, c := range s.topNByUsed { + _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n") + _, _ = f.WriteString("| " + c.Description + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n") + _, _ = f.WriteString("\n") + } + _, _ = f.WriteString("|===\n\n") +} + +// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when build runtimeConfig antlr.stats is enabled. +func (s *goRunStats) AddJStatRec(rec *JStatRec) { + s.jStatsLock.Lock() + defer s.jStatsLock.Unlock() + s.jStats = append(s.jStats, rec) +} + +// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found. 
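The Configure/Analyze/Report flow above only exists when the runtime is compiled with the antlr.stats build tag, so a caller has to guard its own file with the same tag. A minimal sketch using only the statistics API added in this file; dumpStats, the output directory, and the "myrun" prefix are illustrative:

```go
//go:build antlr.stats

package example

import "github.com/antlr4-go/antlr/v4"

// dumpStats configures the collector, analyzes whatever was recorded while the
// program ran its lexer/parser, and writes the asciidoc report files.
// Build with: go build -tags antlr.stats
func dumpStats(outDir string) error {
	if err := antlr.Statistics.Configure(antlr.WithTopN(20)); err != nil {
		return err
	}
	// ... run the generated lexer/parser here so collection usage is recorded ...
	antlr.Statistics.Analyze()
	return antlr.Statistics.Report(outDir, "myrun") // also resets the collected data
}
```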
+func (s *goRunStats) CollectionAnomalies() { + s.jStatsLock.RLock() + defer s.jStatsLock.RUnlock() + s.counts = make(map[CollectionSource]int, len(s.jStats)) + for _, c := range s.jStats { + + // Accumlate raw counts + // + s.counts[c.Source]++ + + // Look for allocated but unused collections and count them + if c.MaxSize == 0 && c.Puts == 0 { + if s.unusedCollections == nil { + s.unusedCollections = make(map[CollectionSource]int) + } + s.unusedCollections[c.Source]++ + } + if c.MaxSize > 6000 { + fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar") + } + } + +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/stats_data.go b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go new file mode 100644 index 00000000..4d9eb94e --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go @@ -0,0 +1,23 @@ +package antlr + +// A JStatRec is a record of a particular use of a [JStore], [JMap] or JPCMap] collection. Typically, it will be +// used to look for unused collections that wre allocated anyway, problems with hash bucket clashes, and anomalies +// such as huge numbers of Gets with no entries found GetNoEnt. You can refer to the CollectionAnomalies() function +// for ideas on what can be gleaned from these statistics about collections. +type JStatRec struct { + Source CollectionSource + MaxSize int + CurSize int + Gets int + GetHits int + GetMisses int + GetHashConflicts int + GetNoEnt int + Puts int + PutHits int + PutMisses int + PutHashConflicts int + MaxSlotSize int + Description string + CreateStack []byte +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go b/vendor/github.com/antlr4-go/antlr/v4/token.go similarity index 86% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go rename to vendor/github.com/antlr4-go/antlr/v4/token.go index f73b06bc..9670efb8 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go +++ b/vendor/github.com/antlr4-go/antlr/v4/token.go @@ -35,6 +35,8 @@ type Token interface { GetTokenSource() TokenSource GetInputStream() CharStream + + String() string } type BaseToken struct { @@ -53,7 +55,7 @@ type BaseToken struct { const ( TokenInvalidType = 0 - // During lookahead operations, this "token" signifies we hit rule end ATN state + // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state // and did not follow it despite needing to. TokenEpsilon = -2 @@ -61,15 +63,16 @@ const ( TokenEOF = -1 - // All tokens go to the parser (unless Skip() is called in that rule) + // TokenDefaultChannel is the default channel upon which tokens are sent to the parser. + // + // All tokens go to the parser (unless [Skip] is called in the lexer rule) // on a particular "channel". The parser tunes to a particular channel // so that whitespace etc... can go to the parser on a "hidden" channel. - TokenDefaultChannel = 0 - // Anything on different channel than DEFAULT_CHANNEL is not parsed - // by parser. - + // TokenHiddenChannel defines the normal hidden channel - the parser wil not see tokens that are not on [TokenDefaultChannel]. + // + // Anything on a different channel than TokenDefaultChannel is not parsed by parser. 
TokenHiddenChannel = 1 ) @@ -118,21 +121,22 @@ func (b *BaseToken) GetInputStream() CharStream { } type CommonToken struct { - *BaseToken + BaseToken } func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { - t := new(CommonToken) - - t.BaseToken = new(BaseToken) + t := &CommonToken{ + BaseToken: BaseToken{ + source: source, + tokenType: tokenType, + channel: channel, + start: start, + stop: stop, + tokenIndex: -1, + }, + } - t.source = source - t.tokenType = tokenType - t.channel = channel - t.start = start - t.stop = stop - t.tokenIndex = -1 if t.source.tokenSource != nil { t.line = source.tokenSource.GetLine() t.column = source.tokenSource.GetCharPositionInLine() diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go b/vendor/github.com/antlr4-go/antlr/v4/token_source.go similarity index 100% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go rename to vendor/github.com/antlr4-go/antlr/v4/token_source.go diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go similarity index 90% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go rename to vendor/github.com/antlr4-go/antlr/v4/token_stream.go index 1527d43f..bf4ff663 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go +++ b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go @@ -8,13 +8,14 @@ type TokenStream interface { IntStream LT(k int) Token + Reset() Get(index int) Token GetTokenSource() TokenSource SetTokenSource(TokenSource) GetAllText() string - GetTextFromInterval(*Interval) string + GetTextFromInterval(Interval) string GetTextFromRuleContext(RuleContext) string GetTextFromTokens(Token, Token) string } diff --git a/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go new file mode 100644 index 00000000..ccf59b46 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go @@ -0,0 +1,662 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bytes" + "fmt" +) + +// +// Useful for rewriting out a buffered input token stream after doing some +// augmentation or other manipulations on it. + +//

+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily--only if you convert the buffer to a {@link String} with
+// {@link TokenStream#getText()}. This is very efficient because you are not
+// moving data around all the time. As the buffer of tokens is converted to
+// strings, the {@link #getText()} method(s) scan the input token stream and
+// check to see if there is an operation at the current index. If so, the
+// operation is done and then normal {@link String} rendering continues on the
+// buffer. This is like having multiple Turing machine instruction streams
+// (programs) operating on a single input tape. :)
+//
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream {@link TokenStream#index()} will return the same value before and
+// after any {@link #getText()} call.
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// {@link #getText()} halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+//
+// Since the operations are done lazily at {@link #getText}-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index {@code i} does not change the index values for tokens
+// {@code i}+1..n-1.
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example,
+//
+//	CharStream input = new ANTLRFileStream("input");
+//	TLexer lex = new TLexer(input);
+//	CommonTokenStream tokens = new CommonTokenStream(lex);
+//	T parser = new T(tokens);
+//	TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+//	parser.startRule();
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+//
+//	Token t,u;
+//	...
+//	rewriter.insertAfter(t, "text to put after t");
+//	rewriter.insertAfter(u, "text after u");
+//	System.out.println(rewriter.getText());
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file--all from the same buffer:
+//
+//	rewriter.insertAfter("pass1", t, "text to put after t");
+//	rewriter.insertAfter("pass2", u, "text after u");
+//	System.out.println(rewriter.getText("pass1"));
+//	System.out.println(rewriter.getText("pass2"));
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
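For readers coming from the Java snippets above, a rough Go equivalent built only from the API defined later in this file might look like the sketch below. The generated lexer is a hypothetical placeholder supplied by the caller; rewriteDemo and the inserted strings are illustrative:

```go
package example

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// rewriteDemo mirrors the first Java example: buffer all tokens, queue a few
// instructions on the default program, and render the result lazily.
func rewriteDemo(lex antlr.Lexer) string {
	tokens := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)
	tokens.Fill() // the rewriter never fills or advances the stream itself

	rw := antlr.NewTokenStreamRewriter(tokens)
	rw.InsertBeforeDefault(0, "/* header */ ") // queued only; nothing is copied yet
	rw.InsertAfterDefault(0, " /* trailer */")

	// Named programs give independent rewrites over the same buffer.
	rw.InsertBefore("pass2", 0, "// pass2 view\n")
	fmt.Println(rw.GetText("pass2", antlr.NewInterval(0, tokens.Size()-1)))

	return rw.GetTextDefault() // the queued edits on "default" are applied here
}
```

The underlying tokens are never modified, matching the "operations never actually alter the buffer" guarantee described above.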

+ +const ( + DefaultProgramName = "default" + ProgramInitSize = 100 + MinTokenIndex = 0 +) + +// Define the rewrite operation hierarchy + +type RewriteOperation interface { + + // Execute the rewrite operation by possibly adding to the buffer. + // Return the index of the next token to operate on. + Execute(buffer *bytes.Buffer) int + String() string + GetInstructionIndex() int + GetIndex() int + GetText() string + GetOpName() string + GetTokens() TokenStream + SetInstructionIndex(val int) + SetIndex(int) + SetText(string) + SetOpName(string) + SetTokens(TokenStream) +} + +type BaseRewriteOperation struct { + //Current index of rewrites list + instructionIndex int + //Token buffer index + index int + //Substitution text + text string + //Actual operation name + opName string + //Pointer to token steam + tokens TokenStream +} + +func (op *BaseRewriteOperation) GetInstructionIndex() int { + return op.instructionIndex +} + +func (op *BaseRewriteOperation) GetIndex() int { + return op.index +} + +func (op *BaseRewriteOperation) GetText() string { + return op.text +} + +func (op *BaseRewriteOperation) GetOpName() string { + return op.opName +} + +func (op *BaseRewriteOperation) GetTokens() TokenStream { + return op.tokens +} + +func (op *BaseRewriteOperation) SetInstructionIndex(val int) { + op.instructionIndex = val +} + +func (op *BaseRewriteOperation) SetIndex(val int) { + op.index = val +} + +func (op *BaseRewriteOperation) SetText(val string) { + op.text = val +} + +func (op *BaseRewriteOperation) SetOpName(val string) { + op.opName = val +} + +func (op *BaseRewriteOperation) SetTokens(val TokenStream) { + op.tokens = val +} + +func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int { + return op.index +} + +func (op *BaseRewriteOperation) String() string { + return fmt.Sprintf("<%s@%d:\"%s\">", + op.opName, + op.tokens.Get(op.GetIndex()), + op.text, + ) + +} + +type InsertBeforeOp struct { + BaseRewriteOperation +} + +func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { + return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ + index: index, + text: text, + opName: "InsertBeforeOp", + tokens: stream, + }} +} + +func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index + 1 +} + +func (op *InsertBeforeOp) String() string { + return op.BaseRewriteOperation.String() +} + +// InsertAfterOp distinguishes between insert after/before to do the "insert after" instructions +// first and then the "insert before" instructions at same index. Implementation +// of "insert after" is "insert before index+1". +type InsertAfterOp struct { + BaseRewriteOperation +} + +func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { + return &InsertAfterOp{ + BaseRewriteOperation: BaseRewriteOperation{ + index: index + 1, + text: text, + tokens: stream, + }, + } +} + +func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index + 1 +} + +func (op *InsertAfterOp) String() string { + return op.BaseRewriteOperation.String() +} + +// ReplaceOp tries to replace range from x..y with (y-x)+1 ReplaceOp +// instructions. 
+type ReplaceOp struct { + BaseRewriteOperation + LastIndex int +} + +func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp { + return &ReplaceOp{ + BaseRewriteOperation: BaseRewriteOperation{ + index: from, + text: text, + opName: "ReplaceOp", + tokens: stream, + }, + LastIndex: to, + } +} + +func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int { + if op.text != "" { + buffer.WriteString(op.text) + } + return op.LastIndex + 1 +} + +func (op *ReplaceOp) String() string { + if op.text == "" { + return fmt.Sprintf("", + op.tokens.Get(op.index), op.tokens.Get(op.LastIndex)) + } + return fmt.Sprintf("", + op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text) +} + +type TokenStreamRewriter struct { + //Our source stream + tokens TokenStream + // You may have multiple, named streams of rewrite operations. + // I'm calling these things "programs." + // Maps String (name) → rewrite (List) + programs map[string][]RewriteOperation + lastRewriteTokenIndexes map[string]int +} + +func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter { + return &TokenStreamRewriter{ + tokens: tokens, + programs: map[string][]RewriteOperation{ + DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize), + }, + lastRewriteTokenIndexes: map[string]int{}, + } +} + +func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream { + return tsr.tokens +} + +// Rollback the instruction stream for a program so that +// the indicated instruction (via instructionIndex) is no +// longer in the stream. UNTESTED! +func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) { + is, ok := tsr.programs[programName] + if ok { + tsr.programs[programName] = is[MinTokenIndex:instructionIndex] + } +} + +func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) { + tsr.Rollback(DefaultProgramName, instructionIndex) +} + +// DeleteProgram Reset the program so that no instructions exist +func (tsr *TokenStreamRewriter) DeleteProgram(programName string) { + tsr.Rollback(programName, MinTokenIndex) //TODO: double test on that cause lower bound is not included +} + +func (tsr *TokenStreamRewriter) DeleteProgramDefault() { + tsr.DeleteProgram(DefaultProgramName) +} + +func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) { + // to insert after, just insert before next index (even if past end) + var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) + rewrites := tsr.GetProgram(programName) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(programName, op) +} + +func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) { + tsr.InsertAfter(DefaultProgramName, index, text) +} + +func (tsr *TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) { + tsr.InsertAfter(programName, token.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) { + var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) + rewrites := tsr.GetProgram(programName) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(programName, op) +} + +func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) { + tsr.InsertBefore(DefaultProgramName, index, text) +} + +func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) { + tsr.InsertBefore(programName, token.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text 
string) { + if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() { + panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", + from, to, tsr.tokens.Size())) + } + var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) + rewrites := tsr.GetProgram(programName) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(programName, op) +} + +func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) { + tsr.Replace(DefaultProgramName, from, to, text) +} + +func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) { + tsr.ReplaceDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) { + tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) { + tsr.ReplaceToken(DefaultProgramName, from, to, text) +} + +func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) { + tsr.ReplaceTokenDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) { + tsr.Replace(programName, from, to, "") +} + +func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) { + tsr.Delete(DefaultProgramName, from, to) +} + +func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) { + tsr.DeleteDefault(index, index) +} + +func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) { + tsr.ReplaceToken(programName, from, to, "") +} + +func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) { + tsr.DeleteToken(DefaultProgramName, from, to) +} + +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int { + i, ok := tsr.lastRewriteTokenIndexes[programName] + if !ok { + return -1 + } + return i +} + +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int { + return tsr.GetLastRewriteTokenIndex(DefaultProgramName) +} + +func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) { + tsr.lastRewriteTokenIndexes[programName] = i +} + +func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation { + is := make([]RewriteOperation, 0, ProgramInitSize) + tsr.programs[name] = is + return is +} + +func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) { + is := tsr.GetProgram(name) + is = append(is, op) + tsr.programs[name] = is +} + +func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation { + is, ok := tsr.programs[name] + if !ok { + is = tsr.InitializeProgram(name) + } + return is +} + +// GetTextDefault returns the text from the original tokens altered per the +// instructions given to this rewriter. +func (tsr *TokenStreamRewriter) GetTextDefault() string { + return tsr.GetText( + DefaultProgramName, + NewInterval(0, tsr.tokens.Size()-1)) +} + +// GetText returns the text from the original tokens altered per the +// instructions given to this rewriter. 
+func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string { + rewrites := tsr.programs[programName] + start := interval.Start + stop := interval.Stop + // ensure start/end are in range + stop = min(stop, tsr.tokens.Size()-1) + start = max(start, 0) + if len(rewrites) == 0 { + return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute + } + buf := bytes.Buffer{} + // First, optimize instruction stream + indexToOp := reduceToSingleOperationPerIndex(rewrites) + // Walk buffer, executing instructions and emitting tokens + for i := start; i <= stop && i < tsr.tokens.Size(); { + op := indexToOp[i] + delete(indexToOp, i) // remove so any left have index size-1 + t := tsr.tokens.Get(i) + if op == nil { + // no operation at that index, just dump token + if t.GetTokenType() != TokenEOF { + buf.WriteString(t.GetText()) + } + i++ // move to next token + } else { + i = op.Execute(&buf) // execute operation and skip + } + } + // include stuff after end if it's last index in buffer + // So, if they did an insertAfter(lastValidIndex, "foo"), include + // foo if end==lastValidIndex. + if stop == tsr.tokens.Size()-1 { + // Scan any remaining operations after last token + // should be included (they will be inserts). + for _, op := range indexToOp { + if op.GetIndex() >= tsr.tokens.Size()-1 { + buf.WriteString(op.GetText()) + } + } + } + return buf.String() +} + +// reduceToSingleOperationPerIndex combines operations and report invalid operations (like +// overlapping replaces that are not completed nested). Inserts to +// same index need to be combined etc... +// +// Here are the cases: +// +// I.i.u I.j.v leave alone, non-overlapping +// I.i.u I.i.v combine: Iivu +// +// R.i-j.u R.x-y.v | i-j in x-y delete first R +// R.i-j.u R.i-j.v delete first R +// R.i-j.u R.x-y.v | x-y in i-j ERROR +// R.i-j.u R.x-y.v | boundaries overlap ERROR +// +// Delete special case of replace (text==null): +// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) +// +// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before +// we're not deleting i) +// I.i.u R.x-y.v | i not in (x+1)-y leave alone, non-overlapping +// R.x-y.v I.i.u | i in x-y ERROR +// R.x-y.v I.x.u R.x-y.uv (combine, delete I) +// R.x-y.v I.i.u | i not in x-y leave alone, non-overlapping +// +// I.i.u = insert u before op @ index i +// R.x-y.u = replace x-y indexed tokens with u +// +// First we need to examine replaces. For any replace op: +// +// 1. wipe out any insertions before op within that range. +// 2. Drop any replace op before that is contained completely within +// that range. +// 3. Throw exception upon boundary overlap with any previous replace. +// +// Then we can deal with inserts: +// +// 1. for any inserts to same index, combine even if not adjacent. +// 2. for any prior replace with same left boundary, combine this +// insert with replace and delete this 'replace'. +// 3. throw exception if index in same range as previous replace +// +// Don't actually delete; make op null in list. Easier to walk list. +// Later we can throw as we add to index → op map. +// +// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the +// inserted stuff would be before the 'replace' range. But, if you +// add tokens in front of a method body '{' and then delete the method +// body, I think the stuff before the '{' you added should disappear too. +// +// The func returns a map from token index to operation. 
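The insert-before-2 / replace-2..2 case called out above is easiest to see through the public rewriter API rather than by calling this unexported helper directly. A small sketch, assuming a stream with at least three tokens so the replace range is valid; combineDemo is an illustrative name:

```go
package example

import "github.com/antlr4-go/antlr/v4"

// combineDemo queues an insert-before and a replace at the same token index.
// When the text is rendered, reduceToSingleOperationPerIndex folds the two
// instructions into a single ReplaceOp whose text is the insert text prepended
// to the replacement text, so token 2 renders as "pre-body".
func combineDemo(tokens antlr.TokenStream) string {
	rw := antlr.NewTokenStreamRewriter(tokens)
	rw.InsertBeforeDefault(2, "pre-") // I.2."pre-"
	rw.ReplaceDefaultPos(2, "body")   // R.2-2."body" (panics if index 2 is out of range)
	return rw.GetTextDefault()
}
```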
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation { + // WALK REPLACES + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + rop, ok := op.(*ReplaceOp) + if !ok { + continue + } + // Wipe prior inserts within range + for j := 0; j < i && j < len(rewrites); j++ { + if iop, ok := rewrites[j].(*InsertBeforeOp); ok { + if iop.index == rop.index { + // E.g., insert before 2, delete 2..2; update replace + // text to include insert before, kill insert + rewrites[iop.instructionIndex] = nil + if rop.text != "" { + rop.text = iop.text + rop.text + } else { + rop.text = iop.text + } + } else if iop.index > rop.index && iop.index <= rop.LastIndex { + // delete insert as it's a no-op. + rewrites[iop.instructionIndex] = nil + } + } + } + // Drop any prior replaces contained within + for j := 0; j < i && j < len(rewrites); j++ { + if prevop, ok := rewrites[j].(*ReplaceOp); ok { + if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex { + // delete replace as it's a no-op. + rewrites[prevop.instructionIndex] = nil + continue + } + // throw exception unless disjoint or identical + disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex + // Delete special case of replace (text==null): + // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) + if prevop.text == "" && rop.text == "" && !disjoint { + rewrites[prevop.instructionIndex] = nil + rop.index = min(prevop.index, rop.index) + rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) + } else if !disjoint { + panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) + } + } + } + } + // WALK INSERTS + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + //hack to replicate inheritance in composition + _, iok := rewrites[i].(*InsertBeforeOp) + _, aok := rewrites[i].(*InsertAfterOp) + if !iok && !aok { + continue + } + iop := rewrites[i] + // combine current insert with prior if any at same index + // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic + for j := 0; j < i && j < len(rewrites); j++ { + if nextIop, ok := rewrites[j].(*InsertAfterOp); ok { + if nextIop.index == iop.GetIndex() { + iop.SetText(nextIop.text + iop.GetText()) + rewrites[j] = nil + } + } + if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok { + if prevIop.index == iop.GetIndex() { + iop.SetText(iop.GetText() + prevIop.text) + rewrites[prevIop.instructionIndex] = nil + } + } + } + // look for replaces where iop.index is in range; error + for j := 0; j < i && j < len(rewrites); j++ { + if rop, ok := rewrites[j].(*ReplaceOp); ok { + if iop.GetIndex() == rop.index { + rop.text = iop.GetText() + rop.text + rewrites[i] = nil + continue + } + if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex { + panic("insert op " + iop.String() + " within boundaries of previous " + rop.String()) + } + } + } + } + m := map[int]RewriteOperation{} + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + if _, ok := m[op.GetIndex()]; ok { + panic("should only be one op per index") + } + m[op.GetIndex()] = op + } + return m +} + +/* + Quick fixing Go lack of overloads +*/ + +func max(a, b int) int { + if a > b { + return a + } else { + return b + } +} +func min(a, b int) int { + if a < b { + return a + } else { + return b + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go 
b/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go similarity index 100% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go rename to vendor/github.com/antlr4-go/antlr/v4/trace_listener.go diff --git a/vendor/github.com/antlr4-go/antlr/v4/transition.go b/vendor/github.com/antlr4-go/antlr/v4/transition.go new file mode 100644 index 00000000..313b0fc1 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/transition.go @@ -0,0 +1,439 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +// atom, set, epsilon, action, predicate, rule transitions. +// +//

This is a one way link. It emanates from a state (usually via a list of +// transitions) and has a target state. +// +// Since we never have to change the ATN transitions once we construct it, +// the states. We'll use the term Edge for the DFA to distinguish them from +// ATN transitions.
+ +type Transition interface { + getTarget() ATNState + setTarget(ATNState) + getIsEpsilon() bool + getLabel() *IntervalSet + getSerializationType() int + Matches(int, int, int) bool +} + +type BaseTransition struct { + target ATNState + isEpsilon bool + label int + intervalSet *IntervalSet + serializationType int +} + +func NewBaseTransition(target ATNState) *BaseTransition { + + if target == nil { + panic("target cannot be nil.") + } + + t := new(BaseTransition) + + t.target = target + // Are we epsilon, action, sempred? + t.isEpsilon = false + t.intervalSet = nil + + return t +} + +func (t *BaseTransition) getTarget() ATNState { + return t.target +} + +func (t *BaseTransition) setTarget(s ATNState) { + t.target = s +} + +func (t *BaseTransition) getIsEpsilon() bool { + return t.isEpsilon +} + +func (t *BaseTransition) getLabel() *IntervalSet { + return t.intervalSet +} + +func (t *BaseTransition) getSerializationType() int { + return t.serializationType +} + +func (t *BaseTransition) Matches(_, _, _ int) bool { + panic("Not implemented") +} + +const ( + TransitionEPSILON = 1 + TransitionRANGE = 2 + TransitionRULE = 3 + TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? + TransitionATOM = 5 + TransitionACTION = 6 + TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 + TransitionNOTSET = 8 + TransitionWILDCARD = 9 + TransitionPRECEDENCE = 10 +) + +//goland:noinspection GoUnusedGlobalVariable +var TransitionserializationNames = []string{ + "INVALID", + "EPSILON", + "RANGE", + "RULE", + "PREDICATE", + "ATOM", + "ACTION", + "SET", + "NOT_SET", + "WILDCARD", + "PRECEDENCE", +} + +//var TransitionserializationTypes struct { +// EpsilonTransition int +// RangeTransition int +// RuleTransition int +// PredicateTransition int +// AtomTransition int +// ActionTransition int +// SetTransition int +// NotSetTransition int +// WildcardTransition int +// PrecedencePredicateTransition int +//}{ +// TransitionEPSILON, +// TransitionRANGE, +// TransitionRULE, +// TransitionPREDICATE, +// TransitionATOM, +// TransitionACTION, +// TransitionSET, +// TransitionNOTSET, +// TransitionWILDCARD, +// TransitionPRECEDENCE +//} + +// AtomTransition +// TODO: make all transitions sets? 
no, should remove set edges +type AtomTransition struct { + BaseTransition +} + +func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { + t := &AtomTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionATOM, + label: intervalSet, + isEpsilon: false, + }, + } + t.intervalSet = t.makeLabel() + + return t +} + +func (t *AtomTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addOne(t.label) + return s +} + +func (t *AtomTransition) Matches(symbol, _, _ int) bool { + return t.label == symbol +} + +func (t *AtomTransition) String() string { + return strconv.Itoa(t.label) +} + +type RuleTransition struct { + BaseTransition + followState ATNState + ruleIndex, precedence int +} + +func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { + return &RuleTransition{ + BaseTransition: BaseTransition{ + target: ruleStart, + isEpsilon: true, + serializationType: TransitionRULE, + }, + ruleIndex: ruleIndex, + precedence: precedence, + followState: followState, + } +} + +func (t *RuleTransition) Matches(_, _, _ int) bool { + return false +} + +type EpsilonTransition struct { + BaseTransition + outermostPrecedenceReturn int +} + +func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { + return &EpsilonTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionEPSILON, + isEpsilon: true, + }, + outermostPrecedenceReturn: outermostPrecedenceReturn, + } +} + +func (t *EpsilonTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *EpsilonTransition) String() string { + return "epsilon" +} + +type RangeTransition struct { + BaseTransition + start, stop int +} + +func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { + t := &RangeTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionRANGE, + isEpsilon: false, + }, + start: start, + stop: stop, + } + t.intervalSet = t.makeLabel() + return t +} + +func (t *RangeTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addRange(t.start, t.stop) + return s +} + +func (t *RangeTransition) Matches(symbol, _, _ int) bool { + return symbol >= t.start && symbol <= t.stop +} + +func (t *RangeTransition) String() string { + var sb strings.Builder + sb.WriteByte('\'') + sb.WriteRune(rune(t.start)) + sb.WriteString("'..'") + sb.WriteRune(rune(t.stop)) + sb.WriteByte('\'') + return sb.String() +} + +type AbstractPredicateTransition interface { + Transition + IAbstractPredicateTransitionFoo() +} + +type BaseAbstractPredicateTransition struct { + BaseTransition +} + +func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { + return &BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + }, + } +} + +func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} + +type PredicateTransition struct { + BaseAbstractPredicateTransition + isCtxDependent bool + ruleIndex, predIndex int +} + +func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { + return &PredicateTransition{ + BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionPREDICATE, + isEpsilon: true, + }, + }, + isCtxDependent: isCtxDependent, + ruleIndex: ruleIndex, + predIndex: predIndex, + } +} + +func (t 
*PredicateTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *PredicateTransition) getPredicate() *Predicate { + return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) +} + +func (t *PredicateTransition) String() string { + return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) +} + +type ActionTransition struct { + BaseTransition + isCtxDependent bool + ruleIndex, actionIndex, predIndex int +} + +func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { + return &ActionTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionACTION, + isEpsilon: true, + }, + isCtxDependent: isCtxDependent, + ruleIndex: ruleIndex, + actionIndex: actionIndex, + } +} + +func (t *ActionTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *ActionTransition) String() string { + return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) +} + +type SetTransition struct { + BaseTransition +} + +func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { + t := &SetTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionSET, + }, + } + + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } + return t +} + +func (t *SetTransition) Matches(symbol, _, _ int) bool { + return t.intervalSet.contains(symbol) +} + +func (t *SetTransition) String() string { + return t.intervalSet.String() +} + +type NotSetTransition struct { + SetTransition +} + +func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { + t := &NotSetTransition{ + SetTransition: SetTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionNOTSET, + }, + }, + } + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } + + return t +} + +func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) +} + +func (t *NotSetTransition) String() string { + return "~" + t.intervalSet.String() +} + +type WildcardTransition struct { + BaseTransition +} + +func NewWildcardTransition(target ATNState) *WildcardTransition { + return &WildcardTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionWILDCARD, + }, + } +} + +func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol +} + +func (t *WildcardTransition) String() string { + return "." 
+} + +type PrecedencePredicateTransition struct { + BaseAbstractPredicateTransition + precedence int +} + +func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { + return &PrecedencePredicateTransition{ + BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionPRECEDENCE, + isEpsilon: true, + }, + }, + precedence: precedence, + } +} + +func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { + return NewPrecedencePredicate(t.precedence) +} + +func (t *PrecedencePredicateTransition) String() string { + return fmt.Sprint(t.precedence) + " >= _p" +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/tree.go b/vendor/github.com/antlr4-go/antlr/v4/tree.go new file mode 100644 index 00000000..c288420f --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/tree.go @@ -0,0 +1,304 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The basic notion of a tree has a parent, a payload, and a list of children. +// It is the most abstract interface for all the trees used by ANTLR. +/// + +var TreeInvalidInterval = NewInterval(-1, -2) + +type Tree interface { + GetParent() Tree + SetParent(Tree) + GetPayload() interface{} + GetChild(i int) Tree + GetChildCount() int + GetChildren() []Tree +} + +type SyntaxTree interface { + Tree + GetSourceInterval() Interval +} + +type ParseTree interface { + SyntaxTree + Accept(Visitor ParseTreeVisitor) interface{} + GetText() string + ToStringTree([]string, Recognizer) string +} + +type RuleNode interface { + ParseTree + GetRuleContext() RuleContext +} + +type TerminalNode interface { + ParseTree + GetSymbol() Token +} + +type ErrorNode interface { + TerminalNode + + errorNode() +} + +type ParseTreeVisitor interface { + Visit(tree ParseTree) interface{} + VisitChildren(node RuleNode) interface{} + VisitTerminal(node TerminalNode) interface{} + VisitErrorNode(node ErrorNode) interface{} +} + +type BaseParseTreeVisitor struct{} + +var _ ParseTreeVisitor = &BaseParseTreeVisitor{} + +func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } +func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{} { return nil } + +// TODO: Implement this? 
+//func (this ParseTreeVisitor) Visit(ctx) { +// if (Utils.isArray(ctx)) { +// self := this +// return ctx.map(function(child) { return VisitAtom(self, child)}) +// } else { +// return VisitAtom(this, ctx) +// } +//} +// +//func VisitAtom(Visitor, ctx) { +// if (ctx.parser == nil) { //is terminal +// return +// } +// +// name := ctx.parser.ruleNames[ctx.ruleIndex] +// funcName := "Visit" + Utils.titleCase(name) +// +// return Visitor[funcName](ctx) +//} + +type ParseTreeListener interface { + VisitTerminal(node TerminalNode) + VisitErrorNode(node ErrorNode) + EnterEveryRule(ctx ParserRuleContext) + ExitEveryRule(ctx ParserRuleContext) +} + +type BaseParseTreeListener struct{} + +var _ ParseTreeListener = &BaseParseTreeListener{} + +func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode) {} +func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode) {} +func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {} +func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext) {} + +type TerminalNodeImpl struct { + parentCtx RuleContext + symbol Token +} + +var _ TerminalNode = &TerminalNodeImpl{} + +func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl { + tn := new(TerminalNodeImpl) + + tn.parentCtx = nil + tn.symbol = symbol + + return tn +} + +func (t *TerminalNodeImpl) GetChild(_ int) Tree { + return nil +} + +func (t *TerminalNodeImpl) GetChildren() []Tree { + return nil +} + +func (t *TerminalNodeImpl) SetChildren(_ []Tree) { + panic("Cannot set children on terminal node") +} + +func (t *TerminalNodeImpl) GetSymbol() Token { + return t.symbol +} + +func (t *TerminalNodeImpl) GetParent() Tree { + return t.parentCtx +} + +func (t *TerminalNodeImpl) SetParent(tree Tree) { + t.parentCtx = tree.(RuleContext) +} + +func (t *TerminalNodeImpl) GetPayload() interface{} { + return t.symbol +} + +func (t *TerminalNodeImpl) GetSourceInterval() Interval { + if t.symbol == nil { + return TreeInvalidInterval + } + tokenIndex := t.symbol.GetTokenIndex() + return NewInterval(tokenIndex, tokenIndex) +} + +func (t *TerminalNodeImpl) GetChildCount() int { + return 0 +} + +func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} { + return v.VisitTerminal(t) +} + +func (t *TerminalNodeImpl) GetText() string { + return t.symbol.GetText() +} + +func (t *TerminalNodeImpl) String() string { + if t.symbol.GetTokenType() == TokenEOF { + return "" + } + + return t.symbol.GetText() +} + +func (t *TerminalNodeImpl) ToStringTree(_ []string, _ Recognizer) string { + return t.String() +} + +// Represents a token that was consumed during reSynchronization +// rather than during a valid Match operation. For example, +// we will create this kind of a node during single token insertion +// and deletion as well as during "consume until error recovery set" +// upon no viable alternative exceptions. + +type ErrorNodeImpl struct { + *TerminalNodeImpl +} + +var _ ErrorNode = &ErrorNodeImpl{} + +func NewErrorNodeImpl(token Token) *ErrorNodeImpl { + en := new(ErrorNodeImpl) + en.TerminalNodeImpl = NewTerminalNodeImpl(token) + return en +} + +func (e *ErrorNodeImpl) errorNode() {} + +func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} { + return v.VisitErrorNode(e) +} + +type ParseTreeWalker struct { +} + +func NewParseTreeWalker() *ParseTreeWalker { + return new(ParseTreeWalker) +} + +// Walk performs a walk on the given parse tree starting at the root and going down recursively +// with depth-first search. 
On each node, [EnterRule] is called before +// recursively walking down into child nodes, then [ExitRule] is called after the recursive call to wind up. +func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { + switch tt := t.(type) { + case ErrorNode: + listener.VisitErrorNode(tt) + case TerminalNode: + listener.VisitTerminal(tt) + default: + p.EnterRule(listener, t.(RuleNode)) + for i := 0; i < t.GetChildCount(); i++ { + child := t.GetChild(i) + p.Walk(listener, child) + } + p.ExitRule(listener, t.(RuleNode)) + } +} + +// EnterRule enters a grammar rule by first triggering the generic event [ParseTreeListener].[EnterEveryRule] +// then by triggering the event specific to the given parse tree node +func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { + ctx := r.GetRuleContext().(ParserRuleContext) + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) +} + +// ExitRule exits a grammar rule by first triggering the event specific to the given parse tree node +// then by triggering the generic event [ParseTreeListener].ExitEveryRule +func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { + ctx := r.GetRuleContext().(ParserRuleContext) + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) +} + +//goland:noinspection GoUnusedGlobalVariable +var ParseTreeWalkerDefault = NewParseTreeWalker() + +type IterativeParseTreeWalker struct { + *ParseTreeWalker +} + +//goland:noinspection GoUnusedExportedFunction +func NewIterativeParseTreeWalker() *IterativeParseTreeWalker { + return new(IterativeParseTreeWalker) +} + +func (i *IterativeParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { + var stack []Tree + var indexStack []int + currentNode := t + currentIndex := 0 + + for currentNode != nil { + // pre-order visit + switch tt := currentNode.(type) { + case ErrorNode: + listener.VisitErrorNode(tt) + case TerminalNode: + listener.VisitTerminal(tt) + default: + i.EnterRule(listener, currentNode.(RuleNode)) + } + // Move down to first child, if exists + if currentNode.GetChildCount() > 0 { + stack = append(stack, currentNode) + indexStack = append(indexStack, currentIndex) + currentIndex = 0 + currentNode = currentNode.GetChild(0) + continue + } + + for { + // post-order visit + if ruleNode, ok := currentNode.(RuleNode); ok { + i.ExitRule(listener, ruleNode) + } + // No parent, so no siblings + if len(stack) == 0 { + currentNode = nil + currentIndex = 0 + break + } + // Move to next sibling if possible + currentIndex++ + if stack[len(stack)-1].GetChildCount() > currentIndex { + currentNode = stack[len(stack)-1].GetChild(currentIndex) + break + } + // No next, sibling, so move up + currentNode, stack = stack[len(stack)-1], stack[:len(stack)-1] + currentIndex, indexStack = indexStack[len(indexStack)-1], indexStack[:len(indexStack)-1] + } + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go b/vendor/github.com/antlr4-go/antlr/v4/trees.go similarity index 81% rename from vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go rename to vendor/github.com/antlr4-go/antlr/v4/trees.go index d7dbb032..f44c05d8 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go +++ b/vendor/github.com/antlr4-go/antlr/v4/trees.go @@ -8,10 +8,8 @@ import "fmt" /** A set of utility routines useful for all kinds of ANTLR trees. */ -// Print out a whole tree in LISP form. {@link //getNodeText} is used on the -// -// node payloads to get the text for the nodes. Detect -// parse trees and extract data appropriately. 
+// TreesStringTree prints out a whole tree in LISP form. [getNodeText] is used on the +// node payloads to get the text for the nodes. Detects parse trees and extracts data appropriately. func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { if recog != nil { @@ -32,7 +30,7 @@ func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { } for i := 1; i < c; i++ { s = TreesStringTree(tree.GetChild(i), ruleNames, nil) - res += (" " + s) + res += " " + s } res += ")" return res @@ -62,7 +60,7 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { } } - // no recog for rule names + // no recognition for rule names payload := t.GetPayload() if p2, ok := payload.(Token); ok { return p2.GetText() @@ -71,7 +69,9 @@ func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { return fmt.Sprint(t.GetPayload()) } -// Return ordered list of all children of this node +// TreesGetChildren returns am ordered list of all children of this node +// +//goland:noinspection GoUnusedExportedFunction func TreesGetChildren(t Tree) []Tree { list := make([]Tree, 0) for i := 0; i < t.GetChildCount(); i++ { @@ -80,9 +80,10 @@ func TreesGetChildren(t Tree) []Tree { return list } -// Return a list of all ancestors of this node. The first node of +// TreesgetAncestors returns a list of all ancestors of this node. The first node of list is the root +// and the last node is the parent of this node. // -// list is the root and the last is the parent of this node. +//goland:noinspection GoUnusedExportedFunction func TreesgetAncestors(t Tree) []Tree { ancestors := make([]Tree, 0) t = t.GetParent() @@ -94,10 +95,12 @@ func TreesgetAncestors(t Tree) []Tree { return ancestors } +//goland:noinspection GoUnusedExportedFunction func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree { return TreesfindAllNodes(t, ttype, true) } +//goland:noinspection GoUnusedExportedFunction func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { return TreesfindAllNodes(t, ruleIndex, false) } @@ -129,6 +132,7 @@ func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTr } } +//goland:noinspection GoUnusedExportedFunction func TreesDescendants(t ParseTree) []ParseTree { nodes := []ParseTree{t} for i := 0; i < t.GetChildCount(); i++ { diff --git a/vendor/github.com/antlr4-go/antlr/v4/utils.go b/vendor/github.com/antlr4-go/antlr/v4/utils.go new file mode 100644 index 00000000..733d7df9 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/utils.go @@ -0,0 +1,328 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "bytes" + "errors" + "fmt" + "math/bits" + "os" + "strconv" + "strings" + "syscall" +) + +func intMin(a, b int) int { + if a < b { + return a + } + return b +} + +func intMax(a, b int) int { + if a > b { + return a + } + return b +} + +// A simple integer stack + +type IntStack []int + +var ErrEmptyStack = errors.New("stack is empty") + +func (s *IntStack) Pop() (int, error) { + l := len(*s) - 1 + if l < 0 { + return 0, ErrEmptyStack + } + v := (*s)[l] + *s = (*s)[0:l] + return v, nil +} + +func (s *IntStack) Push(e int) { + *s = append(*s, e) +} + +const bitsPerWord = 64 + +func indexForBit(bit int) int { + return bit / bitsPerWord +} + +//goland:noinspection GoUnusedExportedFunction,GoUnusedFunction +func wordForBit(data []uint64, bit int) uint64 { + idx := indexForBit(bit) + if idx >= len(data) { + return 0 + } + return data[idx] +} + +func maskForBit(bit int) uint64 { + return uint64(1) << (bit % bitsPerWord) +} + +func wordsNeeded(bit int) int { + return indexForBit(bit) + 1 +} + +type BitSet struct { + data []uint64 +} + +// NewBitSet creates a new bitwise set +// TODO: See if we can replace with the standard library's BitSet +func NewBitSet() *BitSet { + return &BitSet{} +} + +func (b *BitSet) add(value int) { + idx := indexForBit(value) + if idx >= len(b.data) { + size := wordsNeeded(value) + data := make([]uint64, size) + copy(data, b.data) + b.data = data + } + b.data[idx] |= maskForBit(value) +} + +func (b *BitSet) clear(index int) { + idx := indexForBit(index) + if idx >= len(b.data) { + return + } + b.data[idx] &= ^maskForBit(index) +} + +func (b *BitSet) or(set *BitSet) { + // Get min size necessary to represent the bits in both sets. + bLen := b.minLen() + setLen := set.minLen() + maxLen := intMax(bLen, setLen) + if maxLen > len(b.data) { + // Increase the size of len(b.data) to represent the bits in both sets. + data := make([]uint64, maxLen) + copy(data, b.data) + b.data = data + } + // len(b.data) is at least setLen. + for i := 0; i < setLen; i++ { + b.data[i] |= set.data[i] + } +} + +func (b *BitSet) remove(value int) { + b.clear(value) +} + +func (b *BitSet) contains(value int) bool { + idx := indexForBit(value) + if idx >= len(b.data) { + return false + } + return (b.data[idx] & maskForBit(value)) != 0 +} + +func (b *BitSet) minValue() int { + for i, v := range b.data { + if v == 0 { + continue + } + return i*bitsPerWord + bits.TrailingZeros64(v) + } + return 2147483647 +} + +func (b *BitSet) equals(other interface{}) bool { + otherBitSet, ok := other.(*BitSet) + if !ok { + return false + } + + if b == otherBitSet { + return true + } + + // We only compare set bits, so we cannot rely on the two slices having the same size. Its + // possible for two BitSets to have different slice lengths but the same set bits. So we only + // compare the relevant words and ignore the trailing zeros. 
+ bLen := b.minLen() + otherLen := otherBitSet.minLen() + + if bLen != otherLen { + return false + } + + for i := 0; i < bLen; i++ { + if b.data[i] != otherBitSet.data[i] { + return false + } + } + + return true +} + +func (b *BitSet) minLen() int { + for i := len(b.data); i > 0; i-- { + if b.data[i-1] != 0 { + return i + } + } + return 0 +} + +func (b *BitSet) length() int { + cnt := 0 + for _, val := range b.data { + cnt += bits.OnesCount64(val) + } + return cnt +} + +func (b *BitSet) String() string { + vals := make([]string, 0, b.length()) + + for i, v := range b.data { + for v != 0 { + n := bits.TrailingZeros64(v) + vals = append(vals, strconv.Itoa(i*bitsPerWord+n)) + v &= ^(uint64(1) << n) + } + } + + return "{" + strings.Join(vals, ", ") + "}" +} + +type AltDict struct { + data map[string]interface{} +} + +func NewAltDict() *AltDict { + d := new(AltDict) + d.data = make(map[string]interface{}) + return d +} + +func (a *AltDict) Get(key string) interface{} { + key = "k-" + key + return a.data[key] +} + +func (a *AltDict) put(key string, value interface{}) { + key = "k-" + key + a.data[key] = value +} + +func (a *AltDict) values() []interface{} { + vs := make([]interface{}, len(a.data)) + i := 0 + for _, v := range a.data { + vs[i] = v + i++ + } + return vs +} + +func EscapeWhitespace(s string, escapeSpaces bool) string { + + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + if escapeSpaces { + s = strings.Replace(s, " ", "\u00B7", -1) + } + return s +} + +//goland:noinspection GoUnusedExportedFunction +func TerminalNodeToStringArray(sa []TerminalNode) []string { + st := make([]string, len(sa)) + + for i, s := range sa { + st[i] = fmt.Sprintf("%v", s) + } + + return st +} + +//goland:noinspection GoUnusedExportedFunction +func PrintArrayJavaStyle(sa []string) string { + var buffer bytes.Buffer + + buffer.WriteString("[") + + for i, s := range sa { + buffer.WriteString(s) + if i != len(sa)-1 { + buffer.WriteString(", ") + } + } + + buffer.WriteString("]") + + return buffer.String() +} + +// murmur hash +func murmurInit(seed int) int { + return seed +} + +func murmurUpdate(h int, value int) int { + const c1 uint32 = 0xCC9E2D51 + const c2 uint32 = 0x1B873593 + const r1 uint32 = 15 + const r2 uint32 = 13 + const m uint32 = 5 + const n uint32 = 0xE6546B64 + + k := uint32(value) + k *= c1 + k = (k << r1) | (k >> (32 - r1)) + k *= c2 + + hash := uint32(h) ^ k + hash = (hash << r2) | (hash >> (32 - r2)) + hash = hash*m + n + return int(hash) +} + +func murmurFinish(h int, numberOfWords int) int { + var hash = uint32(h) + hash ^= uint32(numberOfWords) << 2 + hash ^= hash >> 16 + hash *= 0x85ebca6b + hash ^= hash >> 13 + hash *= 0xc2b2ae35 + hash ^= hash >> 16 + + return int(hash) +} + +func isDirectory(dir string) (bool, error) { + fileInfo, err := os.Stat(dir) + if err != nil { + switch { + case errors.Is(err, syscall.ENOENT): + // The given directory does not exist, so we will try to create it + // + err = os.MkdirAll(dir, 0755) + if err != nil { + return false, err + } + + return true, nil + case err != nil: + return false, err + default: + } + } + return fileInfo.IsDir(), err +} diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md index 16abdfc0..9433004a 100644 --- a/vendor/github.com/cenkalti/backoff/v4/README.md +++ b/vendor/github.com/cenkalti/backoff/v4/README.md @@ -1,4 +1,4 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build 
Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls] This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. @@ -21,8 +21,6 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. [godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 [godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master [coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master [coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go index 2c56c1e7..aac99f19 100644 --- a/vendor/github.com/cenkalti/backoff/v4/exponential.go +++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -71,6 +71,9 @@ type Clock interface { Now() time.Time } +// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options. +type ExponentialBackOffOpts func(*ExponentialBackOff) + // Default values for ExponentialBackOff. const ( DefaultInitialInterval = 500 * time.Millisecond @@ -81,7 +84,7 @@ const ( ) // NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff() *ExponentialBackOff { +func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff { b := &ExponentialBackOff{ InitialInterval: DefaultInitialInterval, RandomizationFactor: DefaultRandomizationFactor, @@ -91,10 +94,62 @@ func NewExponentialBackOff() *ExponentialBackOff { Stop: Stop, Clock: SystemClock, } + for _, fn := range opts { + fn(b) + } b.Reset() return b } +// WithInitialInterval sets the initial interval between retries. +func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.InitialInterval = duration + } +} + +// WithRandomizationFactor sets the randomization factor to add jitter to intervals. +func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.RandomizationFactor = randomizationFactor + } +} + +// WithMultiplier sets the multiplier for increasing the interval after each retry. +func WithMultiplier(multiplier float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Multiplier = multiplier + } +} + +// WithMaxInterval sets the maximum interval between retries. +func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxInterval = duration + } +} + +// WithMaxElapsedTime sets the maximum total time for retries. +func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxElapsedTime = duration + } +} + +// WithRetryStopDuration sets the duration after which retries should stop. +func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Stop = duration + } +} + +// WithClockProvider sets the clock used to measure time. 
+func WithClockProvider(clock Clock) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Clock = clock + } +} + type systemClock struct{} func (t systemClock) Now() time.Time { diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 8bf0e5b7..33c88305 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) - [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index a9e0d45c..78bddf1c 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -19,10 +19,13 @@ const ( // Store the primes in an array as well. // // The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. +// contiguous array for the assembly code. var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 @@ -33,19 +36,31 @@ type Digest struct { n int // how much of mem is used } -// New creates a new Digest that computes the 64-bit xxHash algorithm. +// New creates a new Digest with a zero seed. func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { var d Digest - d.Reset() + d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 d.total = 0 d.n = 0 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index 9216e0a4..78f95f25 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -6,7 +6,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. // //go:noescape func Sum64(b []byte) uint64 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 26df13bb..118e49e8 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -3,7 +3,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. 
func Sum64(b []byte) uint64 { // A simpler version would be // d := New() diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index e86f1b5f..05f5e7df 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -5,7 +5,7 @@ package xxhash -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 1c1638fd..cf9d42ae 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -33,7 +33,7 @@ import ( // // See https://github.com/golang/go/issues/42739 for discussion. -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go index fa010c37..716a1bf0 100644 --- a/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go +++ b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go @@ -1,31 +1,82 @@ +// Code generated by make; DO NOT EDIT. + // Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/container-storage-interface/spec/csi.proto +// versions: +// protoc-gen-go v1.33.0 +// protoc v4.25.2 +// source: csi.proto package csi import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - wrappers "github.com/golang/protobuf/ptypes/wrappers" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type BlockMetadataType int32 + +const ( + BlockMetadataType_UNKNOWN BlockMetadataType = 0 + // The FIXED_LENGTH value indicates that data ranges are + // returned in fixed size blocks. + BlockMetadataType_FIXED_LENGTH BlockMetadataType = 1 + // The VARIABLE_LENGTH value indicates that data ranges + // are returned in potentially variable sized extents. + BlockMetadataType_VARIABLE_LENGTH BlockMetadataType = 2 ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +// Enum value maps for BlockMetadataType. 
+var ( + BlockMetadataType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "FIXED_LENGTH", + 2: "VARIABLE_LENGTH", + } + BlockMetadataType_value = map[string]int32{ + "UNKNOWN": 0, + "FIXED_LENGTH": 1, + "VARIABLE_LENGTH": 2, + } +) + +func (x BlockMetadataType) Enum() *BlockMetadataType { + p := new(BlockMetadataType) + *p = x + return p +} + +func (x BlockMetadataType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (BlockMetadataType) Descriptor() protoreflect.EnumDescriptor { + return file_csi_proto_enumTypes[0].Descriptor() +} + +func (BlockMetadataType) Type() protoreflect.EnumType { + return &file_csi_proto_enumTypes[0] +} + +func (x BlockMetadataType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// Deprecated: Use BlockMetadataType.Descriptor instead. +func (BlockMetadataType) EnumDescriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{0} +} type PluginCapability_Service_Type int32 @@ -55,28 +106,58 @@ const ( // well as specific RPCs as indicated by // GroupControllerGetCapabilities. PluginCapability_Service_GROUP_CONTROLLER_SERVICE PluginCapability_Service_Type = 3 + // SNAPSHOT_METADATA_SERVICE indicates that the Plugin provides + // RPCs to retrieve metadata on the allocated blocks of a single + // snapshot, or the changed blocks between a pair of snapshots of + // the same block volume. + // The presence of this capability determines whether the CO will + // attempt to invoke the OPTIONAL SnapshotMetadata service RPCs. + PluginCapability_Service_SNAPSHOT_METADATA_SERVICE PluginCapability_Service_Type = 4 ) -var PluginCapability_Service_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CONTROLLER_SERVICE", - 2: "VOLUME_ACCESSIBILITY_CONSTRAINTS", - 3: "GROUP_CONTROLLER_SERVICE", -} +// Enum value maps for PluginCapability_Service_Type. 
+var ( + PluginCapability_Service_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CONTROLLER_SERVICE", + 2: "VOLUME_ACCESSIBILITY_CONSTRAINTS", + 3: "GROUP_CONTROLLER_SERVICE", + 4: "SNAPSHOT_METADATA_SERVICE", + } + PluginCapability_Service_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CONTROLLER_SERVICE": 1, + "VOLUME_ACCESSIBILITY_CONSTRAINTS": 2, + "GROUP_CONTROLLER_SERVICE": 3, + "SNAPSHOT_METADATA_SERVICE": 4, + } +) -var PluginCapability_Service_Type_value = map[string]int32{ - "UNKNOWN": 0, - "CONTROLLER_SERVICE": 1, - "VOLUME_ACCESSIBILITY_CONSTRAINTS": 2, - "GROUP_CONTROLLER_SERVICE": 3, +func (x PluginCapability_Service_Type) Enum() *PluginCapability_Service_Type { + p := new(PluginCapability_Service_Type) + *p = x + return p } func (x PluginCapability_Service_Type) String() string { - return proto.EnumName(PluginCapability_Service_Type_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PluginCapability_Service_Type) Descriptor() protoreflect.EnumDescriptor { + return file_csi_proto_enumTypes[1].Descriptor() +} + +func (PluginCapability_Service_Type) Type() protoreflect.EnumType { + return &file_csi_proto_enumTypes[1] +} + +func (x PluginCapability_Service_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } +// Deprecated: Use PluginCapability_Service_Type.Descriptor instead. func (PluginCapability_Service_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{4, 0, 0} + return file_csi_proto_rawDescGZIP(), []int{4, 0, 0} } type PluginCapability_VolumeExpansion_Type int32 @@ -126,24 +207,45 @@ const ( PluginCapability_VolumeExpansion_OFFLINE PluginCapability_VolumeExpansion_Type = 2 ) -var PluginCapability_VolumeExpansion_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "ONLINE", - 2: "OFFLINE", -} +// Enum value maps for PluginCapability_VolumeExpansion_Type. +var ( + PluginCapability_VolumeExpansion_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ONLINE", + 2: "OFFLINE", + } + PluginCapability_VolumeExpansion_Type_value = map[string]int32{ + "UNKNOWN": 0, + "ONLINE": 1, + "OFFLINE": 2, + } +) -var PluginCapability_VolumeExpansion_Type_value = map[string]int32{ - "UNKNOWN": 0, - "ONLINE": 1, - "OFFLINE": 2, +func (x PluginCapability_VolumeExpansion_Type) Enum() *PluginCapability_VolumeExpansion_Type { + p := new(PluginCapability_VolumeExpansion_Type) + *p = x + return p } func (x PluginCapability_VolumeExpansion_Type) String() string { - return proto.EnumName(PluginCapability_VolumeExpansion_Type_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PluginCapability_VolumeExpansion_Type) Descriptor() protoreflect.EnumDescriptor { + return file_csi_proto_enumTypes[2].Descriptor() +} + +func (PluginCapability_VolumeExpansion_Type) Type() protoreflect.EnumType { + return &file_csi_proto_enumTypes[2] } +func (x PluginCapability_VolumeExpansion_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PluginCapability_VolumeExpansion_Type.Descriptor instead. 
func (PluginCapability_VolumeExpansion_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{4, 1, 0} + return file_csi_proto_rawDescGZIP(), []int{4, 1, 0} } type VolumeCapability_AccessMode_Mode int32 @@ -176,34 +278,55 @@ const ( VolumeCapability_AccessMode_SINGLE_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 7 ) -var VolumeCapability_AccessMode_Mode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SINGLE_NODE_WRITER", - 2: "SINGLE_NODE_READER_ONLY", - 3: "MULTI_NODE_READER_ONLY", - 4: "MULTI_NODE_SINGLE_WRITER", - 5: "MULTI_NODE_MULTI_WRITER", - 6: "SINGLE_NODE_SINGLE_WRITER", - 7: "SINGLE_NODE_MULTI_WRITER", -} +// Enum value maps for VolumeCapability_AccessMode_Mode. +var ( + VolumeCapability_AccessMode_Mode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SINGLE_NODE_WRITER", + 2: "SINGLE_NODE_READER_ONLY", + 3: "MULTI_NODE_READER_ONLY", + 4: "MULTI_NODE_SINGLE_WRITER", + 5: "MULTI_NODE_MULTI_WRITER", + 6: "SINGLE_NODE_SINGLE_WRITER", + 7: "SINGLE_NODE_MULTI_WRITER", + } + VolumeCapability_AccessMode_Mode_value = map[string]int32{ + "UNKNOWN": 0, + "SINGLE_NODE_WRITER": 1, + "SINGLE_NODE_READER_ONLY": 2, + "MULTI_NODE_READER_ONLY": 3, + "MULTI_NODE_SINGLE_WRITER": 4, + "MULTI_NODE_MULTI_WRITER": 5, + "SINGLE_NODE_SINGLE_WRITER": 6, + "SINGLE_NODE_MULTI_WRITER": 7, + } +) -var VolumeCapability_AccessMode_Mode_value = map[string]int32{ - "UNKNOWN": 0, - "SINGLE_NODE_WRITER": 1, - "SINGLE_NODE_READER_ONLY": 2, - "MULTI_NODE_READER_ONLY": 3, - "MULTI_NODE_SINGLE_WRITER": 4, - "MULTI_NODE_MULTI_WRITER": 5, - "SINGLE_NODE_SINGLE_WRITER": 6, - "SINGLE_NODE_MULTI_WRITER": 7, +func (x VolumeCapability_AccessMode_Mode) Enum() *VolumeCapability_AccessMode_Mode { + p := new(VolumeCapability_AccessMode_Mode) + *p = x + return p } func (x VolumeCapability_AccessMode_Mode) String() string { - return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (VolumeCapability_AccessMode_Mode) Descriptor() protoreflect.EnumDescriptor { + return file_csi_proto_enumTypes[3].Descriptor() +} + +func (VolumeCapability_AccessMode_Mode) Type() protoreflect.EnumType { + return &file_csi_proto_enumTypes[3] +} + +func (x VolumeCapability_AccessMode_Mode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } +// Deprecated: Use VolumeCapability_AccessMode_Mode.Descriptor instead. func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{10, 2, 0} + return file_csi_proto_rawDescGZIP(), []int{10, 2, 0} } type ControllerServiceCapability_RPC_Type int32 @@ -260,48 +383,74 @@ const ( // SINGLE_NODE_SINGLE_WRITER and/or SINGLE_NODE_MULTI_WRITER are // supported, in order to permit older COs to continue working. ControllerServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER ControllerServiceCapability_RPC_Type = 13 + // Indicates the SP supports modifying volume with mutable + // parameters. See ControllerModifyVolume for details. + ControllerServiceCapability_RPC_MODIFY_VOLUME ControllerServiceCapability_RPC_Type = 14 +) + +// Enum value maps for ControllerServiceCapability_RPC_Type. 
+var ( + ControllerServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATE_DELETE_VOLUME", + 2: "PUBLISH_UNPUBLISH_VOLUME", + 3: "LIST_VOLUMES", + 4: "GET_CAPACITY", + 5: "CREATE_DELETE_SNAPSHOT", + 6: "LIST_SNAPSHOTS", + 7: "CLONE_VOLUME", + 8: "PUBLISH_READONLY", + 9: "EXPAND_VOLUME", + 10: "LIST_VOLUMES_PUBLISHED_NODES", + 11: "VOLUME_CONDITION", + 12: "GET_VOLUME", + 13: "SINGLE_NODE_MULTI_WRITER", + 14: "MODIFY_VOLUME", + } + ControllerServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CREATE_DELETE_VOLUME": 1, + "PUBLISH_UNPUBLISH_VOLUME": 2, + "LIST_VOLUMES": 3, + "GET_CAPACITY": 4, + "CREATE_DELETE_SNAPSHOT": 5, + "LIST_SNAPSHOTS": 6, + "CLONE_VOLUME": 7, + "PUBLISH_READONLY": 8, + "EXPAND_VOLUME": 9, + "LIST_VOLUMES_PUBLISHED_NODES": 10, + "VOLUME_CONDITION": 11, + "GET_VOLUME": 12, + "SINGLE_NODE_MULTI_WRITER": 13, + "MODIFY_VOLUME": 14, + } ) -var ControllerServiceCapability_RPC_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CREATE_DELETE_VOLUME", - 2: "PUBLISH_UNPUBLISH_VOLUME", - 3: "LIST_VOLUMES", - 4: "GET_CAPACITY", - 5: "CREATE_DELETE_SNAPSHOT", - 6: "LIST_SNAPSHOTS", - 7: "CLONE_VOLUME", - 8: "PUBLISH_READONLY", - 9: "EXPAND_VOLUME", - 10: "LIST_VOLUMES_PUBLISHED_NODES", - 11: "VOLUME_CONDITION", - 12: "GET_VOLUME", - 13: "SINGLE_NODE_MULTI_WRITER", -} - -var ControllerServiceCapability_RPC_Type_value = map[string]int32{ - "UNKNOWN": 0, - "CREATE_DELETE_VOLUME": 1, - "PUBLISH_UNPUBLISH_VOLUME": 2, - "LIST_VOLUMES": 3, - "GET_CAPACITY": 4, - "CREATE_DELETE_SNAPSHOT": 5, - "LIST_SNAPSHOTS": 6, - "CLONE_VOLUME": 7, - "PUBLISH_READONLY": 8, - "EXPAND_VOLUME": 9, - "LIST_VOLUMES_PUBLISHED_NODES": 10, - "VOLUME_CONDITION": 11, - "GET_VOLUME": 12, - "SINGLE_NODE_MULTI_WRITER": 13, +func (x ControllerServiceCapability_RPC_Type) Enum() *ControllerServiceCapability_RPC_Type { + p := new(ControllerServiceCapability_RPC_Type) + *p = x + return p } func (x ControllerServiceCapability_RPC_Type) String() string { - return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ControllerServiceCapability_RPC_Type) Descriptor() protoreflect.EnumDescriptor { + return file_csi_proto_enumTypes[4].Descriptor() } +func (ControllerServiceCapability_RPC_Type) Type() protoreflect.EnumType { + return &file_csi_proto_enumTypes[4] +} + +func (x ControllerServiceCapability_RPC_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ControllerServiceCapability_RPC_Type.Descriptor instead. func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{31, 0, 0} + return file_csi_proto_rawDescGZIP(), []int{33, 0, 0} } type VolumeUsage_Unit int32 @@ -312,24 +461,45 @@ const ( VolumeUsage_INODES VolumeUsage_Unit = 2 ) -var VolumeUsage_Unit_name = map[int32]string{ - 0: "UNKNOWN", - 1: "BYTES", - 2: "INODES", -} +// Enum value maps for VolumeUsage_Unit. 
+var ( + VolumeUsage_Unit_name = map[int32]string{ + 0: "UNKNOWN", + 1: "BYTES", + 2: "INODES", + } + VolumeUsage_Unit_value = map[string]int32{ + "UNKNOWN": 0, + "BYTES": 1, + "INODES": 2, + } +) -var VolumeUsage_Unit_value = map[string]int32{ - "UNKNOWN": 0, - "BYTES": 1, - "INODES": 2, +func (x VolumeUsage_Unit) Enum() *VolumeUsage_Unit { + p := new(VolumeUsage_Unit) + *p = x + return p } func (x VolumeUsage_Unit) String() string { - return proto.EnumName(VolumeUsage_Unit_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } +func (VolumeUsage_Unit) Descriptor() protoreflect.EnumDescriptor { + return file_csi_proto_enumTypes[5].Descriptor() +} + +func (VolumeUsage_Unit) Type() protoreflect.EnumType { + return &file_csi_proto_enumTypes[5] +} + +func (x VolumeUsage_Unit) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use VolumeUsage_Unit.Descriptor instead. func (VolumeUsage_Unit) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{51, 0} + return file_csi_proto_rawDescGZIP(), []int{53, 0} } type NodeServiceCapability_RPC_Type int32 @@ -371,32 +541,53 @@ const ( NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP NodeServiceCapability_RPC_Type = 6 ) -var NodeServiceCapability_RPC_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "STAGE_UNSTAGE_VOLUME", - 2: "GET_VOLUME_STATS", - 3: "EXPAND_VOLUME", - 4: "VOLUME_CONDITION", - 5: "SINGLE_NODE_MULTI_WRITER", - 6: "VOLUME_MOUNT_GROUP", -} +// Enum value maps for NodeServiceCapability_RPC_Type. +var ( + NodeServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STAGE_UNSTAGE_VOLUME", + 2: "GET_VOLUME_STATS", + 3: "EXPAND_VOLUME", + 4: "VOLUME_CONDITION", + 5: "SINGLE_NODE_MULTI_WRITER", + 6: "VOLUME_MOUNT_GROUP", + } + NodeServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STAGE_UNSTAGE_VOLUME": 1, + "GET_VOLUME_STATS": 2, + "EXPAND_VOLUME": 3, + "VOLUME_CONDITION": 4, + "SINGLE_NODE_MULTI_WRITER": 5, + "VOLUME_MOUNT_GROUP": 6, + } +) -var NodeServiceCapability_RPC_Type_value = map[string]int32{ - "UNKNOWN": 0, - "STAGE_UNSTAGE_VOLUME": 1, - "GET_VOLUME_STATS": 2, - "EXPAND_VOLUME": 3, - "VOLUME_CONDITION": 4, - "SINGLE_NODE_MULTI_WRITER": 5, - "VOLUME_MOUNT_GROUP": 6, +func (x NodeServiceCapability_RPC_Type) Enum() *NodeServiceCapability_RPC_Type { + p := new(NodeServiceCapability_RPC_Type) + *p = x + return p } func (x NodeServiceCapability_RPC_Type) String() string { - return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (NodeServiceCapability_RPC_Type) Descriptor() protoreflect.EnumDescriptor { + return file_csi_proto_enumTypes[6].Descriptor() +} + +func (NodeServiceCapability_RPC_Type) Type() protoreflect.EnumType { + return &file_csi_proto_enumTypes[6] +} + +func (x NodeServiceCapability_RPC_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } +// Deprecated: Use NodeServiceCapability_RPC_Type.Descriptor instead. 
func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{55, 0, 0} + return file_csi_proto_rawDescGZIP(), []int{57, 0, 0} } type GroupControllerServiceCapability_RPC_Type int32 @@ -409,56 +600,88 @@ const ( GroupControllerServiceCapability_RPC_CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT GroupControllerServiceCapability_RPC_Type = 1 ) -var GroupControllerServiceCapability_RPC_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT", -} +// Enum value maps for GroupControllerServiceCapability_RPC_Type. +var ( + GroupControllerServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT", + } + GroupControllerServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT": 1, + } +) -var GroupControllerServiceCapability_RPC_Type_value = map[string]int32{ - "UNKNOWN": 0, - "CREATE_DELETE_GET_VOLUME_GROUP_SNAPSHOT": 1, +func (x GroupControllerServiceCapability_RPC_Type) Enum() *GroupControllerServiceCapability_RPC_Type { + p := new(GroupControllerServiceCapability_RPC_Type) + *p = x + return p } func (x GroupControllerServiceCapability_RPC_Type) String() string { - return proto.EnumName(GroupControllerServiceCapability_RPC_Type_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (GroupControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{62, 0, 0} +func (GroupControllerServiceCapability_RPC_Type) Descriptor() protoreflect.EnumDescriptor { + return file_csi_proto_enumTypes[7].Descriptor() } -type GetPluginInfoRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (GroupControllerServiceCapability_RPC_Type) Type() protoreflect.EnumType { + return &file_csi_proto_enumTypes[7] } -func (m *GetPluginInfoRequest) Reset() { *m = GetPluginInfoRequest{} } -func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) } -func (*GetPluginInfoRequest) ProtoMessage() {} -func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{0} +func (x GroupControllerServiceCapability_RPC_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } -func (m *GetPluginInfoRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPluginInfoRequest.Unmarshal(m, b) +// Deprecated: Use GroupControllerServiceCapability_RPC_Type.Descriptor instead. 
+func (GroupControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{64, 0, 0} } -func (m *GetPluginInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPluginInfoRequest.Marshal(b, m, deterministic) + +type GetPluginInfoRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *GetPluginInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginInfoRequest.Merge(m, src) + +func (x *GetPluginInfoRequest) Reset() { + *x = GetPluginInfoRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetPluginInfoRequest) XXX_Size() int { - return xxx_messageInfo_GetPluginInfoRequest.Size(m) + +func (x *GetPluginInfoRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetPluginInfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPluginInfoRequest.DiscardUnknown(m) + +func (*GetPluginInfoRequest) ProtoMessage() {} + +func (x *GetPluginInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GetPluginInfoRequest proto.InternalMessageInfo +// Deprecated: Use GetPluginInfoRequest.ProtoReflect.Descriptor instead. +func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{0} +} type GetPluginInfoResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The name MUST follow domain name notation format // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD // include the plugin's host company name and the plugin name, @@ -470,183 +693,194 @@ type GetPluginInfoResponse struct { // This field is REQUIRED. Value of this field is opaque to the CO. VendorVersion string `protobuf:"bytes,2,opt,name=vendor_version,json=vendorVersion,proto3" json:"vendor_version,omitempty"` // This field is OPTIONAL. Values are opaque to the CO. 
- Manifest map[string]string `protobuf:"bytes,3,rep,name=manifest,proto3" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Manifest map[string]string `protobuf:"bytes,3,rep,name=manifest,proto3" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *GetPluginInfoResponse) Reset() { *m = GetPluginInfoResponse{} } -func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) } -func (*GetPluginInfoResponse) ProtoMessage() {} -func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{1} +func (x *GetPluginInfoResponse) Reset() { + *x = GetPluginInfoResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetPluginInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPluginInfoResponse.Unmarshal(m, b) +func (x *GetPluginInfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetPluginInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPluginInfoResponse.Marshal(b, m, deterministic) -} -func (m *GetPluginInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginInfoResponse.Merge(m, src) -} -func (m *GetPluginInfoResponse) XXX_Size() int { - return xxx_messageInfo_GetPluginInfoResponse.Size(m) -} -func (m *GetPluginInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetPluginInfoResponse.DiscardUnknown(m) + +func (*GetPluginInfoResponse) ProtoMessage() {} + +func (x *GetPluginInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GetPluginInfoResponse proto.InternalMessageInfo +// Deprecated: Use GetPluginInfoResponse.ProtoReflect.Descriptor instead. 
+func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{1} +} -func (m *GetPluginInfoResponse) GetName() string { - if m != nil { - return m.Name +func (x *GetPluginInfoResponse) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *GetPluginInfoResponse) GetVendorVersion() string { - if m != nil { - return m.VendorVersion +func (x *GetPluginInfoResponse) GetVendorVersion() string { + if x != nil { + return x.VendorVersion } return "" } -func (m *GetPluginInfoResponse) GetManifest() map[string]string { - if m != nil { - return m.Manifest +func (x *GetPluginInfoResponse) GetManifest() map[string]string { + if x != nil { + return x.Manifest } return nil } type GetPluginCapabilitiesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *GetPluginCapabilitiesRequest) Reset() { *m = GetPluginCapabilitiesRequest{} } -func (m *GetPluginCapabilitiesRequest) String() string { return proto.CompactTextString(m) } -func (*GetPluginCapabilitiesRequest) ProtoMessage() {} -func (*GetPluginCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{2} +func (x *GetPluginCapabilitiesRequest) Reset() { + *x = GetPluginCapabilitiesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetPluginCapabilitiesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPluginCapabilitiesRequest.Unmarshal(m, b) -} -func (m *GetPluginCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPluginCapabilitiesRequest.Marshal(b, m, deterministic) -} -func (m *GetPluginCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginCapabilitiesRequest.Merge(m, src) -} -func (m *GetPluginCapabilitiesRequest) XXX_Size() int { - return xxx_messageInfo_GetPluginCapabilitiesRequest.Size(m) +func (x *GetPluginCapabilitiesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetPluginCapabilitiesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPluginCapabilitiesRequest.DiscardUnknown(m) + +func (*GetPluginCapabilitiesRequest) ProtoMessage() {} + +func (x *GetPluginCapabilitiesRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GetPluginCapabilitiesRequest proto.InternalMessageInfo +// Deprecated: Use GetPluginCapabilitiesRequest.ProtoReflect.Descriptor instead. +func (*GetPluginCapabilitiesRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{2} +} type GetPluginCapabilitiesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // All the capabilities that the controller service supports. This // field is OPTIONAL. 
- Capabilities []*PluginCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Capabilities []*PluginCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` } -func (m *GetPluginCapabilitiesResponse) Reset() { *m = GetPluginCapabilitiesResponse{} } -func (m *GetPluginCapabilitiesResponse) String() string { return proto.CompactTextString(m) } -func (*GetPluginCapabilitiesResponse) ProtoMessage() {} -func (*GetPluginCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{3} +func (x *GetPluginCapabilitiesResponse) Reset() { + *x = GetPluginCapabilitiesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetPluginCapabilitiesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPluginCapabilitiesResponse.Unmarshal(m, b) +func (x *GetPluginCapabilitiesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetPluginCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPluginCapabilitiesResponse.Marshal(b, m, deterministic) -} -func (m *GetPluginCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPluginCapabilitiesResponse.Merge(m, src) -} -func (m *GetPluginCapabilitiesResponse) XXX_Size() int { - return xxx_messageInfo_GetPluginCapabilitiesResponse.Size(m) -} -func (m *GetPluginCapabilitiesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetPluginCapabilitiesResponse.DiscardUnknown(m) + +func (*GetPluginCapabilitiesResponse) ProtoMessage() {} + +func (x *GetPluginCapabilitiesResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GetPluginCapabilitiesResponse proto.InternalMessageInfo +// Deprecated: Use GetPluginCapabilitiesResponse.ProtoReflect.Descriptor instead. +func (*GetPluginCapabilitiesResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{3} +} -func (m *GetPluginCapabilitiesResponse) GetCapabilities() []*PluginCapability { - if m != nil { - return m.Capabilities +func (x *GetPluginCapabilitiesResponse) GetCapabilities() []*PluginCapability { + if x != nil { + return x.Capabilities } return nil } // Specifies a capability of the plugin. 
type PluginCapability struct { - // Types that are valid to be assigned to Type: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Type: // // *PluginCapability_Service_ // *PluginCapability_VolumeExpansion_ - Type isPluginCapability_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Type isPluginCapability_Type `protobuf_oneof:"type"` } -func (m *PluginCapability) Reset() { *m = PluginCapability{} } -func (m *PluginCapability) String() string { return proto.CompactTextString(m) } -func (*PluginCapability) ProtoMessage() {} -func (*PluginCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{4} +func (x *PluginCapability) Reset() { + *x = PluginCapability{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *PluginCapability) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PluginCapability.Unmarshal(m, b) -} -func (m *PluginCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PluginCapability.Marshal(b, m, deterministic) -} -func (m *PluginCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginCapability.Merge(m, src) -} -func (m *PluginCapability) XXX_Size() int { - return xxx_messageInfo_PluginCapability.Size(m) -} -func (m *PluginCapability) XXX_DiscardUnknown() { - xxx_messageInfo_PluginCapability.DiscardUnknown(m) +func (x *PluginCapability) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_PluginCapability proto.InternalMessageInfo - -type isPluginCapability_Type interface { - isPluginCapability_Type() -} +func (*PluginCapability) ProtoMessage() {} -type PluginCapability_Service_ struct { - Service *PluginCapability_Service `protobuf:"bytes,1,opt,name=service,proto3,oneof"` +func (x *PluginCapability) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type PluginCapability_VolumeExpansion_ struct { - VolumeExpansion *PluginCapability_VolumeExpansion `protobuf:"bytes,2,opt,name=volume_expansion,json=volumeExpansion,proto3,oneof"` +// Deprecated: Use PluginCapability.ProtoReflect.Descriptor instead. 
+func (*PluginCapability) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{4} } -func (*PluginCapability_Service_) isPluginCapability_Type() {} - -func (*PluginCapability_VolumeExpansion_) isPluginCapability_Type() {} - func (m *PluginCapability) GetType() isPluginCapability_Type { if m != nil { return m.Type @@ -654,138 +888,80 @@ func (m *PluginCapability) GetType() isPluginCapability_Type { return nil } -func (m *PluginCapability) GetService() *PluginCapability_Service { - if x, ok := m.GetType().(*PluginCapability_Service_); ok { +func (x *PluginCapability) GetService() *PluginCapability_Service { + if x, ok := x.GetType().(*PluginCapability_Service_); ok { return x.Service } return nil } -func (m *PluginCapability) GetVolumeExpansion() *PluginCapability_VolumeExpansion { - if x, ok := m.GetType().(*PluginCapability_VolumeExpansion_); ok { +func (x *PluginCapability) GetVolumeExpansion() *PluginCapability_VolumeExpansion { + if x, ok := x.GetType().(*PluginCapability_VolumeExpansion_); ok { return x.VolumeExpansion } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*PluginCapability) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*PluginCapability_Service_)(nil), - (*PluginCapability_VolumeExpansion_)(nil), - } -} - -type PluginCapability_Service struct { - Type PluginCapability_Service_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.PluginCapability_Service_Type" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type isPluginCapability_Type interface { + isPluginCapability_Type() } -func (m *PluginCapability_Service) Reset() { *m = PluginCapability_Service{} } -func (m *PluginCapability_Service) String() string { return proto.CompactTextString(m) } -func (*PluginCapability_Service) ProtoMessage() {} -func (*PluginCapability_Service) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{4, 0} +type PluginCapability_Service_ struct { + // Service that the plugin supports. 
+ Service *PluginCapability_Service `protobuf:"bytes,1,opt,name=service,proto3,oneof"` } -func (m *PluginCapability_Service) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PluginCapability_Service.Unmarshal(m, b) -} -func (m *PluginCapability_Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PluginCapability_Service.Marshal(b, m, deterministic) -} -func (m *PluginCapability_Service) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginCapability_Service.Merge(m, src) -} -func (m *PluginCapability_Service) XXX_Size() int { - return xxx_messageInfo_PluginCapability_Service.Size(m) -} -func (m *PluginCapability_Service) XXX_DiscardUnknown() { - xxx_messageInfo_PluginCapability_Service.DiscardUnknown(m) +type PluginCapability_VolumeExpansion_ struct { + VolumeExpansion *PluginCapability_VolumeExpansion `protobuf:"bytes,2,opt,name=volume_expansion,json=volumeExpansion,proto3,oneof"` } -var xxx_messageInfo_PluginCapability_Service proto.InternalMessageInfo +func (*PluginCapability_Service_) isPluginCapability_Type() {} -func (m *PluginCapability_Service) GetType() PluginCapability_Service_Type { - if m != nil { - return m.Type - } - return PluginCapability_Service_UNKNOWN -} +func (*PluginCapability_VolumeExpansion_) isPluginCapability_Type() {} -type PluginCapability_VolumeExpansion struct { - Type PluginCapability_VolumeExpansion_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.PluginCapability_VolumeExpansion_Type" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type ProbeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *PluginCapability_VolumeExpansion) Reset() { *m = PluginCapability_VolumeExpansion{} } -func (m *PluginCapability_VolumeExpansion) String() string { return proto.CompactTextString(m) } -func (*PluginCapability_VolumeExpansion) ProtoMessage() {} -func (*PluginCapability_VolumeExpansion) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{4, 1} +func (x *ProbeRequest) Reset() { + *x = ProbeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *PluginCapability_VolumeExpansion) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PluginCapability_VolumeExpansion.Unmarshal(m, b) -} -func (m *PluginCapability_VolumeExpansion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PluginCapability_VolumeExpansion.Marshal(b, m, deterministic) -} -func (m *PluginCapability_VolumeExpansion) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginCapability_VolumeExpansion.Merge(m, src) -} -func (m *PluginCapability_VolumeExpansion) XXX_Size() int { - return xxx_messageInfo_PluginCapability_VolumeExpansion.Size(m) -} -func (m *PluginCapability_VolumeExpansion) XXX_DiscardUnknown() { - xxx_messageInfo_PluginCapability_VolumeExpansion.DiscardUnknown(m) +func (x *ProbeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_PluginCapability_VolumeExpansion proto.InternalMessageInfo +func (*ProbeRequest) ProtoMessage() {} -func (m *PluginCapability_VolumeExpansion) GetType() PluginCapability_VolumeExpansion_Type { - if m != nil { - return m.Type +func (x *ProbeRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[5] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return PluginCapability_VolumeExpansion_UNKNOWN -} - -type ProbeRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + return mi.MessageOf(x) } -func (m *ProbeRequest) Reset() { *m = ProbeRequest{} } -func (m *ProbeRequest) String() string { return proto.CompactTextString(m) } -func (*ProbeRequest) ProtoMessage() {} +// Deprecated: Use ProbeRequest.ProtoReflect.Descriptor instead. func (*ProbeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{5} + return file_csi_proto_rawDescGZIP(), []int{5} } -func (m *ProbeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProbeRequest.Unmarshal(m, b) -} -func (m *ProbeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProbeRequest.Marshal(b, m, deterministic) -} -func (m *ProbeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProbeRequest.Merge(m, src) -} -func (m *ProbeRequest) XXX_Size() int { - return xxx_messageInfo_ProbeRequest.Size(m) -} -func (m *ProbeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ProbeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ProbeRequest proto.InternalMessageInfo - type ProbeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Readiness allows a plugin to report its initialization status back // to the CO. Initialization for some plugins MAY be time consuming // and it is important for a CO to distinguish between the following @@ -806,45 +982,53 @@ type ProbeResponse struct { // that the plugin is in a ready state and is accepting calls to its // Controller and/or Node services (according to the plugin's reported // capabilities). 
- Ready *wrappers.BoolValue `protobuf:"bytes,1,opt,name=ready,proto3" json:"ready,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Ready *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=ready,proto3" json:"ready,omitempty"` } -func (m *ProbeResponse) Reset() { *m = ProbeResponse{} } -func (m *ProbeResponse) String() string { return proto.CompactTextString(m) } -func (*ProbeResponse) ProtoMessage() {} -func (*ProbeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{6} +func (x *ProbeResponse) Reset() { + *x = ProbeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ProbeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProbeResponse.Unmarshal(m, b) -} -func (m *ProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProbeResponse.Marshal(b, m, deterministic) -} -func (m *ProbeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProbeResponse.Merge(m, src) +func (x *ProbeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ProbeResponse) XXX_Size() int { - return xxx_messageInfo_ProbeResponse.Size(m) -} -func (m *ProbeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ProbeResponse.DiscardUnknown(m) + +func (*ProbeResponse) ProtoMessage() {} + +func (x *ProbeResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ProbeResponse proto.InternalMessageInfo +// Deprecated: Use ProbeResponse.ProtoReflect.Descriptor instead. +func (*ProbeResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{6} +} -func (m *ProbeResponse) GetReady() *wrappers.BoolValue { - if m != nil { - return m.Ready +func (x *ProbeResponse) GetReady() *wrapperspb.BoolValue { + if x != nil { + return x.Ready } return nil } type CreateVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The suggested name for the storage space. This field is REQUIRED. // It serves two purposes: // 1. Idempotency - This name is generated by the CO to achieve @@ -895,9 +1079,10 @@ type CreateVolumeRequest struct { // MUST return the appropriate gRPC error code. // This field is REQUIRED. VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"` - // Plugin specific parameters passed in as opaque key-value pairs. - // This field is OPTIONAL. The Plugin is responsible for parsing and - // validating these parameters. COs will treat these as opaque. + // Plugin specific creation-time parameters passed in as opaque + // key-value pairs. This field is OPTIONAL. The Plugin is responsible + // for parsing and validating these parameters. COs will treat + // these as opaque. Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Secrets required by plugin to complete volume creation request. // This field is OPTIONAL. 
Refer to the `Secrets Requirements` @@ -918,81 +1103,97 @@ type CreateVolumeRequest struct { // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY // choose where the provisioned volume is accessible from. AccessibilityRequirements *TopologyRequirement `protobuf:"bytes,7,opt,name=accessibility_requirements,json=accessibilityRequirements,proto3" json:"accessibility_requirements,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // Plugins MUST treat these + // as if they take precedence over the parameters field. + // This field SHALL NOT be specified unless the SP has the + // MODIFY_VOLUME plugin capability. + MutableParameters map[string]string `protobuf:"bytes,8,rep,name=mutable_parameters,json=mutableParameters,proto3" json:"mutable_parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *CreateVolumeRequest) Reset() { *m = CreateVolumeRequest{} } -func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*CreateVolumeRequest) ProtoMessage() {} -func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{7} +func (x *CreateVolumeRequest) Reset() { + *x = CreateVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CreateVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateVolumeRequest.Unmarshal(m, b) -} -func (m *CreateVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateVolumeRequest.Marshal(b, m, deterministic) -} -func (m *CreateVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateVolumeRequest.Merge(m, src) +func (x *CreateVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CreateVolumeRequest) XXX_Size() int { - return xxx_messageInfo_CreateVolumeRequest.Size(m) -} -func (m *CreateVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateVolumeRequest.DiscardUnknown(m) + +func (*CreateVolumeRequest) ProtoMessage() {} + +func (x *CreateVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_CreateVolumeRequest proto.InternalMessageInfo +// Deprecated: Use CreateVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{7} +} -func (m *CreateVolumeRequest) GetName() string { - if m != nil { - return m.Name +func (x *CreateVolumeRequest) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *CreateVolumeRequest) GetCapacityRange() *CapacityRange { - if m != nil { - return m.CapacityRange +func (x *CreateVolumeRequest) GetCapacityRange() *CapacityRange { + if x != nil { + return x.CapacityRange } return nil } -func (m *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability { - if m != nil { - return m.VolumeCapabilities +func (x *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability { + if x != nil { + return x.VolumeCapabilities } return nil } -func (m *CreateVolumeRequest) GetParameters() map[string]string { - if m != nil { - return m.Parameters +func (x *CreateVolumeRequest) GetParameters() map[string]string { + if x != nil { + return x.Parameters } return nil } -func (m *CreateVolumeRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *CreateVolumeRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } -func (m *CreateVolumeRequest) GetVolumeContentSource() *VolumeContentSource { - if m != nil { - return m.VolumeContentSource +func (x *CreateVolumeRequest) GetVolumeContentSource() *VolumeContentSource { + if x != nil { + return x.VolumeContentSource } return nil } -func (m *CreateVolumeRequest) GetAccessibilityRequirements() *TopologyRequirement { - if m != nil { - return m.AccessibilityRequirements +func (x *CreateVolumeRequest) GetAccessibilityRequirements() *TopologyRequirement { + if x != nil { + return x.AccessibilityRequirements + } + return nil +} + +func (x *CreateVolumeRequest) GetMutableParameters() map[string]string { + if x != nil { + return x.MutableParameters } return nil } @@ -1000,57 +1201,49 @@ func (m *CreateVolumeRequest) GetAccessibilityRequirements() *TopologyRequiremen // Specifies what source the volume will be created from. One of the // type fields MUST be specified. 
type VolumeContentSource struct { - // Types that are valid to be assigned to Type: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Type: // // *VolumeContentSource_Snapshot // *VolumeContentSource_Volume - Type isVolumeContentSource_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Type isVolumeContentSource_Type `protobuf_oneof:"type"` } -func (m *VolumeContentSource) Reset() { *m = VolumeContentSource{} } -func (m *VolumeContentSource) String() string { return proto.CompactTextString(m) } -func (*VolumeContentSource) ProtoMessage() {} -func (*VolumeContentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{8} +func (x *VolumeContentSource) Reset() { + *x = VolumeContentSource{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *VolumeContentSource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeContentSource.Unmarshal(m, b) -} -func (m *VolumeContentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeContentSource.Marshal(b, m, deterministic) -} -func (m *VolumeContentSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeContentSource.Merge(m, src) -} -func (m *VolumeContentSource) XXX_Size() int { - return xxx_messageInfo_VolumeContentSource.Size(m) -} -func (m *VolumeContentSource) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeContentSource.DiscardUnknown(m) +func (x *VolumeContentSource) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_VolumeContentSource proto.InternalMessageInfo - -type isVolumeContentSource_Type interface { - isVolumeContentSource_Type() -} +func (*VolumeContentSource) ProtoMessage() {} -type VolumeContentSource_Snapshot struct { - Snapshot *VolumeContentSource_SnapshotSource `protobuf:"bytes,1,opt,name=snapshot,proto3,oneof"` +func (x *VolumeContentSource) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type VolumeContentSource_Volume struct { - Volume *VolumeContentSource_VolumeSource `protobuf:"bytes,2,opt,name=volume,proto3,oneof"` +// Deprecated: Use VolumeContentSource.ProtoReflect.Descriptor instead. 
+func (*VolumeContentSource) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{8} } -func (*VolumeContentSource_Snapshot) isVolumeContentSource_Type() {} - -func (*VolumeContentSource_Volume) isVolumeContentSource_Type() {} - func (m *VolumeContentSource) GetType() isVolumeContentSource_Type { if m != nil { return m.Type @@ -1058,213 +1251,136 @@ func (m *VolumeContentSource) GetType() isVolumeContentSource_Type { return nil } -func (m *VolumeContentSource) GetSnapshot() *VolumeContentSource_SnapshotSource { - if x, ok := m.GetType().(*VolumeContentSource_Snapshot); ok { +func (x *VolumeContentSource) GetSnapshot() *VolumeContentSource_SnapshotSource { + if x, ok := x.GetType().(*VolumeContentSource_Snapshot); ok { return x.Snapshot } return nil } -func (m *VolumeContentSource) GetVolume() *VolumeContentSource_VolumeSource { - if x, ok := m.GetType().(*VolumeContentSource_Volume); ok { +func (x *VolumeContentSource) GetVolume() *VolumeContentSource_VolumeSource { + if x, ok := x.GetType().(*VolumeContentSource_Volume); ok { return x.Volume } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*VolumeContentSource) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*VolumeContentSource_Snapshot)(nil), - (*VolumeContentSource_Volume)(nil), - } +type isVolumeContentSource_Type interface { + isVolumeContentSource_Type() } -type VolumeContentSource_SnapshotSource struct { - // Contains identity information for the existing source snapshot. - // This field is REQUIRED. Plugin is REQUIRED to support creating - // volume from snapshot if it supports the capability - // CREATE_DELETE_SNAPSHOT. - SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type VolumeContentSource_Snapshot struct { + Snapshot *VolumeContentSource_SnapshotSource `protobuf:"bytes,1,opt,name=snapshot,proto3,oneof"` } -func (m *VolumeContentSource_SnapshotSource) Reset() { *m = VolumeContentSource_SnapshotSource{} } -func (m *VolumeContentSource_SnapshotSource) String() string { return proto.CompactTextString(m) } -func (*VolumeContentSource_SnapshotSource) ProtoMessage() {} -func (*VolumeContentSource_SnapshotSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{8, 0} +type VolumeContentSource_Volume struct { + Volume *VolumeContentSource_VolumeSource `protobuf:"bytes,2,opt,name=volume,proto3,oneof"` } -func (m *VolumeContentSource_SnapshotSource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeContentSource_SnapshotSource.Unmarshal(m, b) -} -func (m *VolumeContentSource_SnapshotSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeContentSource_SnapshotSource.Marshal(b, m, deterministic) -} -func (m *VolumeContentSource_SnapshotSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeContentSource_SnapshotSource.Merge(m, src) -} -func (m *VolumeContentSource_SnapshotSource) XXX_Size() int { - return xxx_messageInfo_VolumeContentSource_SnapshotSource.Size(m) -} -func (m *VolumeContentSource_SnapshotSource) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeContentSource_SnapshotSource.DiscardUnknown(m) -} +func (*VolumeContentSource_Snapshot) isVolumeContentSource_Type() {} -var xxx_messageInfo_VolumeContentSource_SnapshotSource proto.InternalMessageInfo +func (*VolumeContentSource_Volume) 
isVolumeContentSource_Type() {} -func (m *VolumeContentSource_SnapshotSource) GetSnapshotId() string { - if m != nil { - return m.SnapshotId - } - return "" -} +type CreateVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type VolumeContentSource_VolumeSource struct { - // Contains identity information for the existing source volume. - // This field is REQUIRED. Plugins reporting CLONE_VOLUME - // capability MUST support creating a volume from another volume. - VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"` } -func (m *VolumeContentSource_VolumeSource) Reset() { *m = VolumeContentSource_VolumeSource{} } -func (m *VolumeContentSource_VolumeSource) String() string { return proto.CompactTextString(m) } -func (*VolumeContentSource_VolumeSource) ProtoMessage() {} -func (*VolumeContentSource_VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{8, 1} +func (x *CreateVolumeResponse) Reset() { + *x = CreateVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *VolumeContentSource_VolumeSource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeContentSource_VolumeSource.Unmarshal(m, b) -} -func (m *VolumeContentSource_VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeContentSource_VolumeSource.Marshal(b, m, deterministic) -} -func (m *VolumeContentSource_VolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeContentSource_VolumeSource.Merge(m, src) -} -func (m *VolumeContentSource_VolumeSource) XXX_Size() int { - return xxx_messageInfo_VolumeContentSource_VolumeSource.Size(m) -} -func (m *VolumeContentSource_VolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeContentSource_VolumeSource.DiscardUnknown(m) +func (x *CreateVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_VolumeContentSource_VolumeSource proto.InternalMessageInfo +func (*CreateVolumeResponse) ProtoMessage() {} -func (m *VolumeContentSource_VolumeSource) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *CreateVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" -} - -type CreateVolumeResponse struct { - // Contains all attributes of the newly created volume that are - // relevant to the CO along with information required by the Plugin - // to uniquely identify the volume. This field is REQUIRED. 
- Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + return mi.MessageOf(x) } -func (m *CreateVolumeResponse) Reset() { *m = CreateVolumeResponse{} } -func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*CreateVolumeResponse) ProtoMessage() {} +// Deprecated: Use CreateVolumeResponse.ProtoReflect.Descriptor instead. func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{9} -} - -func (m *CreateVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateVolumeResponse.Unmarshal(m, b) -} -func (m *CreateVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateVolumeResponse.Marshal(b, m, deterministic) -} -func (m *CreateVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateVolumeResponse.Merge(m, src) -} -func (m *CreateVolumeResponse) XXX_Size() int { - return xxx_messageInfo_CreateVolumeResponse.Size(m) + return file_csi_proto_rawDescGZIP(), []int{9} } -func (m *CreateVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateVolumeResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateVolumeResponse proto.InternalMessageInfo -func (m *CreateVolumeResponse) GetVolume() *Volume { - if m != nil { - return m.Volume +func (x *CreateVolumeResponse) GetVolume() *Volume { + if x != nil { + return x.Volume } return nil } // Specify a capability of a volume. type VolumeCapability struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Specifies what API the volume will be accessed using. One of the // following fields MUST be specified. // - // Types that are valid to be assigned to AccessType: + // Types that are assignable to AccessType: // // *VolumeCapability_Block // *VolumeCapability_Mount AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"` // This is a REQUIRED field. 
- AccessMode *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode,proto3" json:"access_mode,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + AccessMode *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode,proto3" json:"access_mode,omitempty"` } -func (m *VolumeCapability) Reset() { *m = VolumeCapability{} } -func (m *VolumeCapability) String() string { return proto.CompactTextString(m) } -func (*VolumeCapability) ProtoMessage() {} -func (*VolumeCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{10} +func (x *VolumeCapability) Reset() { + *x = VolumeCapability{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *VolumeCapability) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeCapability.Unmarshal(m, b) -} -func (m *VolumeCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeCapability.Marshal(b, m, deterministic) +func (x *VolumeCapability) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCapability.Merge(m, src) -} -func (m *VolumeCapability) XXX_Size() int { - return xxx_messageInfo_VolumeCapability.Size(m) -} -func (m *VolumeCapability) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeCapability.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeCapability proto.InternalMessageInfo -type isVolumeCapability_AccessType interface { - isVolumeCapability_AccessType() -} +func (*VolumeCapability) ProtoMessage() {} -type VolumeCapability_Block struct { - Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,proto3,oneof"` +func (x *VolumeCapability) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type VolumeCapability_Mount struct { - Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,proto3,oneof"` +// Deprecated: Use VolumeCapability.ProtoReflect.Descriptor instead. 
+func (*VolumeCapability) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{10} } -func (*VolumeCapability_Block) isVolumeCapability_AccessType() {} - -func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {} - func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType { if m != nil { return m.AccessType @@ -1272,186 +1388,51 @@ func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType { return nil } -func (m *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume { - if x, ok := m.GetAccessType().(*VolumeCapability_Block); ok { +func (x *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume { + if x, ok := x.GetAccessType().(*VolumeCapability_Block); ok { return x.Block } return nil } -func (m *VolumeCapability) GetMount() *VolumeCapability_MountVolume { - if x, ok := m.GetAccessType().(*VolumeCapability_Mount); ok { +func (x *VolumeCapability) GetMount() *VolumeCapability_MountVolume { + if x, ok := x.GetAccessType().(*VolumeCapability_Mount); ok { return x.Mount } return nil } -func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode { - if m != nil { - return m.AccessMode - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*VolumeCapability) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*VolumeCapability_Block)(nil), - (*VolumeCapability_Mount)(nil), - } -} - -// Indicate that the volume will be accessed via the block device API. -type VolumeCapability_BlockVolume struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VolumeCapability_BlockVolume) Reset() { *m = VolumeCapability_BlockVolume{} } -func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) } -func (*VolumeCapability_BlockVolume) ProtoMessage() {} -func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{10, 0} -} - -func (m *VolumeCapability_BlockVolume) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeCapability_BlockVolume.Unmarshal(m, b) -} -func (m *VolumeCapability_BlockVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeCapability_BlockVolume.Marshal(b, m, deterministic) -} -func (m *VolumeCapability_BlockVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCapability_BlockVolume.Merge(m, src) -} -func (m *VolumeCapability_BlockVolume) XXX_Size() int { - return xxx_messageInfo_VolumeCapability_BlockVolume.Size(m) -} -func (m *VolumeCapability_BlockVolume) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeCapability_BlockVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeCapability_BlockVolume proto.InternalMessageInfo - -// Indicate that the volume will be accessed via the filesystem API. -type VolumeCapability_MountVolume struct { - // The filesystem type. This field is OPTIONAL. - // An empty string is equal to an unspecified field value. - FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType,proto3" json:"fs_type,omitempty"` - // The mount options that can be used for the volume. This field is - // OPTIONAL. `mount_flags` MAY contain sensitive information. - // Therefore, the CO and the Plugin MUST NOT leak this information - // to untrusted entities. The total size of this repeated field - // SHALL NOT exceed 4 KiB. 
- MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags,proto3" json:"mount_flags,omitempty"` - // If SP has VOLUME_MOUNT_GROUP node capability and CO provides - // this field then SP MUST ensure that the volume_mount_group - // parameter is passed as the group identifier to the underlying - // operating system mount system call, with the understanding - // that the set of available mount call parameters and/or - // mount implementations may vary across operating systems. - // Additionally, new file and/or directory entries written to - // the underlying filesystem SHOULD be permission-labeled in such a - // manner, unless otherwise modified by a workload, that they are - // both readable and writable by said mount group identifier. - // This is an OPTIONAL field. - VolumeMountGroup string `protobuf:"bytes,3,opt,name=volume_mount_group,json=volumeMountGroup,proto3" json:"volume_mount_group,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VolumeCapability_MountVolume) Reset() { *m = VolumeCapability_MountVolume{} } -func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) } -func (*VolumeCapability_MountVolume) ProtoMessage() {} -func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{10, 1} -} - -func (m *VolumeCapability_MountVolume) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeCapability_MountVolume.Unmarshal(m, b) -} -func (m *VolumeCapability_MountVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeCapability_MountVolume.Marshal(b, m, deterministic) -} -func (m *VolumeCapability_MountVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCapability_MountVolume.Merge(m, src) -} -func (m *VolumeCapability_MountVolume) XXX_Size() int { - return xxx_messageInfo_VolumeCapability_MountVolume.Size(m) -} -func (m *VolumeCapability_MountVolume) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeCapability_MountVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeCapability_MountVolume proto.InternalMessageInfo - -func (m *VolumeCapability_MountVolume) GetFsType() string { - if m != nil { - return m.FsType - } - return "" -} - -func (m *VolumeCapability_MountVolume) GetMountFlags() []string { - if m != nil { - return m.MountFlags +func (x *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode { + if x != nil { + return x.AccessMode } return nil } -func (m *VolumeCapability_MountVolume) GetVolumeMountGroup() string { - if m != nil { - return m.VolumeMountGroup - } - return "" -} - -// Specify how a volume can be accessed. -type VolumeCapability_AccessMode struct { - // This field is REQUIRED. 
- Mode VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,proto3,enum=csi.v1.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type isVolumeCapability_AccessType interface { + isVolumeCapability_AccessType() } -func (m *VolumeCapability_AccessMode) Reset() { *m = VolumeCapability_AccessMode{} } -func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) } -func (*VolumeCapability_AccessMode) ProtoMessage() {} -func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{10, 2} +type VolumeCapability_Block struct { + Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,proto3,oneof"` } -func (m *VolumeCapability_AccessMode) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeCapability_AccessMode.Unmarshal(m, b) -} -func (m *VolumeCapability_AccessMode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeCapability_AccessMode.Marshal(b, m, deterministic) -} -func (m *VolumeCapability_AccessMode) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCapability_AccessMode.Merge(m, src) -} -func (m *VolumeCapability_AccessMode) XXX_Size() int { - return xxx_messageInfo_VolumeCapability_AccessMode.Size(m) -} -func (m *VolumeCapability_AccessMode) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeCapability_AccessMode.DiscardUnknown(m) +type VolumeCapability_Mount struct { + Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,proto3,oneof"` } -var xxx_messageInfo_VolumeCapability_AccessMode proto.InternalMessageInfo +func (*VolumeCapability_Block) isVolumeCapability_AccessType() {} -func (m *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode { - if m != nil { - return m.Mode - } - return VolumeCapability_AccessMode_UNKNOWN -} +func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {} // The capacity of the storage space in bytes. To specify an exact size, // `required_bytes` and `limit_bytes` SHALL be set to the same value. At // least one of the these fields MUST be specified. type CapacityRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Volume MUST be at least this big. This field is OPTIONAL. // A value of 0 is equal to an unspecified field value. // The value of this field MUST NOT be negative. @@ -1459,53 +1440,61 @@ type CapacityRange struct { // Volume MUST not be bigger than this. This field is OPTIONAL. // A value of 0 is equal to an unspecified field value. // The value of this field MUST NOT be negative. 
- LimitBytes int64 `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes,proto3" json:"limit_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + LimitBytes int64 `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes,proto3" json:"limit_bytes,omitempty"` } -func (m *CapacityRange) Reset() { *m = CapacityRange{} } -func (m *CapacityRange) String() string { return proto.CompactTextString(m) } -func (*CapacityRange) ProtoMessage() {} -func (*CapacityRange) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{11} +func (x *CapacityRange) Reset() { + *x = CapacityRange{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CapacityRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CapacityRange.Unmarshal(m, b) -} -func (m *CapacityRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CapacityRange.Marshal(b, m, deterministic) -} -func (m *CapacityRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_CapacityRange.Merge(m, src) -} -func (m *CapacityRange) XXX_Size() int { - return xxx_messageInfo_CapacityRange.Size(m) +func (x *CapacityRange) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CapacityRange) XXX_DiscardUnknown() { - xxx_messageInfo_CapacityRange.DiscardUnknown(m) + +func (*CapacityRange) ProtoMessage() {} + +func (x *CapacityRange) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_CapacityRange proto.InternalMessageInfo +// Deprecated: Use CapacityRange.ProtoReflect.Descriptor instead. +func (*CapacityRange) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{11} +} -func (m *CapacityRange) GetRequiredBytes() int64 { - if m != nil { - return m.RequiredBytes +func (x *CapacityRange) GetRequiredBytes() int64 { + if x != nil { + return x.RequiredBytes } return 0 } -func (m *CapacityRange) GetLimitBytes() int64 { - if m != nil { - return m.LimitBytes +func (x *CapacityRange) GetLimitBytes() int64 { + if x != nil { + return x.LimitBytes } return 0 } // Information about a specific volume. type Volume struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The capacity of the volume in bytes. This field is OPTIONAL. If not // set (value of 0), it indicates that the capacity of the volume is // unknown (e.g., NFS share). @@ -1569,73 +1558,81 @@ type Volume struct { // // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" // in the "region" "R1". 
- AccessibleTopology []*Topology `protobuf:"bytes,5,rep,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + AccessibleTopology []*Topology `protobuf:"bytes,5,rep,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` } -func (m *Volume) Reset() { *m = Volume{} } -func (m *Volume) String() string { return proto.CompactTextString(m) } -func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{12} +func (x *Volume) Reset() { + *x = Volume{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Volume) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Volume.Unmarshal(m, b) -} -func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Volume.Marshal(b, m, deterministic) -} -func (m *Volume) XXX_Merge(src proto.Message) { - xxx_messageInfo_Volume.Merge(m, src) +func (x *Volume) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Volume) XXX_Size() int { - return xxx_messageInfo_Volume.Size(m) -} -func (m *Volume) XXX_DiscardUnknown() { - xxx_messageInfo_Volume.DiscardUnknown(m) + +func (*Volume) ProtoMessage() {} + +func (x *Volume) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Volume proto.InternalMessageInfo +// Deprecated: Use Volume.ProtoReflect.Descriptor instead. +func (*Volume) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{12} +} -func (m *Volume) GetCapacityBytes() int64 { - if m != nil { - return m.CapacityBytes +func (x *Volume) GetCapacityBytes() int64 { + if x != nil { + return x.CapacityBytes } return 0 } -func (m *Volume) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *Volume) GetVolumeId() string { + if x != nil { + return x.VolumeId } return "" } -func (m *Volume) GetVolumeContext() map[string]string { - if m != nil { - return m.VolumeContext +func (x *Volume) GetVolumeContext() map[string]string { + if x != nil { + return x.VolumeContext } return nil } -func (m *Volume) GetContentSource() *VolumeContentSource { - if m != nil { - return m.ContentSource +func (x *Volume) GetContentSource() *VolumeContentSource { + if x != nil { + return x.ContentSource } return nil } -func (m *Volume) GetAccessibleTopology() []*Topology { - if m != nil { - return m.AccessibleTopology +func (x *Volume) GetAccessibleTopology() []*Topology { + if x != nil { + return x.AccessibleTopology } return nil } type TopologyRequirement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Specifies the list of topologies the provisioned volume MUST be // accessible from. // This field is OPTIONAL. If TopologyRequirement is specified either @@ -1787,47 +1784,51 @@ type TopologyRequirement struct { // combination of "Z3" and other possibilities from the list of // requisite. If that's not possible, it should fall back to a // combination of other possibilities from the list of requisite. 
- Preferred []*Topology `protobuf:"bytes,2,rep,name=preferred,proto3" json:"preferred,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Preferred []*Topology `protobuf:"bytes,2,rep,name=preferred,proto3" json:"preferred,omitempty"` } -func (m *TopologyRequirement) Reset() { *m = TopologyRequirement{} } -func (m *TopologyRequirement) String() string { return proto.CompactTextString(m) } -func (*TopologyRequirement) ProtoMessage() {} -func (*TopologyRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{13} +func (x *TopologyRequirement) Reset() { + *x = TopologyRequirement{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *TopologyRequirement) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TopologyRequirement.Unmarshal(m, b) -} -func (m *TopologyRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TopologyRequirement.Marshal(b, m, deterministic) -} -func (m *TopologyRequirement) XXX_Merge(src proto.Message) { - xxx_messageInfo_TopologyRequirement.Merge(m, src) +func (x *TopologyRequirement) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TopologyRequirement) XXX_Size() int { - return xxx_messageInfo_TopologyRequirement.Size(m) -} -func (m *TopologyRequirement) XXX_DiscardUnknown() { - xxx_messageInfo_TopologyRequirement.DiscardUnknown(m) + +func (*TopologyRequirement) ProtoMessage() {} + +func (x *TopologyRequirement) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_TopologyRequirement proto.InternalMessageInfo +// Deprecated: Use TopologyRequirement.ProtoReflect.Descriptor instead. +func (*TopologyRequirement) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{13} +} -func (m *TopologyRequirement) GetRequisite() []*Topology { - if m != nil { - return m.Requisite +func (x *TopologyRequirement) GetRequisite() []*Topology { + if x != nil { + return x.Requisite } return nil } -func (m *TopologyRequirement) GetPreferred() []*Topology { - if m != nil { - return m.Preferred +func (x *TopologyRequirement) GetPreferred() []*Topology { + if x != nil { + return x.Preferred } return nil } @@ -1862,128 +1863,155 @@ func (m *TopologyRequirement) GetPreferred() []*Topology { // alphanumeric character with '-', '_', '.', or alphanumerics in // between. 
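// Editor's illustration, not part of the vendored diff: a hedged sketch of a
// TopologyRequirement built from the Topology and TopologyRequirement types
// above. The "example.com/zone" segment key is a hypothetical label, not a
// value mandated by the CSI spec.
package main

import csi "github.com/container-storage-interface/spec/lib/go/csi"

// zoneRequirement asks for a volume reachable from any of the requisite zones,
// preferably provisioned in preferredZone.
func zoneRequirement(requisiteZones []string, preferredZone string) *csi.TopologyRequirement {
	req := &csi.TopologyRequirement{}
	for _, z := range requisiteZones {
		req.Requisite = append(req.Requisite, &csi.Topology{
			Segments: map[string]string{"example.com/zone": z},
		})
	}
	req.Preferred = []*csi.Topology{
		{Segments: map[string]string{"example.com/zone": preferredZone}},
	}
	return req
}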
type Topology struct { - Segments map[string]string `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Topology) Reset() { *m = Topology{} } -func (m *Topology) String() string { return proto.CompactTextString(m) } -func (*Topology) ProtoMessage() {} -func (*Topology) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{14} + Segments map[string]string `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *Topology) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Topology.Unmarshal(m, b) -} -func (m *Topology) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Topology.Marshal(b, m, deterministic) -} -func (m *Topology) XXX_Merge(src proto.Message) { - xxx_messageInfo_Topology.Merge(m, src) +func (x *Topology) Reset() { + *x = Topology{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Topology) XXX_Size() int { - return xxx_messageInfo_Topology.Size(m) + +func (x *Topology) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Topology) XXX_DiscardUnknown() { - xxx_messageInfo_Topology.DiscardUnknown(m) + +func (*Topology) ProtoMessage() {} + +func (x *Topology) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Topology proto.InternalMessageInfo +// Deprecated: Use Topology.ProtoReflect.Descriptor instead. +func (*Topology) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{14} +} -func (m *Topology) GetSegments() map[string]string { - if m != nil { - return m.Segments +func (x *Topology) GetSegments() map[string]string { + if x != nil { + return x.Segments } return nil } type DeleteVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the volume to be deprovisioned. // This field is REQUIRED. VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // Secrets required by plugin to complete volume deletion request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. 
- Secrets map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Secrets map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *DeleteVolumeRequest) Reset() { *m = DeleteVolumeRequest{} } -func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteVolumeRequest) ProtoMessage() {} -func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{15} +func (x *DeleteVolumeRequest) Reset() { + *x = DeleteVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *DeleteVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteVolumeRequest.Unmarshal(m, b) -} -func (m *DeleteVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteVolumeRequest.Marshal(b, m, deterministic) +func (x *DeleteVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteVolumeRequest.Merge(m, src) -} -func (m *DeleteVolumeRequest) XXX_Size() int { - return xxx_messageInfo_DeleteVolumeRequest.Size(m) -} -func (m *DeleteVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteVolumeRequest.DiscardUnknown(m) + +func (*DeleteVolumeRequest) ProtoMessage() {} + +func (x *DeleteVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_DeleteVolumeRequest proto.InternalMessageInfo +// Deprecated: Use DeleteVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{15} +} -func (m *DeleteVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *DeleteVolumeRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId } return "" } -func (m *DeleteVolumeRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *DeleteVolumeRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } type DeleteVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *DeleteVolumeResponse) Reset() { *m = DeleteVolumeResponse{} } -func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteVolumeResponse) ProtoMessage() {} -func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{16} +func (x *DeleteVolumeResponse) Reset() { + *x = DeleteVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *DeleteVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteVolumeResponse.Unmarshal(m, b) -} -func (m *DeleteVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteVolumeResponse.Marshal(b, m, deterministic) +func (x *DeleteVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteVolumeResponse.Merge(m, src) -} -func (m *DeleteVolumeResponse) XXX_Size() int { - return xxx_messageInfo_DeleteVolumeResponse.Size(m) -} -func (m *DeleteVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteVolumeResponse.DiscardUnknown(m) + +func (*DeleteVolumeResponse) ProtoMessage() {} + +func (x *DeleteVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_DeleteVolumeResponse proto.InternalMessageInfo +// Deprecated: Use DeleteVolumeResponse.ProtoReflect.Descriptor instead. +func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{16} +} type ControllerPublishVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the volume to be used on a node. // This field is REQUIRED. VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` @@ -2008,80 +2036,88 @@ type ControllerPublishVolumeRequest struct { // CreateVolumeResponse.Volume.volume_context. // This field is OPTIONAL and MUST match the volume_context of the // volume identified by `volume_id`. 
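// Editor's illustration, not part of the vendored diff: a minimal sketch of a
// DeleteVolumeRequest as a CO might build it. The volume ID and secret key
// shown here are hypothetical placeholders.
package main

import csi "github.com/container-storage-interface/spec/lib/go/csi"

func exampleDeleteRequest() *csi.DeleteVolumeRequest {
	return &csi.DeleteVolumeRequest{
		VolumeId: "volume-id-from-createvolume", // REQUIRED: ID of the volume to deprovision
		Secrets: map[string]string{ // OPTIONAL: see the Secrets Requirements section
			"example-secret": "placeholder",
		},
	}
}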
- VolumeContext map[string]string `protobuf:"bytes,6,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + VolumeContext map[string]string `protobuf:"bytes,6,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *ControllerPublishVolumeRequest) Reset() { *m = ControllerPublishVolumeRequest{} } -func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*ControllerPublishVolumeRequest) ProtoMessage() {} -func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{17} +func (x *ControllerPublishVolumeRequest) Reset() { + *x = ControllerPublishVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ControllerPublishVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerPublishVolumeRequest.Unmarshal(m, b) -} -func (m *ControllerPublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerPublishVolumeRequest.Marshal(b, m, deterministic) -} -func (m *ControllerPublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerPublishVolumeRequest.Merge(m, src) -} -func (m *ControllerPublishVolumeRequest) XXX_Size() int { - return xxx_messageInfo_ControllerPublishVolumeRequest.Size(m) -} -func (m *ControllerPublishVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerPublishVolumeRequest.DiscardUnknown(m) +func (x *ControllerPublishVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ControllerPublishVolumeRequest proto.InternalMessageInfo +func (*ControllerPublishVolumeRequest) ProtoMessage() {} -func (m *ControllerPublishVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *ControllerPublishVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (m *ControllerPublishVolumeRequest) GetNodeId() string { - if m != nil { - return m.NodeId +// Deprecated: Use ControllerPublishVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{17} +} + +func (x *ControllerPublishVolumeRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId } return "" } -func (m *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability { - if m != nil { - return m.VolumeCapability +func (x *ControllerPublishVolumeRequest) GetNodeId() string { + if x != nil { + return x.NodeId + } + return "" +} + +func (x *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if x != nil { + return x.VolumeCapability } return nil } -func (m *ControllerPublishVolumeRequest) GetReadonly() bool { - if m != nil { - return m.Readonly +func (x *ControllerPublishVolumeRequest) GetReadonly() bool { + if x != nil { + return x.Readonly } return false } -func (m *ControllerPublishVolumeRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *ControllerPublishVolumeRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } -func (m *ControllerPublishVolumeRequest) GetVolumeContext() map[string]string { - if m != nil { - return m.VolumeContext +func (x *ControllerPublishVolumeRequest) GetVolumeContext() map[string]string { + if x != nil { + return x.VolumeContext } return nil } type ControllerPublishVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Opaque static publish properties of the volume. SP MAY use this // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` // calls calls have contextual information. @@ -2095,45 +2131,53 @@ type ControllerPublishVolumeResponse struct { // identify the volume. // This field is OPTIONAL and when present MUST be passed to // subsequent `NodeStageVolume` or `NodePublishVolume` calls - PublishContext map[string]string `protobuf:"bytes,1,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + PublishContext map[string]string `protobuf:"bytes,1,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *ControllerPublishVolumeResponse) Reset() { *m = ControllerPublishVolumeResponse{} } -func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*ControllerPublishVolumeResponse) ProtoMessage() {} -func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{18} +func (x *ControllerPublishVolumeResponse) Reset() { + *x = ControllerPublishVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ControllerPublishVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerPublishVolumeResponse.Unmarshal(m, b) -} -func (m *ControllerPublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerPublishVolumeResponse.Marshal(b, m, deterministic) +func (x *ControllerPublishVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m 
*ControllerPublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerPublishVolumeResponse.Merge(m, src) -} -func (m *ControllerPublishVolumeResponse) XXX_Size() int { - return xxx_messageInfo_ControllerPublishVolumeResponse.Size(m) -} -func (m *ControllerPublishVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerPublishVolumeResponse.DiscardUnknown(m) + +func (*ControllerPublishVolumeResponse) ProtoMessage() {} + +func (x *ControllerPublishVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ControllerPublishVolumeResponse proto.InternalMessageInfo +// Deprecated: Use ControllerPublishVolumeResponse.ProtoReflect.Descriptor instead. +func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{18} +} -func (m *ControllerPublishVolumeResponse) GetPublishContext() map[string]string { - if m != nil { - return m.PublishContext +func (x *ControllerPublishVolumeResponse) GetPublishContext() map[string]string { + if x != nil { + return x.PublishContext } return nil } type ControllerUnpublishVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the volume. This field is REQUIRED. VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // The ID of the node. This field is OPTIONAL. The CO SHOULD set this @@ -2147,90 +2191,105 @@ type ControllerUnpublishVolumeRequest struct { // ControllerPublishVolume call for the specified volume. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. 
- Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *ControllerUnpublishVolumeRequest) Reset() { *m = ControllerUnpublishVolumeRequest{} } -func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*ControllerUnpublishVolumeRequest) ProtoMessage() {} -func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{19} +func (x *ControllerUnpublishVolumeRequest) Reset() { + *x = ControllerUnpublishVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ControllerUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerUnpublishVolumeRequest.Unmarshal(m, b) +func (x *ControllerUnpublishVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ControllerUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerUnpublishVolumeRequest.Marshal(b, m, deterministic) -} -func (m *ControllerUnpublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerUnpublishVolumeRequest.Merge(m, src) -} -func (m *ControllerUnpublishVolumeRequest) XXX_Size() int { - return xxx_messageInfo_ControllerUnpublishVolumeRequest.Size(m) -} -func (m *ControllerUnpublishVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerUnpublishVolumeRequest.DiscardUnknown(m) + +func (*ControllerUnpublishVolumeRequest) ProtoMessage() {} + +func (x *ControllerUnpublishVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ControllerUnpublishVolumeRequest proto.InternalMessageInfo +// Deprecated: Use ControllerUnpublishVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{19} +} -func (m *ControllerUnpublishVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *ControllerUnpublishVolumeRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId } return "" } -func (m *ControllerUnpublishVolumeRequest) GetNodeId() string { - if m != nil { - return m.NodeId +func (x *ControllerUnpublishVolumeRequest) GetNodeId() string { + if x != nil { + return x.NodeId } return "" } -func (m *ControllerUnpublishVolumeRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *ControllerUnpublishVolumeRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } type ControllerUnpublishVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *ControllerUnpublishVolumeResponse) Reset() { *m = ControllerUnpublishVolumeResponse{} } -func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*ControllerUnpublishVolumeResponse) ProtoMessage() {} -func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{20} +func (x *ControllerUnpublishVolumeResponse) Reset() { + *x = ControllerUnpublishVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ControllerUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerUnpublishVolumeResponse.Unmarshal(m, b) -} -func (m *ControllerUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerUnpublishVolumeResponse.Marshal(b, m, deterministic) -} -func (m *ControllerUnpublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerUnpublishVolumeResponse.Merge(m, src) -} -func (m *ControllerUnpublishVolumeResponse) XXX_Size() int { - return xxx_messageInfo_ControllerUnpublishVolumeResponse.Size(m) +func (x *ControllerUnpublishVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ControllerUnpublishVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerUnpublishVolumeResponse.DiscardUnknown(m) + +func (*ControllerUnpublishVolumeResponse) ProtoMessage() {} + +func (x *ControllerUnpublishVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ControllerUnpublishVolumeResponse proto.InternalMessageInfo +// Deprecated: Use ControllerUnpublishVolumeResponse.ProtoReflect.Descriptor instead. +func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{20} +} type ValidateVolumeCapabilitiesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the volume to check. This field is REQUIRED. 
VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // Volume context as returned by SP in @@ -2248,73 +2307,91 @@ type ValidateVolumeCapabilitiesRequest struct { // Secrets required by plugin to complete volume validation request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // See CreateVolumeRequest.mutable_parameters. + // This field is OPTIONAL. + MutableParameters map[string]string `protobuf:"bytes,6,rep,name=mutable_parameters,json=mutableParameters,proto3" json:"mutable_parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *ValidateVolumeCapabilitiesRequest) Reset() { *m = ValidateVolumeCapabilitiesRequest{} } -func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) } -func (*ValidateVolumeCapabilitiesRequest) ProtoMessage() {} -func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{21} +func (x *ValidateVolumeCapabilitiesRequest) Reset() { + *x = ValidateVolumeCapabilitiesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ValidateVolumeCapabilitiesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Unmarshal(m, b) -} -func (m *ValidateVolumeCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Marshal(b, m, deterministic) -} -func (m *ValidateVolumeCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Merge(m, src) +func (x *ValidateVolumeCapabilitiesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ValidateVolumeCapabilitiesRequest) XXX_Size() int { - return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Size(m) -} -func (m *ValidateVolumeCapabilitiesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateVolumeCapabilitiesRequest.DiscardUnknown(m) + +func (*ValidateVolumeCapabilitiesRequest) ProtoMessage() {} + +func (x *ValidateVolumeCapabilitiesRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ValidateVolumeCapabilitiesRequest proto.InternalMessageInfo +// Deprecated: Use ValidateVolumeCapabilitiesRequest.ProtoReflect.Descriptor instead. 
+func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{21} +} -func (m *ValidateVolumeCapabilitiesRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *ValidateVolumeCapabilitiesRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId } return "" } -func (m *ValidateVolumeCapabilitiesRequest) GetVolumeContext() map[string]string { - if m != nil { - return m.VolumeContext +func (x *ValidateVolumeCapabilitiesRequest) GetVolumeContext() map[string]string { + if x != nil { + return x.VolumeContext } return nil } -func (m *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability { - if m != nil { - return m.VolumeCapabilities +func (x *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability { + if x != nil { + return x.VolumeCapabilities } return nil } -func (m *ValidateVolumeCapabilitiesRequest) GetParameters() map[string]string { - if m != nil { - return m.Parameters +func (x *ValidateVolumeCapabilitiesRequest) GetParameters() map[string]string { + if x != nil { + return x.Parameters } return nil } -func (m *ValidateVolumeCapabilitiesRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *ValidateVolumeCapabilitiesRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets + } + return nil +} + +func (x *ValidateVolumeCapabilitiesRequest) GetMutableParameters() map[string]string { + if x != nil { + return x.MutableParameters } return nil } type ValidateVolumeCapabilitiesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Confirmed indicates to the CO the set of capabilities that the // plugin has validated. This field SHALL only be set to a non-empty // value for successful validation responses. @@ -2327,117 +2404,60 @@ type ValidateVolumeCapabilitiesResponse struct { // Message to the CO if `confirmed` above is empty. This field is // OPTIONAL. // An empty string is equal to an unspecified field value. 
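// Editor's illustration, not part of the vendored diff: a hedged sketch of a
// ValidateVolumeCapabilitiesRequest that exercises the mutable_parameters
// field added in this version of the generated code. The parameter key is
// hypothetical; a CO passes whatever opaque keys the SP documents.
package main

import csi "github.com/container-storage-interface/spec/lib/go/csi"

func exampleValidateRequest(volumeID string) *csi.ValidateVolumeCapabilitiesRequest {
	return &csi.ValidateVolumeCapabilitiesRequest{
		VolumeId: volumeID,
		VolumeCapabilities: []*csi.VolumeCapability{{
			AccessType: &csi.VolumeCapability_Mount{
				Mount: &csi.VolumeCapability_MountVolume{},
			},
			AccessMode: &csi.VolumeCapability_AccessMode{
				Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
			},
		}},
		MutableParameters: map[string]string{"example-tier": "fast"},
	}
}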
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ValidateVolumeCapabilitiesResponse) Reset() { *m = ValidateVolumeCapabilitiesResponse{} } -func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) } -func (*ValidateVolumeCapabilitiesResponse) ProtoMessage() {} -func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{22} -} - -func (m *ValidateVolumeCapabilitiesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Unmarshal(m, b) -} -func (m *ValidateVolumeCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Marshal(b, m, deterministic) -} -func (m *ValidateVolumeCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Merge(m, src) -} -func (m *ValidateVolumeCapabilitiesResponse) XXX_Size() int { - return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Size(m) -} -func (m *ValidateVolumeCapabilitiesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateVolumeCapabilitiesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidateVolumeCapabilitiesResponse proto.InternalMessageInfo - -func (m *ValidateVolumeCapabilitiesResponse) GetConfirmed() *ValidateVolumeCapabilitiesResponse_Confirmed { - if m != nil { - return m.Confirmed - } - return nil + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` } -func (m *ValidateVolumeCapabilitiesResponse) GetMessage() string { - if m != nil { - return m.Message +func (x *ValidateVolumeCapabilitiesResponse) Reset() { + *x = ValidateVolumeCapabilitiesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type ValidateVolumeCapabilitiesResponse_Confirmed struct { - // Volume context validated by the plugin. - // This field is OPTIONAL. - VolumeContext map[string]string `protobuf:"bytes,1,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Volume capabilities supported by the plugin. - // This field is REQUIRED. - VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"` - // The volume creation parameters validated by the plugin. - // This field is OPTIONAL. 
- Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ValidateVolumeCapabilitiesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ValidateVolumeCapabilitiesResponse_Confirmed) Reset() { - *m = ValidateVolumeCapabilitiesResponse_Confirmed{} -} -func (m *ValidateVolumeCapabilitiesResponse_Confirmed) String() string { - return proto.CompactTextString(m) -} -func (*ValidateVolumeCapabilitiesResponse_Confirmed) ProtoMessage() {} -func (*ValidateVolumeCapabilitiesResponse_Confirmed) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{22, 0} -} +func (*ValidateVolumeCapabilitiesResponse) ProtoMessage() {} -func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Unmarshal(m, b) -} -func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Marshal(b, m, deterministic) -} -func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Merge(m, src) -} -func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Size() int { - return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Size(m) -} -func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_DiscardUnknown() { - xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.DiscardUnknown(m) +func (x *ValidateVolumeCapabilitiesResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed proto.InternalMessageInfo - -func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetVolumeContext() map[string]string { - if m != nil { - return m.VolumeContext - } - return nil +// Deprecated: Use ValidateVolumeCapabilitiesResponse.ProtoReflect.Descriptor instead. +func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{22} } -func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetVolumeCapabilities() []*VolumeCapability { - if m != nil { - return m.VolumeCapabilities +func (x *ValidateVolumeCapabilitiesResponse) GetConfirmed() *ValidateVolumeCapabilitiesResponse_Confirmed { + if x != nil { + return x.Confirmed } return nil } -func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetParameters() map[string]string { - if m != nil { - return m.Parameters +func (x *ValidateVolumeCapabilitiesResponse) GetMessage() string { + if x != nil { + return x.Message } - return nil + return "" } type ListVolumesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // If specified (non-zero value), the Plugin MUST NOT return more // entries than this number in the response. 
If the actual number of // entries is more than this number, the Plugin MUST set `next_token` @@ -2451,52 +2471,60 @@ type ListVolumesRequest struct { // `next_token` returned by a previous `ListVolumes` call to get the // next page of entries. This field is OPTIONAL. // An empty string is equal to an unspecified field value. - StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken,proto3" json:"starting_token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken,proto3" json:"starting_token,omitempty"` } -func (m *ListVolumesRequest) Reset() { *m = ListVolumesRequest{} } -func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) } -func (*ListVolumesRequest) ProtoMessage() {} -func (*ListVolumesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{23} +func (x *ListVolumesRequest) Reset() { + *x = ListVolumesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ListVolumesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListVolumesRequest.Unmarshal(m, b) -} -func (m *ListVolumesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListVolumesRequest.Marshal(b, m, deterministic) -} -func (m *ListVolumesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListVolumesRequest.Merge(m, src) -} -func (m *ListVolumesRequest) XXX_Size() int { - return xxx_messageInfo_ListVolumesRequest.Size(m) +func (x *ListVolumesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListVolumesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListVolumesRequest.DiscardUnknown(m) + +func (*ListVolumesRequest) ProtoMessage() {} + +func (x *ListVolumesRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ListVolumesRequest proto.InternalMessageInfo +// Deprecated: Use ListVolumesRequest.ProtoReflect.Descriptor instead. +func (*ListVolumesRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{23} +} -func (m *ListVolumesRequest) GetMaxEntries() int32 { - if m != nil { - return m.MaxEntries +func (x *ListVolumesRequest) GetMaxEntries() int32 { + if x != nil { + return x.MaxEntries } return 0 } -func (m *ListVolumesRequest) GetStartingToken() string { - if m != nil { - return m.StartingToken +func (x *ListVolumesRequest) GetStartingToken() string { + if x != nil { + return x.StartingToken } return "" } type ListVolumesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Entries []*ListVolumesResponse_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` // This token allows you to get the next page of entries for // `ListVolumes` request. If the number of entries is larger than @@ -2504,314 +2532,280 @@ type ListVolumesResponse struct { // `starting_token` field in the next `ListVolumes` request. This // field is OPTIONAL. // An empty string is equal to an unspecified field value. 
- NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"` } -func (m *ListVolumesResponse) Reset() { *m = ListVolumesResponse{} } -func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) } -func (*ListVolumesResponse) ProtoMessage() {} -func (*ListVolumesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{24} +func (x *ListVolumesResponse) Reset() { + *x = ListVolumesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ListVolumesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListVolumesResponse.Unmarshal(m, b) -} -func (m *ListVolumesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListVolumesResponse.Marshal(b, m, deterministic) -} -func (m *ListVolumesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListVolumesResponse.Merge(m, src) +func (x *ListVolumesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListVolumesResponse) XXX_Size() int { - return xxx_messageInfo_ListVolumesResponse.Size(m) -} -func (m *ListVolumesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListVolumesResponse.DiscardUnknown(m) + +func (*ListVolumesResponse) ProtoMessage() {} + +func (x *ListVolumesResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ListVolumesResponse proto.InternalMessageInfo +// Deprecated: Use ListVolumesResponse.ProtoReflect.Descriptor instead. +func (*ListVolumesResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{24} +} -func (m *ListVolumesResponse) GetEntries() []*ListVolumesResponse_Entry { - if m != nil { - return m.Entries +func (x *ListVolumesResponse) GetEntries() []*ListVolumesResponse_Entry { + if x != nil { + return x.Entries } return nil } -func (m *ListVolumesResponse) GetNextToken() string { - if m != nil { - return m.NextToken +func (x *ListVolumesResponse) GetNextToken() string { + if x != nil { + return x.NextToken } return "" } -type ListVolumesResponse_VolumeStatus struct { - // A list of all `node_id` of nodes that the volume in this entry - // is controller published on. - // This field is OPTIONAL. If it is not specified and the SP has - // the LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO - // MAY assume the volume is not controller published to any nodes. - // If the field is not specified and the SP does not have the - // LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO MUST - // not interpret this field. - // published_node_ids MAY include nodes not published to or - // reported by the SP. The CO MUST be resilient to that. - PublishedNodeIds []string `protobuf:"bytes,1,rep,name=published_node_ids,json=publishedNodeIds,proto3" json:"published_node_ids,omitempty"` - // Information about the current condition of the volume. - // This field is OPTIONAL. 
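// Editor's illustration, not part of the vendored diff: a hedged sketch of how
// a CO might page through ListVolumes using max_entries, starting_token, and
// next_token, assuming an already-connected csi.ControllerClient (the gRPC
// client generated elsewhere in this file).
package main

import (
	"context"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
)

// listAllVolumes follows next_token until the plugin reports no further pages.
func listAllVolumes(ctx context.Context, c csi.ControllerClient) ([]*csi.ListVolumesResponse_Entry, error) {
	var all []*csi.ListVolumesResponse_Entry
	token := "" // empty string means "start from the beginning"
	for {
		resp, err := c.ListVolumes(ctx, &csi.ListVolumesRequest{
			MaxEntries:    100, // plugin MUST NOT return more entries than this
			StartingToken: token,
		})
		if err != nil {
			return nil, err
		}
		all = append(all, resp.GetEntries()...)
		token = resp.GetNextToken()
		if token == "" { // no next_token: the listing is complete
			return all, nil
		}
	}
}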
- // This field MUST be specified if the - // VOLUME_CONDITION controller capability is supported. - VolumeCondition *VolumeCondition `protobuf:"bytes,2,opt,name=volume_condition,json=volumeCondition,proto3" json:"volume_condition,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +type ControllerGetVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ListVolumesResponse_VolumeStatus) Reset() { *m = ListVolumesResponse_VolumeStatus{} } -func (m *ListVolumesResponse_VolumeStatus) String() string { return proto.CompactTextString(m) } -func (*ListVolumesResponse_VolumeStatus) ProtoMessage() {} -func (*ListVolumesResponse_VolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{24, 0} + // The ID of the volume to fetch current volume information for. + // This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` } -func (m *ListVolumesResponse_VolumeStatus) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Unmarshal(m, b) -} -func (m *ListVolumesResponse_VolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Marshal(b, m, deterministic) -} -func (m *ListVolumesResponse_VolumeStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListVolumesResponse_VolumeStatus.Merge(m, src) -} -func (m *ListVolumesResponse_VolumeStatus) XXX_Size() int { - return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Size(m) +func (x *ControllerGetVolumeRequest) Reset() { + *x = ControllerGetVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ListVolumesResponse_VolumeStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ListVolumesResponse_VolumeStatus.DiscardUnknown(m) + +func (x *ControllerGetVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListVolumesResponse_VolumeStatus proto.InternalMessageInfo +func (*ControllerGetVolumeRequest) ProtoMessage() {} -func (m *ListVolumesResponse_VolumeStatus) GetPublishedNodeIds() []string { - if m != nil { - return m.PublishedNodeIds +func (x *ControllerGetVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (m *ListVolumesResponse_VolumeStatus) GetVolumeCondition() *VolumeCondition { - if m != nil { - return m.VolumeCondition +// Deprecated: Use ControllerGetVolumeRequest.ProtoReflect.Descriptor instead. +func (*ControllerGetVolumeRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{25} +} + +func (x *ControllerGetVolumeRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId } - return nil + return "" } -type ListVolumesResponse_Entry struct { +type ControllerGetVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // This field is REQUIRED Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"` - // This field is OPTIONAL. 
This field MUST be specified if the - // LIST_VOLUMES_PUBLISHED_NODES controller capability is - // supported. - Status *ListVolumesResponse_VolumeStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // This field is REQUIRED. + Status *ControllerGetVolumeResponse_VolumeStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` } -func (m *ListVolumesResponse_Entry) Reset() { *m = ListVolumesResponse_Entry{} } -func (m *ListVolumesResponse_Entry) String() string { return proto.CompactTextString(m) } -func (*ListVolumesResponse_Entry) ProtoMessage() {} -func (*ListVolumesResponse_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{24, 1} +func (x *ControllerGetVolumeResponse) Reset() { + *x = ControllerGetVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ListVolumesResponse_Entry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListVolumesResponse_Entry.Unmarshal(m, b) +func (x *ControllerGetVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListVolumesResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListVolumesResponse_Entry.Marshal(b, m, deterministic) -} -func (m *ListVolumesResponse_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListVolumesResponse_Entry.Merge(m, src) -} -func (m *ListVolumesResponse_Entry) XXX_Size() int { - return xxx_messageInfo_ListVolumesResponse_Entry.Size(m) -} -func (m *ListVolumesResponse_Entry) XXX_DiscardUnknown() { - xxx_messageInfo_ListVolumesResponse_Entry.DiscardUnknown(m) + +func (*ControllerGetVolumeResponse) ProtoMessage() {} + +func (x *ControllerGetVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ListVolumesResponse_Entry proto.InternalMessageInfo +// Deprecated: Use ControllerGetVolumeResponse.ProtoReflect.Descriptor instead. +func (*ControllerGetVolumeResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{26} +} -func (m *ListVolumesResponse_Entry) GetVolume() *Volume { - if m != nil { - return m.Volume +func (x *ControllerGetVolumeResponse) GetVolume() *Volume { + if x != nil { + return x.Volume } return nil } -func (m *ListVolumesResponse_Entry) GetStatus() *ListVolumesResponse_VolumeStatus { - if m != nil { - return m.Status +func (x *ControllerGetVolumeResponse) GetStatus() *ControllerGetVolumeResponse_VolumeStatus { + if x != nil { + return x.Status } return nil } -type ControllerGetVolumeRequest struct { - // The ID of the volume to fetch current volume information for. +type ControllerModifyVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Contains identity information for the existing volume. // This field is REQUIRED. 
- VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // Secrets required by plugin to complete modify volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Plugin specific volume attributes to mutate, passed in as + // opaque key-value pairs. + // This field is REQUIRED. The Plugin is responsible for + // parsing and validating these parameters. COs will treat these + // as opaque. The CO SHOULD specify the intended values of all mutable + // parameters it intends to modify. SPs MUST NOT modify volumes based + // on the absence of keys, only keys that are specified should result + // in modifications to the volume. + MutableParameters map[string]string `protobuf:"bytes,3,rep,name=mutable_parameters,json=mutableParameters,proto3" json:"mutable_parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *ControllerGetVolumeRequest) Reset() { *m = ControllerGetVolumeRequest{} } -func (m *ControllerGetVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*ControllerGetVolumeRequest) ProtoMessage() {} -func (*ControllerGetVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{25} +func (x *ControllerModifyVolumeRequest) Reset() { + *x = ControllerModifyVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ControllerGetVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerGetVolumeRequest.Unmarshal(m, b) -} -func (m *ControllerGetVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerGetVolumeRequest.Marshal(b, m, deterministic) -} -func (m *ControllerGetVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerGetVolumeRequest.Merge(m, src) -} -func (m *ControllerGetVolumeRequest) XXX_Size() int { - return xxx_messageInfo_ControllerGetVolumeRequest.Size(m) -} -func (m *ControllerGetVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerGetVolumeRequest.DiscardUnknown(m) +func (x *ControllerModifyVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ControllerGetVolumeRequest proto.InternalMessageInfo +func (*ControllerModifyVolumeRequest) ProtoMessage() {} -func (m *ControllerGetVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *ControllerModifyVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" -} - -type ControllerGetVolumeResponse struct { - // This field is REQUIRED - Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"` - // This field is REQUIRED. 
- Status *ControllerGetVolumeResponse_VolumeStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + return mi.MessageOf(x) } -func (m *ControllerGetVolumeResponse) Reset() { *m = ControllerGetVolumeResponse{} } -func (m *ControllerGetVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*ControllerGetVolumeResponse) ProtoMessage() {} -func (*ControllerGetVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{26} +// Deprecated: Use ControllerModifyVolumeRequest.ProtoReflect.Descriptor instead. +func (*ControllerModifyVolumeRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{27} } -func (m *ControllerGetVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerGetVolumeResponse.Unmarshal(m, b) -} -func (m *ControllerGetVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerGetVolumeResponse.Marshal(b, m, deterministic) -} -func (m *ControllerGetVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerGetVolumeResponse.Merge(m, src) -} -func (m *ControllerGetVolumeResponse) XXX_Size() int { - return xxx_messageInfo_ControllerGetVolumeResponse.Size(m) -} -func (m *ControllerGetVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerGetVolumeResponse.DiscardUnknown(m) +func (x *ControllerModifyVolumeRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId + } + return "" } -var xxx_messageInfo_ControllerGetVolumeResponse proto.InternalMessageInfo - -func (m *ControllerGetVolumeResponse) GetVolume() *Volume { - if m != nil { - return m.Volume +func (x *ControllerModifyVolumeRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } -func (m *ControllerGetVolumeResponse) GetStatus() *ControllerGetVolumeResponse_VolumeStatus { - if m != nil { - return m.Status +func (x *ControllerModifyVolumeRequest) GetMutableParameters() map[string]string { + if x != nil { + return x.MutableParameters } return nil } -type ControllerGetVolumeResponse_VolumeStatus struct { - // A list of all the `node_id` of nodes that this volume is - // controller published on. - // This field is OPTIONAL. - // This field MUST be specified if the LIST_VOLUMES_PUBLISHED_NODES - // controller capability is supported. - // published_node_ids MAY include nodes not published to or - // reported by the SP. The CO MUST be resilient to that. - PublishedNodeIds []string `protobuf:"bytes,1,rep,name=published_node_ids,json=publishedNodeIds,proto3" json:"published_node_ids,omitempty"` - // Information about the current condition of the volume. - // This field is OPTIONAL. - // This field MUST be specified if the - // VOLUME_CONDITION controller capability is supported. 
- VolumeCondition *VolumeCondition `protobuf:"bytes,2,opt,name=volume_condition,json=volumeCondition,proto3" json:"volume_condition,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type ControllerModifyVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *ControllerGetVolumeResponse_VolumeStatus) Reset() { - *m = ControllerGetVolumeResponse_VolumeStatus{} -} -func (m *ControllerGetVolumeResponse_VolumeStatus) String() string { return proto.CompactTextString(m) } -func (*ControllerGetVolumeResponse_VolumeStatus) ProtoMessage() {} -func (*ControllerGetVolumeResponse_VolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{26, 0} +func (x *ControllerModifyVolumeResponse) Reset() { + *x = ControllerModifyVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.Unmarshal(m, b) -} -func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.Marshal(b, m, deterministic) -} -func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.Merge(m, src) -} -func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_Size() int { - return xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.Size(m) -} -func (m *ControllerGetVolumeResponse_VolumeStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus.DiscardUnknown(m) +func (x *ControllerModifyVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ControllerGetVolumeResponse_VolumeStatus proto.InternalMessageInfo +func (*ControllerModifyVolumeResponse) ProtoMessage() {} -func (m *ControllerGetVolumeResponse_VolumeStatus) GetPublishedNodeIds() []string { - if m != nil { - return m.PublishedNodeIds +func (x *ControllerModifyVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (m *ControllerGetVolumeResponse_VolumeStatus) GetVolumeCondition() *VolumeCondition { - if m != nil { - return m.VolumeCondition - } - return nil +// Deprecated: Use ControllerModifyVolumeResponse.ProtoReflect.Descriptor instead. +func (*ControllerModifyVolumeResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{28} } type GetCapacityRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // If specified, the Plugin SHALL report the capacity of the storage // that can be used to provision volumes that satisfy ALL of the // specified `volume_capabilities`. These are the same @@ -2829,59 +2823,67 @@ type GetCapacityRequest struct { // `accessible_topology` the CO returns in a `CreateVolumeResponse`. // This field is OPTIONAL. 
This field SHALL NOT be set unless the // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability. - AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` } -func (m *GetCapacityRequest) Reset() { *m = GetCapacityRequest{} } -func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) } -func (*GetCapacityRequest) ProtoMessage() {} -func (*GetCapacityRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{27} +func (x *GetCapacityRequest) Reset() { + *x = GetCapacityRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetCapacityRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetCapacityRequest.Unmarshal(m, b) -} -func (m *GetCapacityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetCapacityRequest.Marshal(b, m, deterministic) -} -func (m *GetCapacityRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetCapacityRequest.Merge(m, src) -} -func (m *GetCapacityRequest) XXX_Size() int { - return xxx_messageInfo_GetCapacityRequest.Size(m) +func (x *GetCapacityRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetCapacityRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetCapacityRequest.DiscardUnknown(m) + +func (*GetCapacityRequest) ProtoMessage() {} + +func (x *GetCapacityRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GetCapacityRequest proto.InternalMessageInfo +// Deprecated: Use GetCapacityRequest.ProtoReflect.Descriptor instead. +func (*GetCapacityRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{29} +} -func (m *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability { - if m != nil { - return m.VolumeCapabilities +func (x *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability { + if x != nil { + return x.VolumeCapabilities } return nil } -func (m *GetCapacityRequest) GetParameters() map[string]string { - if m != nil { - return m.Parameters +func (x *GetCapacityRequest) GetParameters() map[string]string { + if x != nil { + return x.Parameters } return nil } -func (m *GetCapacityRequest) GetAccessibleTopology() *Topology { - if m != nil { - return m.AccessibleTopology +func (x *GetCapacityRequest) GetAccessibleTopology() *Topology { + if x != nil { + return x.AccessibleTopology } return nil } type GetCapacityResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The available capacity, in bytes, of the storage that can be used // to provision volumes. 
If `volume_capabilities` or `parameters` is // specified in the request, the Plugin SHALL take those into @@ -2904,7 +2906,7 @@ type GetCapacityResponse struct { // a maximum size for individual volumes and leave it unset // otherwise. COs MAY use it to make decision about // where to create volumes. - MaximumVolumeSize *wrappers.Int64Value `protobuf:"bytes,2,opt,name=maximum_volume_size,json=maximumVolumeSize,proto3" json:"maximum_volume_size,omitempty"` + MaximumVolumeSize *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=maximum_volume_size,json=maximumVolumeSize,proto3" json:"maximum_volume_size,omitempty"` // The smallest size that may be used in a // CreateVolumeRequest.capacity_range.limit_bytes field // to create a volume with the same parameters as those in @@ -2920,176 +2922,193 @@ type GetCapacityResponse struct { // a minimum size for individual volumes and leave it unset // otherwise. COs MAY use it to make decision about // where to create volumes. - MinimumVolumeSize *wrappers.Int64Value `protobuf:"bytes,3,opt,name=minimum_volume_size,json=minimumVolumeSize,proto3" json:"minimum_volume_size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + MinimumVolumeSize *wrapperspb.Int64Value `protobuf:"bytes,3,opt,name=minimum_volume_size,json=minimumVolumeSize,proto3" json:"minimum_volume_size,omitempty"` } -func (m *GetCapacityResponse) Reset() { *m = GetCapacityResponse{} } -func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) } -func (*GetCapacityResponse) ProtoMessage() {} -func (*GetCapacityResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{28} +func (x *GetCapacityResponse) Reset() { + *x = GetCapacityResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetCapacityResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetCapacityResponse.Unmarshal(m, b) -} -func (m *GetCapacityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetCapacityResponse.Marshal(b, m, deterministic) +func (x *GetCapacityResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetCapacityResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetCapacityResponse.Merge(m, src) -} -func (m *GetCapacityResponse) XXX_Size() int { - return xxx_messageInfo_GetCapacityResponse.Size(m) -} -func (m *GetCapacityResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetCapacityResponse.DiscardUnknown(m) + +func (*GetCapacityResponse) ProtoMessage() {} + +func (x *GetCapacityResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GetCapacityResponse proto.InternalMessageInfo +// Deprecated: Use GetCapacityResponse.ProtoReflect.Descriptor instead. 
+func (*GetCapacityResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{30} +} -func (m *GetCapacityResponse) GetAvailableCapacity() int64 { - if m != nil { - return m.AvailableCapacity +func (x *GetCapacityResponse) GetAvailableCapacity() int64 { + if x != nil { + return x.AvailableCapacity } return 0 } -func (m *GetCapacityResponse) GetMaximumVolumeSize() *wrappers.Int64Value { - if m != nil { - return m.MaximumVolumeSize +func (x *GetCapacityResponse) GetMaximumVolumeSize() *wrapperspb.Int64Value { + if x != nil { + return x.MaximumVolumeSize } return nil } -func (m *GetCapacityResponse) GetMinimumVolumeSize() *wrappers.Int64Value { - if m != nil { - return m.MinimumVolumeSize +func (x *GetCapacityResponse) GetMinimumVolumeSize() *wrapperspb.Int64Value { + if x != nil { + return x.MinimumVolumeSize } return nil } type ControllerGetCapabilitiesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *ControllerGetCapabilitiesRequest) Reset() { *m = ControllerGetCapabilitiesRequest{} } -func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } -func (*ControllerGetCapabilitiesRequest) ProtoMessage() {} -func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{29} +func (x *ControllerGetCapabilitiesRequest) Reset() { + *x = ControllerGetCapabilitiesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ControllerGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerGetCapabilitiesRequest.Unmarshal(m, b) +func (x *ControllerGetCapabilitiesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ControllerGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerGetCapabilitiesRequest.Marshal(b, m, deterministic) -} -func (m *ControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerGetCapabilitiesRequest.Merge(m, src) -} -func (m *ControllerGetCapabilitiesRequest) XXX_Size() int { - return xxx_messageInfo_ControllerGetCapabilitiesRequest.Size(m) -} -func (m *ControllerGetCapabilitiesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerGetCapabilitiesRequest.DiscardUnknown(m) + +func (*ControllerGetCapabilitiesRequest) ProtoMessage() {} + +func (x *ControllerGetCapabilitiesRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ControllerGetCapabilitiesRequest proto.InternalMessageInfo +// Deprecated: Use ControllerGetCapabilitiesRequest.ProtoReflect.Descriptor instead. +func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{31} +} type ControllerGetCapabilitiesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // All the capabilities that the controller service supports. This // field is OPTIONAL. 
- Capabilities []*ControllerServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Capabilities []*ControllerServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` } -func (m *ControllerGetCapabilitiesResponse) Reset() { *m = ControllerGetCapabilitiesResponse{} } -func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } -func (*ControllerGetCapabilitiesResponse) ProtoMessage() {} -func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{30} +func (x *ControllerGetCapabilitiesResponse) Reset() { + *x = ControllerGetCapabilitiesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ControllerGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerGetCapabilitiesResponse.Unmarshal(m, b) -} -func (m *ControllerGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerGetCapabilitiesResponse.Marshal(b, m, deterministic) +func (x *ControllerGetCapabilitiesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerGetCapabilitiesResponse.Merge(m, src) -} -func (m *ControllerGetCapabilitiesResponse) XXX_Size() int { - return xxx_messageInfo_ControllerGetCapabilitiesResponse.Size(m) -} -func (m *ControllerGetCapabilitiesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerGetCapabilitiesResponse.DiscardUnknown(m) + +func (*ControllerGetCapabilitiesResponse) ProtoMessage() {} + +func (x *ControllerGetCapabilitiesResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ControllerGetCapabilitiesResponse proto.InternalMessageInfo +// Deprecated: Use ControllerGetCapabilitiesResponse.ProtoReflect.Descriptor instead. +func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{32} +} -func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServiceCapability { - if m != nil { - return m.Capabilities +func (x *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServiceCapability { + if x != nil { + return x.Capabilities } return nil } // Specifies a capability of the controller service. 
type ControllerServiceCapability struct { - // Types that are valid to be assigned to Type: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Type: // // *ControllerServiceCapability_Rpc - Type isControllerServiceCapability_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Type isControllerServiceCapability_Type `protobuf_oneof:"type"` } -func (m *ControllerServiceCapability) Reset() { *m = ControllerServiceCapability{} } -func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) } -func (*ControllerServiceCapability) ProtoMessage() {} -func (*ControllerServiceCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{31} +func (x *ControllerServiceCapability) Reset() { + *x = ControllerServiceCapability{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ControllerServiceCapability) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerServiceCapability.Unmarshal(m, b) -} -func (m *ControllerServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerServiceCapability.Marshal(b, m, deterministic) -} -func (m *ControllerServiceCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerServiceCapability.Merge(m, src) -} -func (m *ControllerServiceCapability) XXX_Size() int { - return xxx_messageInfo_ControllerServiceCapability.Size(m) -} -func (m *ControllerServiceCapability) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerServiceCapability.DiscardUnknown(m) +func (x *ControllerServiceCapability) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ControllerServiceCapability proto.InternalMessageInfo +func (*ControllerServiceCapability) ProtoMessage() {} -type isControllerServiceCapability_Type interface { - isControllerServiceCapability_Type() +func (x *ControllerServiceCapability) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type ControllerServiceCapability_Rpc struct { - Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"` +// Deprecated: Use ControllerServiceCapability.ProtoReflect.Descriptor instead. +func (*ControllerServiceCapability) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{33} } -func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {} - func (m *ControllerServiceCapability) GetType() isControllerServiceCapability_Type { if m != nil { return m.Type @@ -3097,60 +3116,29 @@ func (m *ControllerServiceCapability) GetType() isControllerServiceCapability_Ty return nil } -func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC { - if x, ok := m.GetType().(*ControllerServiceCapability_Rpc); ok { +func (x *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC { + if x, ok := x.GetType().(*ControllerServiceCapability_Rpc); ok { return x.Rpc } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*ControllerServiceCapability) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ControllerServiceCapability_Rpc)(nil), - } -} - -type ControllerServiceCapability_RPC struct { - Type ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.ControllerServiceCapability_RPC_Type" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ControllerServiceCapability_RPC) Reset() { *m = ControllerServiceCapability_RPC{} } -func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) } -func (*ControllerServiceCapability_RPC) ProtoMessage() {} -func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{31, 0} +type isControllerServiceCapability_Type interface { + isControllerServiceCapability_Type() } -func (m *ControllerServiceCapability_RPC) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerServiceCapability_RPC.Unmarshal(m, b) -} -func (m *ControllerServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerServiceCapability_RPC.Marshal(b, m, deterministic) -} -func (m *ControllerServiceCapability_RPC) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerServiceCapability_RPC.Merge(m, src) -} -func (m *ControllerServiceCapability_RPC) XXX_Size() int { - return xxx_messageInfo_ControllerServiceCapability_RPC.Size(m) -} -func (m *ControllerServiceCapability_RPC) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerServiceCapability_RPC.DiscardUnknown(m) +type ControllerServiceCapability_Rpc struct { + // RPC that the controller supports. + Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"` } -var xxx_messageInfo_ControllerServiceCapability_RPC proto.InternalMessageInfo - -func (m *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type { - if m != nil { - return m.Type - } - return ControllerServiceCapability_RPC_UNKNOWN -} +func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {} type CreateSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the source volume to be snapshotted. // This field is REQUIRED. SourceVolumeId string `protobuf:"bytes,1,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"` @@ -3175,109 +3163,125 @@ type CreateSnapshotRequest struct { // - Specify if the snapshot should be replicated to some place. // - Specify primary or secondary for replication systems that // support snapshotting only on primary. 
- Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *CreateSnapshotRequest) Reset() { *m = CreateSnapshotRequest{} } -func (m *CreateSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*CreateSnapshotRequest) ProtoMessage() {} -func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{32} +func (x *CreateSnapshotRequest) Reset() { + *x = CreateSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CreateSnapshotRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSnapshotRequest.Unmarshal(m, b) -} -func (m *CreateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSnapshotRequest.Marshal(b, m, deterministic) -} -func (m *CreateSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSnapshotRequest.Merge(m, src) +func (x *CreateSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CreateSnapshotRequest) XXX_Size() int { - return xxx_messageInfo_CreateSnapshotRequest.Size(m) -} -func (m *CreateSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSnapshotRequest.DiscardUnknown(m) + +func (*CreateSnapshotRequest) ProtoMessage() {} + +func (x *CreateSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_CreateSnapshotRequest proto.InternalMessageInfo +// Deprecated: Use CreateSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{34} +} -func (m *CreateSnapshotRequest) GetSourceVolumeId() string { - if m != nil { - return m.SourceVolumeId +func (x *CreateSnapshotRequest) GetSourceVolumeId() string { + if x != nil { + return x.SourceVolumeId } return "" } -func (m *CreateSnapshotRequest) GetName() string { - if m != nil { - return m.Name +func (x *CreateSnapshotRequest) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *CreateSnapshotRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *CreateSnapshotRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } -func (m *CreateSnapshotRequest) GetParameters() map[string]string { - if m != nil { - return m.Parameters +func (x *CreateSnapshotRequest) GetParameters() map[string]string { + if x != nil { + return x.Parameters } return nil } type CreateSnapshotResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Contains all attributes of the newly created snapshot that are // relevant to the CO along with information required by the Plugin // to uniquely identify the snapshot. This field is REQUIRED. - Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` } -func (m *CreateSnapshotResponse) Reset() { *m = CreateSnapshotResponse{} } -func (m *CreateSnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*CreateSnapshotResponse) ProtoMessage() {} -func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{33} +func (x *CreateSnapshotResponse) Reset() { + *x = CreateSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CreateSnapshotResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSnapshotResponse.Unmarshal(m, b) -} -func (m *CreateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSnapshotResponse.Marshal(b, m, deterministic) -} -func (m *CreateSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSnapshotResponse.Merge(m, src) -} -func (m *CreateSnapshotResponse) XXX_Size() int { - return xxx_messageInfo_CreateSnapshotResponse.Size(m) +func (x *CreateSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CreateSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSnapshotResponse.DiscardUnknown(m) + +func (*CreateSnapshotResponse) ProtoMessage() {} + +func (x *CreateSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_CreateSnapshotResponse proto.InternalMessageInfo +// Deprecated: Use CreateSnapshotResponse.ProtoReflect.Descriptor instead. 
+func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{35} +} -func (m *CreateSnapshotResponse) GetSnapshot() *Snapshot { - if m != nil { - return m.Snapshot +func (x *CreateSnapshotResponse) GetSnapshot() *Snapshot { + if x != nil { + return x.Snapshot } return nil } // Information about a specific snapshot. type Snapshot struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // This is the complete size of the snapshot in bytes. The purpose of // this field is to give CO guidance on how much space is needed to // create a volume from this snapshot. The size of the volume MUST NOT @@ -3302,7 +3306,7 @@ type Snapshot struct { SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"` // Timestamp when the point-in-time snapshot is taken on the storage // system. This field is REQUIRED. - CreationTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + CreationTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` // Indicates if a snapshot is ready to use as a // `volume_content_source` in a `CreateVolumeRequest`. The default // value is false. This field is REQUIRED. @@ -3320,165 +3324,188 @@ type Snapshot struct { // and SP SHALL allow it to be deleted separately. // If this message is inside a VolumeGroupSnapshot message, the value // MUST be the same as the group_snapshot_id in that message. - GroupSnapshotId string `protobuf:"bytes,6,opt,name=group_snapshot_id,json=groupSnapshotId,proto3" json:"group_snapshot_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + GroupSnapshotId string `protobuf:"bytes,6,opt,name=group_snapshot_id,json=groupSnapshotId,proto3" json:"group_snapshot_id,omitempty"` } -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{34} +func (x *Snapshot) Reset() { + *x = Snapshot{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Snapshot) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Snapshot.Unmarshal(m, b) -} -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) -} -func (m *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(m, src) +func (x *Snapshot) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Snapshot) XXX_Size() int { - return xxx_messageInfo_Snapshot.Size(m) -} -func (m *Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_Snapshot.DiscardUnknown(m) + +func (*Snapshot) ProtoMessage() {} + +func (x *Snapshot) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Snapshot proto.InternalMessageInfo +// Deprecated: Use Snapshot.ProtoReflect.Descriptor instead. 
+func (*Snapshot) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{36} +} -func (m *Snapshot) GetSizeBytes() int64 { - if m != nil { - return m.SizeBytes +func (x *Snapshot) GetSizeBytes() int64 { + if x != nil { + return x.SizeBytes } return 0 } -func (m *Snapshot) GetSnapshotId() string { - if m != nil { - return m.SnapshotId +func (x *Snapshot) GetSnapshotId() string { + if x != nil { + return x.SnapshotId } return "" } -func (m *Snapshot) GetSourceVolumeId() string { - if m != nil { - return m.SourceVolumeId +func (x *Snapshot) GetSourceVolumeId() string { + if x != nil { + return x.SourceVolumeId } return "" } -func (m *Snapshot) GetCreationTime() *timestamp.Timestamp { - if m != nil { - return m.CreationTime +func (x *Snapshot) GetCreationTime() *timestamppb.Timestamp { + if x != nil { + return x.CreationTime } return nil } -func (m *Snapshot) GetReadyToUse() bool { - if m != nil { - return m.ReadyToUse +func (x *Snapshot) GetReadyToUse() bool { + if x != nil { + return x.ReadyToUse } return false } -func (m *Snapshot) GetGroupSnapshotId() string { - if m != nil { - return m.GroupSnapshotId +func (x *Snapshot) GetGroupSnapshotId() string { + if x != nil { + return x.GroupSnapshotId } return "" } type DeleteSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the snapshot to be deleted. // This field is REQUIRED. SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` // Secrets required by plugin to complete snapshot deletion request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - Secrets map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Secrets map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *DeleteSnapshotRequest) Reset() { *m = DeleteSnapshotRequest{} } -func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteSnapshotRequest) ProtoMessage() {} -func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{35} +func (x *DeleteSnapshotRequest) Reset() { + *x = DeleteSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *DeleteSnapshotRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteSnapshotRequest.Unmarshal(m, b) -} -func (m *DeleteSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteSnapshotRequest.Marshal(b, m, deterministic) +func (x *DeleteSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteSnapshotRequest.Merge(m, src) -} -func (m *DeleteSnapshotRequest) XXX_Size() int { - return xxx_messageInfo_DeleteSnapshotRequest.Size(m) -} -func (m *DeleteSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteSnapshotRequest.DiscardUnknown(m) + +func (*DeleteSnapshotRequest) 
ProtoMessage() {} + +func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_DeleteSnapshotRequest proto.InternalMessageInfo +// Deprecated: Use DeleteSnapshotRequest.ProtoReflect.Descriptor instead. +func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{37} +} -func (m *DeleteSnapshotRequest) GetSnapshotId() string { - if m != nil { - return m.SnapshotId +func (x *DeleteSnapshotRequest) GetSnapshotId() string { + if x != nil { + return x.SnapshotId } return "" } -func (m *DeleteSnapshotRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *DeleteSnapshotRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } type DeleteSnapshotResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *DeleteSnapshotResponse) Reset() { *m = DeleteSnapshotResponse{} } -func (m *DeleteSnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteSnapshotResponse) ProtoMessage() {} -func (*DeleteSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{36} +func (x *DeleteSnapshotResponse) Reset() { + *x = DeleteSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *DeleteSnapshotResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteSnapshotResponse.Unmarshal(m, b) -} -func (m *DeleteSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteSnapshotResponse.Marshal(b, m, deterministic) +func (x *DeleteSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteSnapshotResponse.Merge(m, src) -} -func (m *DeleteSnapshotResponse) XXX_Size() int { - return xxx_messageInfo_DeleteSnapshotResponse.Size(m) -} -func (m *DeleteSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteSnapshotResponse.DiscardUnknown(m) + +func (*DeleteSnapshotResponse) ProtoMessage() {} + +func (x *DeleteSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_DeleteSnapshotResponse proto.InternalMessageInfo +// Deprecated: Use DeleteSnapshotResponse.ProtoReflect.Descriptor instead. +func (*DeleteSnapshotResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{38} +} // List all snapshots on the storage system regardless of how they were // created. type ListSnapshotsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // If specified (non-zero value), the Plugin MUST NOT return more // entries than this number in the response. 
If the actual number of // entries is more than this number, the Plugin MUST set `next_token` @@ -3505,73 +3532,81 @@ type ListSnapshotsRequest struct { // Secrets required by plugin to complete ListSnapshot request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *ListSnapshotsRequest) Reset() { *m = ListSnapshotsRequest{} } -func (m *ListSnapshotsRequest) String() string { return proto.CompactTextString(m) } -func (*ListSnapshotsRequest) ProtoMessage() {} -func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{37} +func (x *ListSnapshotsRequest) Reset() { + *x = ListSnapshotsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListSnapshotsRequest.Unmarshal(m, b) -} -func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic) +func (x *ListSnapshotsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListSnapshotsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsRequest.Merge(m, src) -} -func (m *ListSnapshotsRequest) XXX_Size() int { - return xxx_messageInfo_ListSnapshotsRequest.Size(m) -} -func (m *ListSnapshotsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m) + +func (*ListSnapshotsRequest) ProtoMessage() {} + +func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo +// Deprecated: Use ListSnapshotsRequest.ProtoReflect.Descriptor instead. 
+func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{39} +} -func (m *ListSnapshotsRequest) GetMaxEntries() int32 { - if m != nil { - return m.MaxEntries +func (x *ListSnapshotsRequest) GetMaxEntries() int32 { + if x != nil { + return x.MaxEntries } return 0 } -func (m *ListSnapshotsRequest) GetStartingToken() string { - if m != nil { - return m.StartingToken +func (x *ListSnapshotsRequest) GetStartingToken() string { + if x != nil { + return x.StartingToken } return "" } -func (m *ListSnapshotsRequest) GetSourceVolumeId() string { - if m != nil { - return m.SourceVolumeId +func (x *ListSnapshotsRequest) GetSourceVolumeId() string { + if x != nil { + return x.SourceVolumeId } return "" } -func (m *ListSnapshotsRequest) GetSnapshotId() string { - if m != nil { - return m.SnapshotId +func (x *ListSnapshotsRequest) GetSnapshotId() string { + if x != nil { + return x.SnapshotId } return "" } -func (m *ListSnapshotsRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *ListSnapshotsRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } type ListSnapshotsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Entries []*ListSnapshotsResponse_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` // This token allows you to get the next page of entries for // `ListSnapshots` request. If the number of entries is larger than @@ -3579,91 +3614,60 @@ type ListSnapshotsResponse struct { // `starting_token` field in the next `ListSnapshots` request. This // field is OPTIONAL. // An empty string is equal to an unspecified field value. - NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"` } -func (m *ListSnapshotsResponse) Reset() { *m = ListSnapshotsResponse{} } -func (m *ListSnapshotsResponse) String() string { return proto.CompactTextString(m) } -func (*ListSnapshotsResponse) ProtoMessage() {} -func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{38} +func (x *ListSnapshotsResponse) Reset() { + *x = ListSnapshotsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListSnapshotsResponse.Unmarshal(m, b) -} -func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic) +func (x *ListSnapshotsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListSnapshotsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsResponse.Merge(m, src) -} -func (m *ListSnapshotsResponse) XXX_Size() int { - return xxx_messageInfo_ListSnapshotsResponse.Size(m) -} -func (m *ListSnapshotsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo -func (m *ListSnapshotsResponse) GetEntries() []*ListSnapshotsResponse_Entry { - if m != nil { - 
return m.Entries - } - return nil -} +func (*ListSnapshotsResponse) ProtoMessage() {} -func (m *ListSnapshotsResponse) GetNextToken() string { - if m != nil { - return m.NextToken +func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" -} - -type ListSnapshotsResponse_Entry struct { - Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + return mi.MessageOf(x) } -func (m *ListSnapshotsResponse_Entry) Reset() { *m = ListSnapshotsResponse_Entry{} } -func (m *ListSnapshotsResponse_Entry) String() string { return proto.CompactTextString(m) } -func (*ListSnapshotsResponse_Entry) ProtoMessage() {} -func (*ListSnapshotsResponse_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{38, 0} +// Deprecated: Use ListSnapshotsResponse.ProtoReflect.Descriptor instead. +func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{40} } -func (m *ListSnapshotsResponse_Entry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListSnapshotsResponse_Entry.Unmarshal(m, b) -} -func (m *ListSnapshotsResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListSnapshotsResponse_Entry.Marshal(b, m, deterministic) -} -func (m *ListSnapshotsResponse_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsResponse_Entry.Merge(m, src) -} -func (m *ListSnapshotsResponse_Entry) XXX_Size() int { - return xxx_messageInfo_ListSnapshotsResponse_Entry.Size(m) -} -func (m *ListSnapshotsResponse_Entry) XXX_DiscardUnknown() { - xxx_messageInfo_ListSnapshotsResponse_Entry.DiscardUnknown(m) +func (x *ListSnapshotsResponse) GetEntries() []*ListSnapshotsResponse_Entry { + if x != nil { + return x.Entries + } + return nil } -var xxx_messageInfo_ListSnapshotsResponse_Entry proto.InternalMessageInfo - -func (m *ListSnapshotsResponse_Entry) GetSnapshot() *Snapshot { - if m != nil { - return m.Snapshot +func (x *ListSnapshotsResponse) GetNextToken() string { + if x != nil { + return x.NextToken } - return nil + return "" } type ControllerExpandVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the volume to expand. This field is REQUIRED. VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // This allows CO to specify the capacity requirements of the volume @@ -3679,117 +3683,133 @@ type ControllerExpandVolumeRequest struct { // node_expansion_required to false in ControllerExpandVolumeResponse // to skip invocation of NodeExpandVolume on the node by the CO. // This is an OPTIONAL field. 
- VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` } -func (m *ControllerExpandVolumeRequest) Reset() { *m = ControllerExpandVolumeRequest{} } -func (m *ControllerExpandVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*ControllerExpandVolumeRequest) ProtoMessage() {} -func (*ControllerExpandVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{39} +func (x *ControllerExpandVolumeRequest) Reset() { + *x = ControllerExpandVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ControllerExpandVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerExpandVolumeRequest.Unmarshal(m, b) -} -func (m *ControllerExpandVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerExpandVolumeRequest.Marshal(b, m, deterministic) -} -func (m *ControllerExpandVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerExpandVolumeRequest.Merge(m, src) -} -func (m *ControllerExpandVolumeRequest) XXX_Size() int { - return xxx_messageInfo_ControllerExpandVolumeRequest.Size(m) +func (x *ControllerExpandVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ControllerExpandVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerExpandVolumeRequest.DiscardUnknown(m) + +func (*ControllerExpandVolumeRequest) ProtoMessage() {} + +func (x *ControllerExpandVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ControllerExpandVolumeRequest proto.InternalMessageInfo +// Deprecated: Use ControllerExpandVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*ControllerExpandVolumeRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{41} +} -func (m *ControllerExpandVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *ControllerExpandVolumeRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId } return "" } -func (m *ControllerExpandVolumeRequest) GetCapacityRange() *CapacityRange { - if m != nil { - return m.CapacityRange +func (x *ControllerExpandVolumeRequest) GetCapacityRange() *CapacityRange { + if x != nil { + return x.CapacityRange } return nil } -func (m *ControllerExpandVolumeRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *ControllerExpandVolumeRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } -func (m *ControllerExpandVolumeRequest) GetVolumeCapability() *VolumeCapability { - if m != nil { - return m.VolumeCapability +func (x *ControllerExpandVolumeRequest) GetVolumeCapability() *VolumeCapability { + if x != nil { + return x.VolumeCapability } return nil } type ControllerExpandVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Capacity of volume after expansion. This field is REQUIRED. CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"` // Whether node expansion is required for the volume. When true // the CO MUST make NodeExpandVolume RPC call on the node. This field // is REQUIRED. - NodeExpansionRequired bool `protobuf:"varint,2,opt,name=node_expansion_required,json=nodeExpansionRequired,proto3" json:"node_expansion_required,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + NodeExpansionRequired bool `protobuf:"varint,2,opt,name=node_expansion_required,json=nodeExpansionRequired,proto3" json:"node_expansion_required,omitempty"` } -func (m *ControllerExpandVolumeResponse) Reset() { *m = ControllerExpandVolumeResponse{} } -func (m *ControllerExpandVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*ControllerExpandVolumeResponse) ProtoMessage() {} -func (*ControllerExpandVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{40} +func (x *ControllerExpandVolumeResponse) Reset() { + *x = ControllerExpandVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ControllerExpandVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ControllerExpandVolumeResponse.Unmarshal(m, b) +func (x *ControllerExpandVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ControllerExpandVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ControllerExpandVolumeResponse.Marshal(b, m, deterministic) -} -func (m *ControllerExpandVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerExpandVolumeResponse.Merge(m, src) -} -func (m *ControllerExpandVolumeResponse) XXX_Size() int { - return xxx_messageInfo_ControllerExpandVolumeResponse.Size(m) -} -func (m *ControllerExpandVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerExpandVolumeResponse.DiscardUnknown(m) + +func (*ControllerExpandVolumeResponse) ProtoMessage() {} + +func (x 
*ControllerExpandVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ControllerExpandVolumeResponse proto.InternalMessageInfo +// Deprecated: Use ControllerExpandVolumeResponse.ProtoReflect.Descriptor instead. +func (*ControllerExpandVolumeResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{42} +} -func (m *ControllerExpandVolumeResponse) GetCapacityBytes() int64 { - if m != nil { - return m.CapacityBytes +func (x *ControllerExpandVolumeResponse) GetCapacityBytes() int64 { + if x != nil { + return x.CapacityBytes } return 0 } -func (m *ControllerExpandVolumeResponse) GetNodeExpansionRequired() bool { - if m != nil { - return m.NodeExpansionRequired +func (x *ControllerExpandVolumeResponse) GetNodeExpansionRequired() bool { + if x != nil { + return x.NodeExpansionRequired } return false } type NodeStageVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the volume to publish. This field is REQUIRED. VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // The CO SHALL set this field to the value returned by @@ -3825,111 +3845,126 @@ type NodeStageVolumeRequest struct { // CreateVolumeResponse.Volume.volume_context. // This field is OPTIONAL and MUST match the volume_context of the // volume identified by `volume_id`. - VolumeContext map[string]string `protobuf:"bytes,6,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + VolumeContext map[string]string `protobuf:"bytes,6,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *NodeStageVolumeRequest) Reset() { *m = NodeStageVolumeRequest{} } -func (m *NodeStageVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*NodeStageVolumeRequest) ProtoMessage() {} -func (*NodeStageVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{41} +func (x *NodeStageVolumeRequest) Reset() { + *x = NodeStageVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeStageVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeStageVolumeRequest.Unmarshal(m, b) -} -func (m *NodeStageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeStageVolumeRequest.Marshal(b, m, deterministic) -} -func (m *NodeStageVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeStageVolumeRequest.Merge(m, src) -} -func (m *NodeStageVolumeRequest) XXX_Size() int { - return xxx_messageInfo_NodeStageVolumeRequest.Size(m) +func (x *NodeStageVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeStageVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeStageVolumeRequest.DiscardUnknown(m) + 
+func (*NodeStageVolumeRequest) ProtoMessage() {} + +func (x *NodeStageVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodeStageVolumeRequest proto.InternalMessageInfo +// Deprecated: Use NodeStageVolumeRequest.ProtoReflect.Descriptor instead. +func (*NodeStageVolumeRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{43} +} -func (m *NodeStageVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *NodeStageVolumeRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId } return "" } -func (m *NodeStageVolumeRequest) GetPublishContext() map[string]string { - if m != nil { - return m.PublishContext +func (x *NodeStageVolumeRequest) GetPublishContext() map[string]string { + if x != nil { + return x.PublishContext } return nil } -func (m *NodeStageVolumeRequest) GetStagingTargetPath() string { - if m != nil { - return m.StagingTargetPath +func (x *NodeStageVolumeRequest) GetStagingTargetPath() string { + if x != nil { + return x.StagingTargetPath } return "" } -func (m *NodeStageVolumeRequest) GetVolumeCapability() *VolumeCapability { - if m != nil { - return m.VolumeCapability +func (x *NodeStageVolumeRequest) GetVolumeCapability() *VolumeCapability { + if x != nil { + return x.VolumeCapability } return nil } -func (m *NodeStageVolumeRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *NodeStageVolumeRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } -func (m *NodeStageVolumeRequest) GetVolumeContext() map[string]string { - if m != nil { - return m.VolumeContext +func (x *NodeStageVolumeRequest) GetVolumeContext() map[string]string { + if x != nil { + return x.VolumeContext } return nil } type NodeStageVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *NodeStageVolumeResponse) Reset() { *m = NodeStageVolumeResponse{} } -func (m *NodeStageVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*NodeStageVolumeResponse) ProtoMessage() {} -func (*NodeStageVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{42} +func (x *NodeStageVolumeResponse) Reset() { + *x = NodeStageVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeStageVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeStageVolumeResponse.Unmarshal(m, b) -} -func (m *NodeStageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeStageVolumeResponse.Marshal(b, m, deterministic) +func (x *NodeStageVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeStageVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeStageVolumeResponse.Merge(m, src) -} -func (m *NodeStageVolumeResponse) XXX_Size() int { - return xxx_messageInfo_NodeStageVolumeResponse.Size(m) + +func (*NodeStageVolumeResponse) ProtoMessage() {} + +func 
(x *NodeStageVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *NodeStageVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeStageVolumeResponse.DiscardUnknown(m) + +// Deprecated: Use NodeStageVolumeResponse.ProtoReflect.Descriptor instead. +func (*NodeStageVolumeResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{44} } -var xxx_messageInfo_NodeStageVolumeResponse proto.InternalMessageInfo - type NodeUnstageVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the volume. This field is REQUIRED. VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // The path at which the volume was staged. It MUST be an absolute @@ -3939,83 +3974,98 @@ type NodeUnstageVolumeRequest struct { // SP SHOULD support the maximum path length allowed by the operating // system/filesystem, but, at a minimum, SP MUST accept a max path // length of at least 128 bytes. - StagingTargetPath string `protobuf:"bytes,2,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + StagingTargetPath string `protobuf:"bytes,2,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` } -func (m *NodeUnstageVolumeRequest) Reset() { *m = NodeUnstageVolumeRequest{} } -func (m *NodeUnstageVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*NodeUnstageVolumeRequest) ProtoMessage() {} -func (*NodeUnstageVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{43} +func (x *NodeUnstageVolumeRequest) Reset() { + *x = NodeUnstageVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeUnstageVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeUnstageVolumeRequest.Unmarshal(m, b) +func (x *NodeUnstageVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeUnstageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeUnstageVolumeRequest.Marshal(b, m, deterministic) -} -func (m *NodeUnstageVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnstageVolumeRequest.Merge(m, src) -} -func (m *NodeUnstageVolumeRequest) XXX_Size() int { - return xxx_messageInfo_NodeUnstageVolumeRequest.Size(m) -} -func (m *NodeUnstageVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnstageVolumeRequest.DiscardUnknown(m) + +func (*NodeUnstageVolumeRequest) ProtoMessage() {} + +func (x *NodeUnstageVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodeUnstageVolumeRequest proto.InternalMessageInfo +// Deprecated: Use 
NodeUnstageVolumeRequest.ProtoReflect.Descriptor instead. +func (*NodeUnstageVolumeRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{45} +} -func (m *NodeUnstageVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *NodeUnstageVolumeRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId } return "" } -func (m *NodeUnstageVolumeRequest) GetStagingTargetPath() string { - if m != nil { - return m.StagingTargetPath +func (x *NodeUnstageVolumeRequest) GetStagingTargetPath() string { + if x != nil { + return x.StagingTargetPath } return "" } type NodeUnstageVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *NodeUnstageVolumeResponse) Reset() { *m = NodeUnstageVolumeResponse{} } -func (m *NodeUnstageVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*NodeUnstageVolumeResponse) ProtoMessage() {} -func (*NodeUnstageVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{44} +func (x *NodeUnstageVolumeResponse) Reset() { + *x = NodeUnstageVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeUnstageVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeUnstageVolumeResponse.Unmarshal(m, b) +func (x *NodeUnstageVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeUnstageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeUnstageVolumeResponse.Marshal(b, m, deterministic) -} -func (m *NodeUnstageVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnstageVolumeResponse.Merge(m, src) -} -func (m *NodeUnstageVolumeResponse) XXX_Size() int { - return xxx_messageInfo_NodeUnstageVolumeResponse.Size(m) -} -func (m *NodeUnstageVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnstageVolumeResponse.DiscardUnknown(m) + +func (*NodeUnstageVolumeResponse) ProtoMessage() {} + +func (x *NodeUnstageVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodeUnstageVolumeResponse proto.InternalMessageInfo +// Deprecated: Use NodeUnstageVolumeResponse.ProtoReflect.Descriptor instead. +func (*NodeUnstageVolumeResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{46} +} type NodePublishVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the volume to publish. This field is REQUIRED. VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // The CO SHALL set this field to the value returned by @@ -4068,125 +4118,140 @@ type NodePublishVolumeRequest struct { // CreateVolumeResponse.Volume.volume_context. // This field is OPTIONAL and MUST match the volume_context of the // volume identified by `volume_id`. 
- VolumeContext map[string]string `protobuf:"bytes,8,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + VolumeContext map[string]string `protobuf:"bytes,8,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *NodePublishVolumeRequest) Reset() { *m = NodePublishVolumeRequest{} } -func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*NodePublishVolumeRequest) ProtoMessage() {} -func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{45} +func (x *NodePublishVolumeRequest) Reset() { + *x = NodePublishVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodePublishVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodePublishVolumeRequest.Unmarshal(m, b) -} -func (m *NodePublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodePublishVolumeRequest.Marshal(b, m, deterministic) +func (x *NodePublishVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodePublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodePublishVolumeRequest.Merge(m, src) -} -func (m *NodePublishVolumeRequest) XXX_Size() int { - return xxx_messageInfo_NodePublishVolumeRequest.Size(m) -} -func (m *NodePublishVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodePublishVolumeRequest.DiscardUnknown(m) + +func (*NodePublishVolumeRequest) ProtoMessage() {} + +func (x *NodePublishVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodePublishVolumeRequest proto.InternalMessageInfo +// Deprecated: Use NodePublishVolumeRequest.ProtoReflect.Descriptor instead. 
+func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{47} +} -func (m *NodePublishVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *NodePublishVolumeRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId } return "" } -func (m *NodePublishVolumeRequest) GetPublishContext() map[string]string { - if m != nil { - return m.PublishContext +func (x *NodePublishVolumeRequest) GetPublishContext() map[string]string { + if x != nil { + return x.PublishContext } return nil } -func (m *NodePublishVolumeRequest) GetStagingTargetPath() string { - if m != nil { - return m.StagingTargetPath +func (x *NodePublishVolumeRequest) GetStagingTargetPath() string { + if x != nil { + return x.StagingTargetPath } return "" } -func (m *NodePublishVolumeRequest) GetTargetPath() string { - if m != nil { - return m.TargetPath +func (x *NodePublishVolumeRequest) GetTargetPath() string { + if x != nil { + return x.TargetPath } return "" } -func (m *NodePublishVolumeRequest) GetVolumeCapability() *VolumeCapability { - if m != nil { - return m.VolumeCapability +func (x *NodePublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if x != nil { + return x.VolumeCapability } return nil } -func (m *NodePublishVolumeRequest) GetReadonly() bool { - if m != nil { - return m.Readonly +func (x *NodePublishVolumeRequest) GetReadonly() bool { + if x != nil { + return x.Readonly } return false } -func (m *NodePublishVolumeRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *NodePublishVolumeRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } -func (m *NodePublishVolumeRequest) GetVolumeContext() map[string]string { - if m != nil { - return m.VolumeContext +func (x *NodePublishVolumeRequest) GetVolumeContext() map[string]string { + if x != nil { + return x.VolumeContext } return nil } type NodePublishVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *NodePublishVolumeResponse) Reset() { *m = NodePublishVolumeResponse{} } -func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*NodePublishVolumeResponse) ProtoMessage() {} -func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{46} +func (x *NodePublishVolumeResponse) Reset() { + *x = NodePublishVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodePublishVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodePublishVolumeResponse.Unmarshal(m, b) -} -func (m *NodePublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodePublishVolumeResponse.Marshal(b, m, deterministic) -} -func (m *NodePublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodePublishVolumeResponse.Merge(m, src) -} -func (m *NodePublishVolumeResponse) XXX_Size() int { - return xxx_messageInfo_NodePublishVolumeResponse.Size(m) +func (x *NodePublishVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodePublishVolumeResponse) XXX_DiscardUnknown() { - 
xxx_messageInfo_NodePublishVolumeResponse.DiscardUnknown(m) + +func (*NodePublishVolumeResponse) ProtoMessage() {} + +func (x *NodePublishVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodePublishVolumeResponse proto.InternalMessageInfo +// Deprecated: Use NodePublishVolumeResponse.ProtoReflect.Descriptor instead. +func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{48} +} type NodeUnpublishVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the volume. This field is REQUIRED. VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // The path at which the volume was published. It MUST be an absolute @@ -4197,83 +4262,98 @@ type NodeUnpublishVolumeRequest struct { // SP SHOULD support the maximum path length allowed by the operating // system/filesystem, but, at a minimum, SP MUST accept a max path // length of at least 128 bytes. - TargetPath string `protobuf:"bytes,2,opt,name=target_path,json=targetPath,proto3" json:"target_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + TargetPath string `protobuf:"bytes,2,opt,name=target_path,json=targetPath,proto3" json:"target_path,omitempty"` } -func (m *NodeUnpublishVolumeRequest) Reset() { *m = NodeUnpublishVolumeRequest{} } -func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*NodeUnpublishVolumeRequest) ProtoMessage() {} -func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{47} +func (x *NodeUnpublishVolumeRequest) Reset() { + *x = NodeUnpublishVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeUnpublishVolumeRequest.Unmarshal(m, b) +func (x *NodeUnpublishVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeUnpublishVolumeRequest.Marshal(b, m, deterministic) -} -func (m *NodeUnpublishVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnpublishVolumeRequest.Merge(m, src) -} -func (m *NodeUnpublishVolumeRequest) XXX_Size() int { - return xxx_messageInfo_NodeUnpublishVolumeRequest.Size(m) -} -func (m *NodeUnpublishVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnpublishVolumeRequest.DiscardUnknown(m) + +func (*NodeUnpublishVolumeRequest) ProtoMessage() {} + +func (x *NodeUnpublishVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodeUnpublishVolumeRequest proto.InternalMessageInfo +// Deprecated: Use 
NodeUnpublishVolumeRequest.ProtoReflect.Descriptor instead. +func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{49} +} -func (m *NodeUnpublishVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *NodeUnpublishVolumeRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId } return "" } -func (m *NodeUnpublishVolumeRequest) GetTargetPath() string { - if m != nil { - return m.TargetPath +func (x *NodeUnpublishVolumeRequest) GetTargetPath() string { + if x != nil { + return x.TargetPath } return "" } type NodeUnpublishVolumeResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *NodeUnpublishVolumeResponse) Reset() { *m = NodeUnpublishVolumeResponse{} } -func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*NodeUnpublishVolumeResponse) ProtoMessage() {} -func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{48} +func (x *NodeUnpublishVolumeResponse) Reset() { + *x = NodeUnpublishVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeUnpublishVolumeResponse.Unmarshal(m, b) +func (x *NodeUnpublishVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeUnpublishVolumeResponse.Marshal(b, m, deterministic) -} -func (m *NodeUnpublishVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeUnpublishVolumeResponse.Merge(m, src) -} -func (m *NodeUnpublishVolumeResponse) XXX_Size() int { - return xxx_messageInfo_NodeUnpublishVolumeResponse.Size(m) -} -func (m *NodeUnpublishVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeUnpublishVolumeResponse.DiscardUnknown(m) + +func (*NodeUnpublishVolumeResponse) ProtoMessage() {} + +func (x *NodeUnpublishVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodeUnpublishVolumeResponse proto.InternalMessageInfo +// Deprecated: Use NodeUnpublishVolumeResponse.ProtoReflect.Descriptor instead. +func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{50} +} type NodeGetVolumeStatsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the volume. This field is REQUIRED. VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // It can be any valid path where volume was previously @@ -4295,111 +4375,127 @@ type NodeGetVolumeStatsRequest struct { // SP SHOULD support the maximum path length allowed by the operating // system/filesystem, but, at a minimum, SP MUST accept a max path // length of at least 128 bytes. 
- StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` } -func (m *NodeGetVolumeStatsRequest) Reset() { *m = NodeGetVolumeStatsRequest{} } -func (m *NodeGetVolumeStatsRequest) String() string { return proto.CompactTextString(m) } -func (*NodeGetVolumeStatsRequest) ProtoMessage() {} -func (*NodeGetVolumeStatsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{49} +func (x *NodeGetVolumeStatsRequest) Reset() { + *x = NodeGetVolumeStatsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeGetVolumeStatsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetVolumeStatsRequest.Unmarshal(m, b) +func (x *NodeGetVolumeStatsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeGetVolumeStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetVolumeStatsRequest.Marshal(b, m, deterministic) -} -func (m *NodeGetVolumeStatsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetVolumeStatsRequest.Merge(m, src) -} -func (m *NodeGetVolumeStatsRequest) XXX_Size() int { - return xxx_messageInfo_NodeGetVolumeStatsRequest.Size(m) -} -func (m *NodeGetVolumeStatsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetVolumeStatsRequest.DiscardUnknown(m) + +func (*NodeGetVolumeStatsRequest) ProtoMessage() {} + +func (x *NodeGetVolumeStatsRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodeGetVolumeStatsRequest proto.InternalMessageInfo +// Deprecated: Use NodeGetVolumeStatsRequest.ProtoReflect.Descriptor instead. +func (*NodeGetVolumeStatsRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{51} +} -func (m *NodeGetVolumeStatsRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *NodeGetVolumeStatsRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId } return "" } -func (m *NodeGetVolumeStatsRequest) GetVolumePath() string { - if m != nil { - return m.VolumePath +func (x *NodeGetVolumeStatsRequest) GetVolumePath() string { + if x != nil { + return x.VolumePath } return "" } -func (m *NodeGetVolumeStatsRequest) GetStagingTargetPath() string { - if m != nil { - return m.StagingTargetPath +func (x *NodeGetVolumeStatsRequest) GetStagingTargetPath() string { + if x != nil { + return x.StagingTargetPath } return "" } type NodeGetVolumeStatsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // This field is OPTIONAL. Usage []*VolumeUsage `protobuf:"bytes,1,rep,name=usage,proto3" json:"usage,omitempty"` // Information about the current condition of the volume. // This field is OPTIONAL. // This field MUST be specified if the VOLUME_CONDITION node // capability is supported. 
- VolumeCondition *VolumeCondition `protobuf:"bytes,2,opt,name=volume_condition,json=volumeCondition,proto3" json:"volume_condition,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + VolumeCondition *VolumeCondition `protobuf:"bytes,2,opt,name=volume_condition,json=volumeCondition,proto3" json:"volume_condition,omitempty"` } -func (m *NodeGetVolumeStatsResponse) Reset() { *m = NodeGetVolumeStatsResponse{} } -func (m *NodeGetVolumeStatsResponse) String() string { return proto.CompactTextString(m) } -func (*NodeGetVolumeStatsResponse) ProtoMessage() {} -func (*NodeGetVolumeStatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{50} +func (x *NodeGetVolumeStatsResponse) Reset() { + *x = NodeGetVolumeStatsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeGetVolumeStatsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetVolumeStatsResponse.Unmarshal(m, b) -} -func (m *NodeGetVolumeStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetVolumeStatsResponse.Marshal(b, m, deterministic) -} -func (m *NodeGetVolumeStatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetVolumeStatsResponse.Merge(m, src) +func (x *NodeGetVolumeStatsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeGetVolumeStatsResponse) XXX_Size() int { - return xxx_messageInfo_NodeGetVolumeStatsResponse.Size(m) -} -func (m *NodeGetVolumeStatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetVolumeStatsResponse.DiscardUnknown(m) + +func (*NodeGetVolumeStatsResponse) ProtoMessage() {} + +func (x *NodeGetVolumeStatsResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodeGetVolumeStatsResponse proto.InternalMessageInfo +// Deprecated: Use NodeGetVolumeStatsResponse.ProtoReflect.Descriptor instead. +func (*NodeGetVolumeStatsResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{52} +} -func (m *NodeGetVolumeStatsResponse) GetUsage() []*VolumeUsage { - if m != nil { - return m.Usage +func (x *NodeGetVolumeStatsResponse) GetUsage() []*VolumeUsage { + if x != nil { + return x.Usage } return nil } -func (m *NodeGetVolumeStatsResponse) GetVolumeCondition() *VolumeCondition { - if m != nil { - return m.VolumeCondition +func (x *NodeGetVolumeStatsResponse) GetVolumeCondition() *VolumeCondition { + if x != nil { + return x.VolumeCondition } return nil } type VolumeUsage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The available capacity in specified Unit. This field is OPTIONAL. // The value of this field MUST NOT be negative. Available int64 `protobuf:"varint,1,opt,name=available,proto3" json:"available,omitempty"` @@ -4410,236 +4506,261 @@ type VolumeUsage struct { // The value of this field MUST NOT be negative. Used int64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` // Units by which values are measured. This field is REQUIRED. 
- Unit VolumeUsage_Unit `protobuf:"varint,4,opt,name=unit,proto3,enum=csi.v1.VolumeUsage_Unit" json:"unit,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Unit VolumeUsage_Unit `protobuf:"varint,4,opt,name=unit,proto3,enum=csi.v1.VolumeUsage_Unit" json:"unit,omitempty"` } -func (m *VolumeUsage) Reset() { *m = VolumeUsage{} } -func (m *VolumeUsage) String() string { return proto.CompactTextString(m) } -func (*VolumeUsage) ProtoMessage() {} -func (*VolumeUsage) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{51} +func (x *VolumeUsage) Reset() { + *x = VolumeUsage{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *VolumeUsage) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeUsage.Unmarshal(m, b) -} -func (m *VolumeUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeUsage.Marshal(b, m, deterministic) +func (x *VolumeUsage) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeUsage) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeUsage.Merge(m, src) -} -func (m *VolumeUsage) XXX_Size() int { - return xxx_messageInfo_VolumeUsage.Size(m) -} -func (m *VolumeUsage) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeUsage.DiscardUnknown(m) + +func (*VolumeUsage) ProtoMessage() {} + +func (x *VolumeUsage) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_VolumeUsage proto.InternalMessageInfo +// Deprecated: Use VolumeUsage.ProtoReflect.Descriptor instead. +func (*VolumeUsage) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{53} +} -func (m *VolumeUsage) GetAvailable() int64 { - if m != nil { - return m.Available +func (x *VolumeUsage) GetAvailable() int64 { + if x != nil { + return x.Available } return 0 } -func (m *VolumeUsage) GetTotal() int64 { - if m != nil { - return m.Total +func (x *VolumeUsage) GetTotal() int64 { + if x != nil { + return x.Total } return 0 } -func (m *VolumeUsage) GetUsed() int64 { - if m != nil { - return m.Used +func (x *VolumeUsage) GetUsed() int64 { + if x != nil { + return x.Used } return 0 } -func (m *VolumeUsage) GetUnit() VolumeUsage_Unit { - if m != nil { - return m.Unit +func (x *VolumeUsage) GetUnit() VolumeUsage_Unit { + if x != nil { + return x.Unit } return VolumeUsage_UNKNOWN } // VolumeCondition represents the current condition of a volume. type VolumeCondition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Normal volumes are available for use and operating optimally. // An abnormal volume does not meet these criteria. // This field is REQUIRED. Abnormal bool `protobuf:"varint,1,opt,name=abnormal,proto3" json:"abnormal,omitempty"` // The message describing the condition of the volume. // This field is REQUIRED. 
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` } -func (m *VolumeCondition) Reset() { *m = VolumeCondition{} } -func (m *VolumeCondition) String() string { return proto.CompactTextString(m) } -func (*VolumeCondition) ProtoMessage() {} -func (*VolumeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{52} +func (x *VolumeCondition) Reset() { + *x = VolumeCondition{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *VolumeCondition) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeCondition.Unmarshal(m, b) -} -func (m *VolumeCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeCondition.Marshal(b, m, deterministic) -} -func (m *VolumeCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeCondition.Merge(m, src) +func (x *VolumeCondition) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeCondition) XXX_Size() int { - return xxx_messageInfo_VolumeCondition.Size(m) -} -func (m *VolumeCondition) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeCondition.DiscardUnknown(m) + +func (*VolumeCondition) ProtoMessage() {} + +func (x *VolumeCondition) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_VolumeCondition proto.InternalMessageInfo +// Deprecated: Use VolumeCondition.ProtoReflect.Descriptor instead. 
+func (*VolumeCondition) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{54} +} -func (m *VolumeCondition) GetAbnormal() bool { - if m != nil { - return m.Abnormal +func (x *VolumeCondition) GetAbnormal() bool { + if x != nil { + return x.Abnormal } return false } -func (m *VolumeCondition) GetMessage() string { - if m != nil { - return m.Message +func (x *VolumeCondition) GetMessage() string { + if x != nil { + return x.Message } return "" } type NodeGetCapabilitiesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *NodeGetCapabilitiesRequest) Reset() { *m = NodeGetCapabilitiesRequest{} } -func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } -func (*NodeGetCapabilitiesRequest) ProtoMessage() {} -func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{53} +func (x *NodeGetCapabilitiesRequest) Reset() { + *x = NodeGetCapabilitiesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetCapabilitiesRequest.Unmarshal(m, b) -} -func (m *NodeGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetCapabilitiesRequest.Marshal(b, m, deterministic) -} -func (m *NodeGetCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetCapabilitiesRequest.Merge(m, src) +func (x *NodeGetCapabilitiesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeGetCapabilitiesRequest) XXX_Size() int { - return xxx_messageInfo_NodeGetCapabilitiesRequest.Size(m) -} -func (m *NodeGetCapabilitiesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetCapabilitiesRequest.DiscardUnknown(m) + +func (*NodeGetCapabilitiesRequest) ProtoMessage() {} + +func (x *NodeGetCapabilitiesRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodeGetCapabilitiesRequest proto.InternalMessageInfo +// Deprecated: Use NodeGetCapabilitiesRequest.ProtoReflect.Descriptor instead. +func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{55} +} type NodeGetCapabilitiesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // All the capabilities that the node service supports. This field // is OPTIONAL. 
- Capabilities []*NodeServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Capabilities []*NodeServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` } -func (m *NodeGetCapabilitiesResponse) Reset() { *m = NodeGetCapabilitiesResponse{} } -func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } -func (*NodeGetCapabilitiesResponse) ProtoMessage() {} -func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{54} +func (x *NodeGetCapabilitiesResponse) Reset() { + *x = NodeGetCapabilitiesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetCapabilitiesResponse.Unmarshal(m, b) -} -func (m *NodeGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetCapabilitiesResponse.Marshal(b, m, deterministic) -} -func (m *NodeGetCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetCapabilitiesResponse.Merge(m, src) -} -func (m *NodeGetCapabilitiesResponse) XXX_Size() int { - return xxx_messageInfo_NodeGetCapabilitiesResponse.Size(m) +func (x *NodeGetCapabilitiesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeGetCapabilitiesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetCapabilitiesResponse.DiscardUnknown(m) + +func (*NodeGetCapabilitiesResponse) ProtoMessage() {} + +func (x *NodeGetCapabilitiesResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodeGetCapabilitiesResponse proto.InternalMessageInfo +// Deprecated: Use NodeGetCapabilitiesResponse.ProtoReflect.Descriptor instead. +func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{56} +} -func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability { - if m != nil { - return m.Capabilities +func (x *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability { + if x != nil { + return x.Capabilities } return nil } // Specifies a capability of the node service. 
type NodeServiceCapability struct { - // Types that are valid to be assigned to Type: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Type: // // *NodeServiceCapability_Rpc - Type isNodeServiceCapability_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Type isNodeServiceCapability_Type `protobuf_oneof:"type"` } -func (m *NodeServiceCapability) Reset() { *m = NodeServiceCapability{} } -func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) } -func (*NodeServiceCapability) ProtoMessage() {} -func (*NodeServiceCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{55} +func (x *NodeServiceCapability) Reset() { + *x = NodeServiceCapability{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeServiceCapability) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeServiceCapability.Unmarshal(m, b) -} -func (m *NodeServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeServiceCapability.Marshal(b, m, deterministic) -} -func (m *NodeServiceCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeServiceCapability.Merge(m, src) -} -func (m *NodeServiceCapability) XXX_Size() int { - return xxx_messageInfo_NodeServiceCapability.Size(m) -} -func (m *NodeServiceCapability) XXX_DiscardUnknown() { - xxx_messageInfo_NodeServiceCapability.DiscardUnknown(m) +func (x *NodeServiceCapability) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_NodeServiceCapability proto.InternalMessageInfo +func (*NodeServiceCapability) ProtoMessage() {} -type isNodeServiceCapability_Type interface { - isNodeServiceCapability_Type() +func (x *NodeServiceCapability) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type NodeServiceCapability_Rpc struct { - Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"` +// Deprecated: Use NodeServiceCapability.ProtoReflect.Descriptor instead. +func (*NodeServiceCapability) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{57} } -func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {} - func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type { if m != nil { return m.Type @@ -4647,91 +4768,67 @@ func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type { return nil } -func (m *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC { - if x, ok := m.GetType().(*NodeServiceCapability_Rpc); ok { +func (x *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC { + if x, ok := x.GetType().(*NodeServiceCapability_Rpc); ok { return x.Rpc } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*NodeServiceCapability) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*NodeServiceCapability_Rpc)(nil), - } +type isNodeServiceCapability_Type interface { + isNodeServiceCapability_Type() } -type NodeServiceCapability_RPC struct { - Type NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.NodeServiceCapability_RPC_Type" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type NodeServiceCapability_Rpc struct { + // RPC that the controller supports. + Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"` } -func (m *NodeServiceCapability_RPC) Reset() { *m = NodeServiceCapability_RPC{} } -func (m *NodeServiceCapability_RPC) String() string { return proto.CompactTextString(m) } -func (*NodeServiceCapability_RPC) ProtoMessage() {} -func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{55, 0} -} +func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {} -func (m *NodeServiceCapability_RPC) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeServiceCapability_RPC.Unmarshal(m, b) -} -func (m *NodeServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeServiceCapability_RPC.Marshal(b, m, deterministic) -} -func (m *NodeServiceCapability_RPC) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeServiceCapability_RPC.Merge(m, src) -} -func (m *NodeServiceCapability_RPC) XXX_Size() int { - return xxx_messageInfo_NodeServiceCapability_RPC.Size(m) -} -func (m *NodeServiceCapability_RPC) XXX_DiscardUnknown() { - xxx_messageInfo_NodeServiceCapability_RPC.DiscardUnknown(m) +type NodeGetInfoRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -var xxx_messageInfo_NodeServiceCapability_RPC proto.InternalMessageInfo - -func (m *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type { - if m != nil { - return m.Type +func (x *NodeGetInfoRequest) Reset() { + *x = NodeGetInfoRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return NodeServiceCapability_RPC_UNKNOWN } -type NodeGetInfoRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *NodeGetInfoRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeGetInfoRequest) Reset() { *m = NodeGetInfoRequest{} } -func (m *NodeGetInfoRequest) String() string { return proto.CompactTextString(m) } -func (*NodeGetInfoRequest) ProtoMessage() {} -func (*NodeGetInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{56} -} +func (*NodeGetInfoRequest) ProtoMessage() {} -func (m *NodeGetInfoRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetInfoRequest.Unmarshal(m, b) -} -func (m *NodeGetInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetInfoRequest.Marshal(b, m, deterministic) -} -func (m *NodeGetInfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetInfoRequest.Merge(m, src) -} -func (m *NodeGetInfoRequest) XXX_Size() int { - return xxx_messageInfo_NodeGetInfoRequest.Size(m) -} -func (m *NodeGetInfoRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_NodeGetInfoRequest.DiscardUnknown(m) +func (x *NodeGetInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodeGetInfoRequest proto.InternalMessageInfo +// Deprecated: Use NodeGetInfoRequest.ProtoReflect.Descriptor instead. +func (*NodeGetInfoRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{58} +} type NodeGetInfoResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The identifier of the node as understood by the SP. // This field is REQUIRED. // This field MUST contain enough information to uniquely identify @@ -4770,59 +4867,67 @@ type NodeGetInfoResponse struct { // // Indicates the node exists within the "region" "R1" and the "zone" // "Z2". - AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` } -func (m *NodeGetInfoResponse) Reset() { *m = NodeGetInfoResponse{} } -func (m *NodeGetInfoResponse) String() string { return proto.CompactTextString(m) } -func (*NodeGetInfoResponse) ProtoMessage() {} -func (*NodeGetInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{57} +func (x *NodeGetInfoResponse) Reset() { + *x = NodeGetInfoResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeGetInfoResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeGetInfoResponse.Unmarshal(m, b) -} -func (m *NodeGetInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeGetInfoResponse.Marshal(b, m, deterministic) -} -func (m *NodeGetInfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeGetInfoResponse.Merge(m, src) -} -func (m *NodeGetInfoResponse) XXX_Size() int { - return xxx_messageInfo_NodeGetInfoResponse.Size(m) +func (x *NodeGetInfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeGetInfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeGetInfoResponse.DiscardUnknown(m) + +func (*NodeGetInfoResponse) ProtoMessage() {} + +func (x *NodeGetInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodeGetInfoResponse proto.InternalMessageInfo +// Deprecated: Use NodeGetInfoResponse.ProtoReflect.Descriptor instead. 
+func (*NodeGetInfoResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{59} +} -func (m *NodeGetInfoResponse) GetNodeId() string { - if m != nil { - return m.NodeId +func (x *NodeGetInfoResponse) GetNodeId() string { + if x != nil { + return x.NodeId } return "" } -func (m *NodeGetInfoResponse) GetMaxVolumesPerNode() int64 { - if m != nil { - return m.MaxVolumesPerNode +func (x *NodeGetInfoResponse) GetMaxVolumesPerNode() int64 { + if x != nil { + return x.MaxVolumesPerNode } return 0 } -func (m *NodeGetInfoResponse) GetAccessibleTopology() *Topology { - if m != nil { - return m.AccessibleTopology +func (x *NodeGetInfoResponse) GetAccessibleTopology() *Topology { + if x != nil { + return x.AccessibleTopology } return nil } type NodeExpandVolumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the volume. This field is REQUIRED. VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` // The path on which volume is available. This field is REQUIRED. @@ -4861,239 +4966,262 @@ type NodeExpandVolumeRequest struct { // Secrets required by plugin to complete node expand volume request. // This field is OPTIONAL. Refer to the `Secrets Requirements` // section on how to use this field. - Secrets map[string]string `protobuf:"bytes,6,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Secrets map[string]string `protobuf:"bytes,6,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *NodeExpandVolumeRequest) Reset() { *m = NodeExpandVolumeRequest{} } -func (m *NodeExpandVolumeRequest) String() string { return proto.CompactTextString(m) } -func (*NodeExpandVolumeRequest) ProtoMessage() {} -func (*NodeExpandVolumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{58} +func (x *NodeExpandVolumeRequest) Reset() { + *x = NodeExpandVolumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeExpandVolumeRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeExpandVolumeRequest.Unmarshal(m, b) -} -func (m *NodeExpandVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeExpandVolumeRequest.Marshal(b, m, deterministic) +func (x *NodeExpandVolumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeExpandVolumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeExpandVolumeRequest.Merge(m, src) -} -func (m *NodeExpandVolumeRequest) XXX_Size() int { - return xxx_messageInfo_NodeExpandVolumeRequest.Size(m) -} -func (m *NodeExpandVolumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NodeExpandVolumeRequest.DiscardUnknown(m) + +func (*NodeExpandVolumeRequest) ProtoMessage() {} + +func (x *NodeExpandVolumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var 
xxx_messageInfo_NodeExpandVolumeRequest proto.InternalMessageInfo +// Deprecated: Use NodeExpandVolumeRequest.ProtoReflect.Descriptor instead. +func (*NodeExpandVolumeRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{60} +} -func (m *NodeExpandVolumeRequest) GetVolumeId() string { - if m != nil { - return m.VolumeId +func (x *NodeExpandVolumeRequest) GetVolumeId() string { + if x != nil { + return x.VolumeId } return "" } -func (m *NodeExpandVolumeRequest) GetVolumePath() string { - if m != nil { - return m.VolumePath +func (x *NodeExpandVolumeRequest) GetVolumePath() string { + if x != nil { + return x.VolumePath } return "" } -func (m *NodeExpandVolumeRequest) GetCapacityRange() *CapacityRange { - if m != nil { - return m.CapacityRange +func (x *NodeExpandVolumeRequest) GetCapacityRange() *CapacityRange { + if x != nil { + return x.CapacityRange } return nil } -func (m *NodeExpandVolumeRequest) GetStagingTargetPath() string { - if m != nil { - return m.StagingTargetPath +func (x *NodeExpandVolumeRequest) GetStagingTargetPath() string { + if x != nil { + return x.StagingTargetPath } return "" } -func (m *NodeExpandVolumeRequest) GetVolumeCapability() *VolumeCapability { - if m != nil { - return m.VolumeCapability +func (x *NodeExpandVolumeRequest) GetVolumeCapability() *VolumeCapability { + if x != nil { + return x.VolumeCapability } return nil } -func (m *NodeExpandVolumeRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *NodeExpandVolumeRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } type NodeExpandVolumeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The capacity of the volume in bytes. This field is OPTIONAL. 
- CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"` } -func (m *NodeExpandVolumeResponse) Reset() { *m = NodeExpandVolumeResponse{} } -func (m *NodeExpandVolumeResponse) String() string { return proto.CompactTextString(m) } -func (*NodeExpandVolumeResponse) ProtoMessage() {} -func (*NodeExpandVolumeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{59} +func (x *NodeExpandVolumeResponse) Reset() { + *x = NodeExpandVolumeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *NodeExpandVolumeResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeExpandVolumeResponse.Unmarshal(m, b) -} -func (m *NodeExpandVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeExpandVolumeResponse.Marshal(b, m, deterministic) -} -func (m *NodeExpandVolumeResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeExpandVolumeResponse.Merge(m, src) -} -func (m *NodeExpandVolumeResponse) XXX_Size() int { - return xxx_messageInfo_NodeExpandVolumeResponse.Size(m) +func (x *NodeExpandVolumeResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NodeExpandVolumeResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NodeExpandVolumeResponse.DiscardUnknown(m) + +func (*NodeExpandVolumeResponse) ProtoMessage() {} + +func (x *NodeExpandVolumeResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_NodeExpandVolumeResponse proto.InternalMessageInfo +// Deprecated: Use NodeExpandVolumeResponse.ProtoReflect.Descriptor instead. 
+func (*NodeExpandVolumeResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{61} +} -func (m *NodeExpandVolumeResponse) GetCapacityBytes() int64 { - if m != nil { - return m.CapacityBytes +func (x *NodeExpandVolumeResponse) GetCapacityBytes() int64 { + if x != nil { + return x.CapacityBytes } return 0 } type GroupControllerGetCapabilitiesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *GroupControllerGetCapabilitiesRequest) Reset() { *m = GroupControllerGetCapabilitiesRequest{} } -func (m *GroupControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } -func (*GroupControllerGetCapabilitiesRequest) ProtoMessage() {} -func (*GroupControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{60} +func (x *GroupControllerGetCapabilitiesRequest) Reset() { + *x = GroupControllerGetCapabilitiesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GroupControllerGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GroupControllerGetCapabilitiesRequest.Unmarshal(m, b) +func (x *GroupControllerGetCapabilitiesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GroupControllerGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GroupControllerGetCapabilitiesRequest.Marshal(b, m, deterministic) -} -func (m *GroupControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupControllerGetCapabilitiesRequest.Merge(m, src) -} -func (m *GroupControllerGetCapabilitiesRequest) XXX_Size() int { - return xxx_messageInfo_GroupControllerGetCapabilitiesRequest.Size(m) -} -func (m *GroupControllerGetCapabilitiesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GroupControllerGetCapabilitiesRequest.DiscardUnknown(m) + +func (*GroupControllerGetCapabilitiesRequest) ProtoMessage() {} + +func (x *GroupControllerGetCapabilitiesRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GroupControllerGetCapabilitiesRequest proto.InternalMessageInfo +// Deprecated: Use GroupControllerGetCapabilitiesRequest.ProtoReflect.Descriptor instead. +func (*GroupControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{62} +} type GroupControllerGetCapabilitiesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // All the capabilities that the group controller service supports. // This field is OPTIONAL. 
- Capabilities []*GroupControllerServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Capabilities []*GroupControllerServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` } -func (m *GroupControllerGetCapabilitiesResponse) Reset() { - *m = GroupControllerGetCapabilitiesResponse{} -} -func (m *GroupControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } -func (*GroupControllerGetCapabilitiesResponse) ProtoMessage() {} -func (*GroupControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{61} +func (x *GroupControllerGetCapabilitiesResponse) Reset() { + *x = GroupControllerGetCapabilitiesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GroupControllerGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GroupControllerGetCapabilitiesResponse.Unmarshal(m, b) +func (x *GroupControllerGetCapabilitiesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GroupControllerGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GroupControllerGetCapabilitiesResponse.Marshal(b, m, deterministic) -} -func (m *GroupControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupControllerGetCapabilitiesResponse.Merge(m, src) -} -func (m *GroupControllerGetCapabilitiesResponse) XXX_Size() int { - return xxx_messageInfo_GroupControllerGetCapabilitiesResponse.Size(m) -} -func (m *GroupControllerGetCapabilitiesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GroupControllerGetCapabilitiesResponse.DiscardUnknown(m) + +func (*GroupControllerGetCapabilitiesResponse) ProtoMessage() {} + +func (x *GroupControllerGetCapabilitiesResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GroupControllerGetCapabilitiesResponse proto.InternalMessageInfo +// Deprecated: Use GroupControllerGetCapabilitiesResponse.ProtoReflect.Descriptor instead. +func (*GroupControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{63} +} -func (m *GroupControllerGetCapabilitiesResponse) GetCapabilities() []*GroupControllerServiceCapability { - if m != nil { - return m.Capabilities +func (x *GroupControllerGetCapabilitiesResponse) GetCapabilities() []*GroupControllerServiceCapability { + if x != nil { + return x.Capabilities } return nil } // Specifies a capability of the group controller service. 
type GroupControllerServiceCapability struct { - // Types that are valid to be assigned to Type: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Type: // // *GroupControllerServiceCapability_Rpc - Type isGroupControllerServiceCapability_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Type isGroupControllerServiceCapability_Type `protobuf_oneof:"type"` } -func (m *GroupControllerServiceCapability) Reset() { *m = GroupControllerServiceCapability{} } -func (m *GroupControllerServiceCapability) String() string { return proto.CompactTextString(m) } -func (*GroupControllerServiceCapability) ProtoMessage() {} -func (*GroupControllerServiceCapability) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{62} +func (x *GroupControllerServiceCapability) Reset() { + *x = GroupControllerServiceCapability{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GroupControllerServiceCapability) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GroupControllerServiceCapability.Unmarshal(m, b) -} -func (m *GroupControllerServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GroupControllerServiceCapability.Marshal(b, m, deterministic) -} -func (m *GroupControllerServiceCapability) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupControllerServiceCapability.Merge(m, src) -} -func (m *GroupControllerServiceCapability) XXX_Size() int { - return xxx_messageInfo_GroupControllerServiceCapability.Size(m) -} -func (m *GroupControllerServiceCapability) XXX_DiscardUnknown() { - xxx_messageInfo_GroupControllerServiceCapability.DiscardUnknown(m) +func (x *GroupControllerServiceCapability) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_GroupControllerServiceCapability proto.InternalMessageInfo +func (*GroupControllerServiceCapability) ProtoMessage() {} -type isGroupControllerServiceCapability_Type interface { - isGroupControllerServiceCapability_Type() +func (x *GroupControllerServiceCapability) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type GroupControllerServiceCapability_Rpc struct { - Rpc *GroupControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"` +// Deprecated: Use GroupControllerServiceCapability.ProtoReflect.Descriptor instead. 
+func (*GroupControllerServiceCapability) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{64} } -func (*GroupControllerServiceCapability_Rpc) isGroupControllerServiceCapability_Type() {} - func (m *GroupControllerServiceCapability) GetType() isGroupControllerServiceCapability_Type { if m != nil { return m.Type @@ -5101,60 +5229,29 @@ func (m *GroupControllerServiceCapability) GetType() isGroupControllerServiceCap return nil } -func (m *GroupControllerServiceCapability) GetRpc() *GroupControllerServiceCapability_RPC { - if x, ok := m.GetType().(*GroupControllerServiceCapability_Rpc); ok { +func (x *GroupControllerServiceCapability) GetRpc() *GroupControllerServiceCapability_RPC { + if x, ok := x.GetType().(*GroupControllerServiceCapability_Rpc); ok { return x.Rpc } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*GroupControllerServiceCapability) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*GroupControllerServiceCapability_Rpc)(nil), - } -} - -type GroupControllerServiceCapability_RPC struct { - Type GroupControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.GroupControllerServiceCapability_RPC_Type" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GroupControllerServiceCapability_RPC) Reset() { *m = GroupControllerServiceCapability_RPC{} } -func (m *GroupControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) } -func (*GroupControllerServiceCapability_RPC) ProtoMessage() {} -func (*GroupControllerServiceCapability_RPC) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{62, 0} +type isGroupControllerServiceCapability_Type interface { + isGroupControllerServiceCapability_Type() } -func (m *GroupControllerServiceCapability_RPC) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GroupControllerServiceCapability_RPC.Unmarshal(m, b) -} -func (m *GroupControllerServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GroupControllerServiceCapability_RPC.Marshal(b, m, deterministic) -} -func (m *GroupControllerServiceCapability_RPC) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupControllerServiceCapability_RPC.Merge(m, src) -} -func (m *GroupControllerServiceCapability_RPC) XXX_Size() int { - return xxx_messageInfo_GroupControllerServiceCapability_RPC.Size(m) -} -func (m *GroupControllerServiceCapability_RPC) XXX_DiscardUnknown() { - xxx_messageInfo_GroupControllerServiceCapability_RPC.DiscardUnknown(m) +type GroupControllerServiceCapability_Rpc struct { + // RPC that the controller supports. + Rpc *GroupControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"` } -var xxx_messageInfo_GroupControllerServiceCapability_RPC proto.InternalMessageInfo - -func (m *GroupControllerServiceCapability_RPC) GetType() GroupControllerServiceCapability_RPC_Type { - if m != nil { - return m.Type - } - return GroupControllerServiceCapability_RPC_UNKNOWN -} +func (*GroupControllerServiceCapability_Rpc) isGroupControllerServiceCapability_Type() {} type CreateVolumeGroupSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The suggested name for the group snapshot. This field is REQUIRED // for idempotency. 
// Any Unicode string that conforms to the length limit is allowed @@ -5175,107 +5272,123 @@ type CreateVolumeGroupSnapshotRequest struct { // Plugin specific parameters passed in as opaque key-value pairs. // This field is OPTIONAL. The Plugin is responsible for parsing and // validating these parameters. COs will treat these as opaque. - Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *CreateVolumeGroupSnapshotRequest) Reset() { *m = CreateVolumeGroupSnapshotRequest{} } -func (m *CreateVolumeGroupSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*CreateVolumeGroupSnapshotRequest) ProtoMessage() {} -func (*CreateVolumeGroupSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{63} +func (x *CreateVolumeGroupSnapshotRequest) Reset() { + *x = CreateVolumeGroupSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CreateVolumeGroupSnapshotRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateVolumeGroupSnapshotRequest.Unmarshal(m, b) -} -func (m *CreateVolumeGroupSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateVolumeGroupSnapshotRequest.Marshal(b, m, deterministic) +func (x *CreateVolumeGroupSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CreateVolumeGroupSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateVolumeGroupSnapshotRequest.Merge(m, src) -} -func (m *CreateVolumeGroupSnapshotRequest) XXX_Size() int { - return xxx_messageInfo_CreateVolumeGroupSnapshotRequest.Size(m) -} -func (m *CreateVolumeGroupSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateVolumeGroupSnapshotRequest.DiscardUnknown(m) + +func (*CreateVolumeGroupSnapshotRequest) ProtoMessage() {} + +func (x *CreateVolumeGroupSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[65] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_CreateVolumeGroupSnapshotRequest proto.InternalMessageInfo +// Deprecated: Use CreateVolumeGroupSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*CreateVolumeGroupSnapshotRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{65} +} -func (m *CreateVolumeGroupSnapshotRequest) GetName() string { - if m != nil { - return m.Name +func (x *CreateVolumeGroupSnapshotRequest) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *CreateVolumeGroupSnapshotRequest) GetSourceVolumeIds() []string { - if m != nil { - return m.SourceVolumeIds +func (x *CreateVolumeGroupSnapshotRequest) GetSourceVolumeIds() []string { + if x != nil { + return x.SourceVolumeIds } return nil } -func (m *CreateVolumeGroupSnapshotRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *CreateVolumeGroupSnapshotRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } -func (m *CreateVolumeGroupSnapshotRequest) GetParameters() map[string]string { - if m != nil { - return m.Parameters +func (x *CreateVolumeGroupSnapshotRequest) GetParameters() map[string]string { + if x != nil { + return x.Parameters } return nil } type CreateVolumeGroupSnapshotResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Contains all attributes of the newly created group snapshot. // This field is REQUIRED. - GroupSnapshot *VolumeGroupSnapshot `protobuf:"bytes,1,opt,name=group_snapshot,json=groupSnapshot,proto3" json:"group_snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + GroupSnapshot *VolumeGroupSnapshot `protobuf:"bytes,1,opt,name=group_snapshot,json=groupSnapshot,proto3" json:"group_snapshot,omitempty"` } -func (m *CreateVolumeGroupSnapshotResponse) Reset() { *m = CreateVolumeGroupSnapshotResponse{} } -func (m *CreateVolumeGroupSnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*CreateVolumeGroupSnapshotResponse) ProtoMessage() {} -func (*CreateVolumeGroupSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{64} +func (x *CreateVolumeGroupSnapshotResponse) Reset() { + *x = CreateVolumeGroupSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CreateVolumeGroupSnapshotResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateVolumeGroupSnapshotResponse.Unmarshal(m, b) -} -func (m *CreateVolumeGroupSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateVolumeGroupSnapshotResponse.Marshal(b, m, deterministic) -} -func (m *CreateVolumeGroupSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateVolumeGroupSnapshotResponse.Merge(m, src) +func (x *CreateVolumeGroupSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CreateVolumeGroupSnapshotResponse) XXX_Size() int { - return xxx_messageInfo_CreateVolumeGroupSnapshotResponse.Size(m) -} -func (m *CreateVolumeGroupSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateVolumeGroupSnapshotResponse.DiscardUnknown(m) + +func (*CreateVolumeGroupSnapshotResponse) ProtoMessage() {} + +func (x *CreateVolumeGroupSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_CreateVolumeGroupSnapshotResponse proto.InternalMessageInfo +// Deprecated: Use CreateVolumeGroupSnapshotResponse.ProtoReflect.Descriptor instead. +func (*CreateVolumeGroupSnapshotResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{66} +} -func (m *CreateVolumeGroupSnapshotResponse) GetGroupSnapshot() *VolumeGroupSnapshot { - if m != nil { - return m.GroupSnapshot +func (x *CreateVolumeGroupSnapshotResponse) GetGroupSnapshot() *VolumeGroupSnapshot { + if x != nil { + return x.GroupSnapshot } return nil } type VolumeGroupSnapshot struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The identifier for this group snapshot, generated by the plugin. // This field MUST contain enough information to uniquely identify // this specific snapshot vs all other group snapshots supported by @@ -5291,7 +5404,7 @@ type VolumeGroupSnapshot struct { Snapshots []*Snapshot `protobuf:"bytes,2,rep,name=snapshots,proto3" json:"snapshots,omitempty"` // Timestamp of when the volume group snapshot was taken. // This field is REQUIRED. - CreationTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + CreationTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` // Indicates if all individual snapshots in the group snapshot // are ready to use as a `volume_content_source` in a // `CreateVolumeRequest`. The default value is false. @@ -5301,66 +5414,74 @@ type VolumeGroupSnapshot struct { // have ready_to_use set to true, the SP SHOULD set this field to // true. // This field is REQUIRED. 
- ReadyToUse bool `protobuf:"varint,4,opt,name=ready_to_use,json=readyToUse,proto3" json:"ready_to_use,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ReadyToUse bool `protobuf:"varint,4,opt,name=ready_to_use,json=readyToUse,proto3" json:"ready_to_use,omitempty"` } -func (m *VolumeGroupSnapshot) Reset() { *m = VolumeGroupSnapshot{} } -func (m *VolumeGroupSnapshot) String() string { return proto.CompactTextString(m) } -func (*VolumeGroupSnapshot) ProtoMessage() {} -func (*VolumeGroupSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{65} +func (x *VolumeGroupSnapshot) Reset() { + *x = VolumeGroupSnapshot{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *VolumeGroupSnapshot) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VolumeGroupSnapshot.Unmarshal(m, b) -} -func (m *VolumeGroupSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VolumeGroupSnapshot.Marshal(b, m, deterministic) -} -func (m *VolumeGroupSnapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeGroupSnapshot.Merge(m, src) +func (x *VolumeGroupSnapshot) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *VolumeGroupSnapshot) XXX_Size() int { - return xxx_messageInfo_VolumeGroupSnapshot.Size(m) -} -func (m *VolumeGroupSnapshot) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeGroupSnapshot.DiscardUnknown(m) + +func (*VolumeGroupSnapshot) ProtoMessage() {} + +func (x *VolumeGroupSnapshot) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[67] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_VolumeGroupSnapshot proto.InternalMessageInfo +// Deprecated: Use VolumeGroupSnapshot.ProtoReflect.Descriptor instead. +func (*VolumeGroupSnapshot) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{67} +} -func (m *VolumeGroupSnapshot) GetGroupSnapshotId() string { - if m != nil { - return m.GroupSnapshotId +func (x *VolumeGroupSnapshot) GetGroupSnapshotId() string { + if x != nil { + return x.GroupSnapshotId } return "" } -func (m *VolumeGroupSnapshot) GetSnapshots() []*Snapshot { - if m != nil { - return m.Snapshots +func (x *VolumeGroupSnapshot) GetSnapshots() []*Snapshot { + if x != nil { + return x.Snapshots } return nil } -func (m *VolumeGroupSnapshot) GetCreationTime() *timestamp.Timestamp { - if m != nil { - return m.CreationTime +func (x *VolumeGroupSnapshot) GetCreationTime() *timestamppb.Timestamp { + if x != nil { + return x.CreationTime } return nil } -func (m *VolumeGroupSnapshot) GetReadyToUse() bool { - if m != nil { - return m.ReadyToUse +func (x *VolumeGroupSnapshot) GetReadyToUse() bool { + if x != nil { + return x.ReadyToUse } return false } type DeleteVolumeGroupSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the group snapshot to be deleted. // This field is REQUIRED. 
GroupSnapshotId string `protobuf:"bytes,1,opt,name=group_snapshot_id,json=groupSnapshotId,proto3" json:"group_snapshot_id,omitempty"` @@ -5380,90 +5501,105 @@ type DeleteVolumeGroupSnapshotRequest struct { // section on how to use this field. // The secrets provided in this field SHOULD be the same for // all group snapshot operations on the same group snapshot. - Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *DeleteVolumeGroupSnapshotRequest) Reset() { *m = DeleteVolumeGroupSnapshotRequest{} } -func (m *DeleteVolumeGroupSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteVolumeGroupSnapshotRequest) ProtoMessage() {} -func (*DeleteVolumeGroupSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{66} +func (x *DeleteVolumeGroupSnapshotRequest) Reset() { + *x = DeleteVolumeGroupSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *DeleteVolumeGroupSnapshotRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.Unmarshal(m, b) -} -func (m *DeleteVolumeGroupSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.Marshal(b, m, deterministic) -} -func (m *DeleteVolumeGroupSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.Merge(m, src) +func (x *DeleteVolumeGroupSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteVolumeGroupSnapshotRequest) XXX_Size() int { - return xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.Size(m) -} -func (m *DeleteVolumeGroupSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteVolumeGroupSnapshotRequest.DiscardUnknown(m) + +func (*DeleteVolumeGroupSnapshotRequest) ProtoMessage() {} + +func (x *DeleteVolumeGroupSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[68] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_DeleteVolumeGroupSnapshotRequest proto.InternalMessageInfo +// Deprecated: Use DeleteVolumeGroupSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteVolumeGroupSnapshotRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{68} +} -func (m *DeleteVolumeGroupSnapshotRequest) GetGroupSnapshotId() string { - if m != nil { - return m.GroupSnapshotId +func (x *DeleteVolumeGroupSnapshotRequest) GetGroupSnapshotId() string { + if x != nil { + return x.GroupSnapshotId } return "" } -func (m *DeleteVolumeGroupSnapshotRequest) GetSnapshotIds() []string { - if m != nil { - return m.SnapshotIds +func (x *DeleteVolumeGroupSnapshotRequest) GetSnapshotIds() []string { + if x != nil { + return x.SnapshotIds } return nil } -func (m *DeleteVolumeGroupSnapshotRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *DeleteVolumeGroupSnapshotRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } type DeleteVolumeGroupSnapshotResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *DeleteVolumeGroupSnapshotResponse) Reset() { *m = DeleteVolumeGroupSnapshotResponse{} } -func (m *DeleteVolumeGroupSnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteVolumeGroupSnapshotResponse) ProtoMessage() {} -func (*DeleteVolumeGroupSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{67} +func (x *DeleteVolumeGroupSnapshotResponse) Reset() { + *x = DeleteVolumeGroupSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *DeleteVolumeGroupSnapshotResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.Unmarshal(m, b) -} -func (m *DeleteVolumeGroupSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.Marshal(b, m, deterministic) +func (x *DeleteVolumeGroupSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteVolumeGroupSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.Merge(m, src) -} -func (m *DeleteVolumeGroupSnapshotResponse) XXX_Size() int { - return xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.Size(m) -} -func (m *DeleteVolumeGroupSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteVolumeGroupSnapshotResponse.DiscardUnknown(m) + +func (*DeleteVolumeGroupSnapshotResponse) ProtoMessage() {} + +func (x *DeleteVolumeGroupSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[69] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_DeleteVolumeGroupSnapshotResponse proto.InternalMessageInfo +// Deprecated: Use DeleteVolumeGroupSnapshotResponse.ProtoReflect.Descriptor instead. +func (*DeleteVolumeGroupSnapshotResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{69} +} type GetVolumeGroupSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The ID of the group snapshot to fetch current group snapshot // information for. 
// This field is REQUIRED. @@ -5484,1721 +5620,4302 @@ type GetVolumeGroupSnapshotRequest struct { // section on how to use this field. // The secrets provided in this field SHOULD be the same for // all group snapshot operations on the same group snapshot. - Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *GetVolumeGroupSnapshotRequest) Reset() { *m = GetVolumeGroupSnapshotRequest{} } -func (m *GetVolumeGroupSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*GetVolumeGroupSnapshotRequest) ProtoMessage() {} -func (*GetVolumeGroupSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{68} +func (x *GetVolumeGroupSnapshotRequest) Reset() { + *x = GetVolumeGroupSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetVolumeGroupSnapshotRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetVolumeGroupSnapshotRequest.Unmarshal(m, b) -} -func (m *GetVolumeGroupSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetVolumeGroupSnapshotRequest.Marshal(b, m, deterministic) +func (x *GetVolumeGroupSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetVolumeGroupSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetVolumeGroupSnapshotRequest.Merge(m, src) -} -func (m *GetVolumeGroupSnapshotRequest) XXX_Size() int { - return xxx_messageInfo_GetVolumeGroupSnapshotRequest.Size(m) -} -func (m *GetVolumeGroupSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetVolumeGroupSnapshotRequest.DiscardUnknown(m) + +func (*GetVolumeGroupSnapshotRequest) ProtoMessage() {} + +func (x *GetVolumeGroupSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[70] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GetVolumeGroupSnapshotRequest proto.InternalMessageInfo +// Deprecated: Use GetVolumeGroupSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*GetVolumeGroupSnapshotRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{70} +} -func (m *GetVolumeGroupSnapshotRequest) GetGroupSnapshotId() string { - if m != nil { - return m.GroupSnapshotId +func (x *GetVolumeGroupSnapshotRequest) GetGroupSnapshotId() string { + if x != nil { + return x.GroupSnapshotId } return "" } -func (m *GetVolumeGroupSnapshotRequest) GetSnapshotIds() []string { - if m != nil { - return m.SnapshotIds +func (x *GetVolumeGroupSnapshotRequest) GetSnapshotIds() []string { + if x != nil { + return x.SnapshotIds } return nil } -func (m *GetVolumeGroupSnapshotRequest) GetSecrets() map[string]string { - if m != nil { - return m.Secrets +func (x *GetVolumeGroupSnapshotRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } return nil } type GetVolumeGroupSnapshotResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // This field is REQUIRED - GroupSnapshot *VolumeGroupSnapshot `protobuf:"bytes,1,opt,name=group_snapshot,json=groupSnapshot,proto3" json:"group_snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + GroupSnapshot *VolumeGroupSnapshot `protobuf:"bytes,1,opt,name=group_snapshot,json=groupSnapshot,proto3" json:"group_snapshot,omitempty"` } -func (m *GetVolumeGroupSnapshotResponse) Reset() { *m = GetVolumeGroupSnapshotResponse{} } -func (m *GetVolumeGroupSnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*GetVolumeGroupSnapshotResponse) ProtoMessage() {} -func (*GetVolumeGroupSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9cdb00adce470e01, []int{69} +func (x *GetVolumeGroupSnapshotResponse) Reset() { + *x = GetVolumeGroupSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetVolumeGroupSnapshotResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetVolumeGroupSnapshotResponse.Unmarshal(m, b) -} -func (m *GetVolumeGroupSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetVolumeGroupSnapshotResponse.Marshal(b, m, deterministic) -} -func (m *GetVolumeGroupSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetVolumeGroupSnapshotResponse.Merge(m, src) -} -func (m *GetVolumeGroupSnapshotResponse) XXX_Size() int { - return xxx_messageInfo_GetVolumeGroupSnapshotResponse.Size(m) -} -func (m *GetVolumeGroupSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetVolumeGroupSnapshotResponse.DiscardUnknown(m) +func (x *GetVolumeGroupSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_GetVolumeGroupSnapshotResponse proto.InternalMessageInfo +func (*GetVolumeGroupSnapshotResponse) ProtoMessage() {} -func (m *GetVolumeGroupSnapshotResponse) GetGroupSnapshot() *VolumeGroupSnapshot { - if m != nil { - return m.GroupSnapshot +func (x *GetVolumeGroupSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -var E_AlphaEnum = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - 
ExtensionType: (*bool)(nil), - Field: 1060, - Name: "csi.v1.alpha_enum", - Tag: "varint,1060,opt,name=alpha_enum", - Filename: "github.com/container-storage-interface/spec/csi.proto", -} - -var E_AlphaEnumValue = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumValueOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 1060, - Name: "csi.v1.alpha_enum_value", - Tag: "varint,1060,opt,name=alpha_enum_value", - Filename: "github.com/container-storage-interface/spec/csi.proto", -} - -var E_CsiSecret = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 1059, - Name: "csi.v1.csi_secret", - Tag: "varint,1059,opt,name=csi_secret", - Filename: "github.com/container-storage-interface/spec/csi.proto", -} - -var E_AlphaField = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 1060, - Name: "csi.v1.alpha_field", - Tag: "varint,1060,opt,name=alpha_field", - Filename: "github.com/container-storage-interface/spec/csi.proto", -} - -var E_AlphaMessage = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 1060, - Name: "csi.v1.alpha_message", - Tag: "varint,1060,opt,name=alpha_message", - Filename: "github.com/container-storage-interface/spec/csi.proto", -} - -var E_AlphaMethod = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MethodOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 1060, - Name: "csi.v1.alpha_method", - Tag: "varint,1060,opt,name=alpha_method", - Filename: "github.com/container-storage-interface/spec/csi.proto", -} - -var E_AlphaService = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.ServiceOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 1060, - Name: "csi.v1.alpha_service", - Tag: "varint,1060,opt,name=alpha_service", - Filename: "github.com/container-storage-interface/spec/csi.proto", -} - -func init() { - proto.RegisterEnum("csi.v1.PluginCapability_Service_Type", PluginCapability_Service_Type_name, PluginCapability_Service_Type_value) - proto.RegisterEnum("csi.v1.PluginCapability_VolumeExpansion_Type", PluginCapability_VolumeExpansion_Type_name, PluginCapability_VolumeExpansion_Type_value) - proto.RegisterEnum("csi.v1.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value) - proto.RegisterEnum("csi.v1.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value) - proto.RegisterEnum("csi.v1.VolumeUsage_Unit", VolumeUsage_Unit_name, VolumeUsage_Unit_value) - proto.RegisterEnum("csi.v1.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value) - proto.RegisterEnum("csi.v1.GroupControllerServiceCapability_RPC_Type", GroupControllerServiceCapability_RPC_Type_name, GroupControllerServiceCapability_RPC_Type_value) - proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.v1.GetPluginInfoRequest") - proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.v1.GetPluginInfoResponse") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetPluginInfoResponse.ManifestEntry") - proto.RegisterType((*GetPluginCapabilitiesRequest)(nil), "csi.v1.GetPluginCapabilitiesRequest") - proto.RegisterType((*GetPluginCapabilitiesResponse)(nil), "csi.v1.GetPluginCapabilitiesResponse") - proto.RegisterType((*PluginCapability)(nil), "csi.v1.PluginCapability") - proto.RegisterType((*PluginCapability_Service)(nil), 
"csi.v1.PluginCapability.Service") - proto.RegisterType((*PluginCapability_VolumeExpansion)(nil), "csi.v1.PluginCapability.VolumeExpansion") - proto.RegisterType((*ProbeRequest)(nil), "csi.v1.ProbeRequest") - proto.RegisterType((*ProbeResponse)(nil), "csi.v1.ProbeResponse") - proto.RegisterType((*CreateVolumeRequest)(nil), "csi.v1.CreateVolumeRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeRequest.ParametersEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeRequest.SecretsEntry") - proto.RegisterType((*VolumeContentSource)(nil), "csi.v1.VolumeContentSource") - proto.RegisterType((*VolumeContentSource_SnapshotSource)(nil), "csi.v1.VolumeContentSource.SnapshotSource") - proto.RegisterType((*VolumeContentSource_VolumeSource)(nil), "csi.v1.VolumeContentSource.VolumeSource") - proto.RegisterType((*CreateVolumeResponse)(nil), "csi.v1.CreateVolumeResponse") - proto.RegisterType((*VolumeCapability)(nil), "csi.v1.VolumeCapability") - proto.RegisterType((*VolumeCapability_BlockVolume)(nil), "csi.v1.VolumeCapability.BlockVolume") - proto.RegisterType((*VolumeCapability_MountVolume)(nil), "csi.v1.VolumeCapability.MountVolume") - proto.RegisterType((*VolumeCapability_AccessMode)(nil), "csi.v1.VolumeCapability.AccessMode") - proto.RegisterType((*CapacityRange)(nil), "csi.v1.CapacityRange") - proto.RegisterType((*Volume)(nil), "csi.v1.Volume") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.Volume.VolumeContextEntry") - proto.RegisterType((*TopologyRequirement)(nil), "csi.v1.TopologyRequirement") - proto.RegisterType((*Topology)(nil), "csi.v1.Topology") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.Topology.SegmentsEntry") - proto.RegisterType((*DeleteVolumeRequest)(nil), "csi.v1.DeleteVolumeRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteVolumeRequest.SecretsEntry") - proto.RegisterType((*DeleteVolumeResponse)(nil), "csi.v1.DeleteVolumeResponse") - proto.RegisterType((*ControllerPublishVolumeRequest)(nil), "csi.v1.ControllerPublishVolumeRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeRequest.SecretsEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeRequest.VolumeContextEntry") - proto.RegisterType((*ControllerPublishVolumeResponse)(nil), "csi.v1.ControllerPublishVolumeResponse") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeResponse.PublishContextEntry") - proto.RegisterType((*ControllerUnpublishVolumeRequest)(nil), "csi.v1.ControllerUnpublishVolumeRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerUnpublishVolumeRequest.SecretsEntry") - proto.RegisterType((*ControllerUnpublishVolumeResponse)(nil), "csi.v1.ControllerUnpublishVolumeResponse") - proto.RegisterType((*ValidateVolumeCapabilitiesRequest)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.ParametersEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.SecretsEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.VolumeContextEntry") - proto.RegisterType((*ValidateVolumeCapabilitiesResponse)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse") - proto.RegisterType((*ValidateVolumeCapabilitiesResponse_Confirmed)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed") - proto.RegisterMapType((map[string]string)(nil), 
"csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry") - proto.RegisterType((*ListVolumesRequest)(nil), "csi.v1.ListVolumesRequest") - proto.RegisterType((*ListVolumesResponse)(nil), "csi.v1.ListVolumesResponse") - proto.RegisterType((*ListVolumesResponse_VolumeStatus)(nil), "csi.v1.ListVolumesResponse.VolumeStatus") - proto.RegisterType((*ListVolumesResponse_Entry)(nil), "csi.v1.ListVolumesResponse.Entry") - proto.RegisterType((*ControllerGetVolumeRequest)(nil), "csi.v1.ControllerGetVolumeRequest") - proto.RegisterType((*ControllerGetVolumeResponse)(nil), "csi.v1.ControllerGetVolumeResponse") - proto.RegisterType((*ControllerGetVolumeResponse_VolumeStatus)(nil), "csi.v1.ControllerGetVolumeResponse.VolumeStatus") - proto.RegisterType((*GetCapacityRequest)(nil), "csi.v1.GetCapacityRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetCapacityRequest.ParametersEntry") - proto.RegisterType((*GetCapacityResponse)(nil), "csi.v1.GetCapacityResponse") - proto.RegisterType((*ControllerGetCapabilitiesRequest)(nil), "csi.v1.ControllerGetCapabilitiesRequest") - proto.RegisterType((*ControllerGetCapabilitiesResponse)(nil), "csi.v1.ControllerGetCapabilitiesResponse") - proto.RegisterType((*ControllerServiceCapability)(nil), "csi.v1.ControllerServiceCapability") - proto.RegisterType((*ControllerServiceCapability_RPC)(nil), "csi.v1.ControllerServiceCapability.RPC") - proto.RegisterType((*CreateSnapshotRequest)(nil), "csi.v1.CreateSnapshotRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateSnapshotRequest.ParametersEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateSnapshotRequest.SecretsEntry") - proto.RegisterType((*CreateSnapshotResponse)(nil), "csi.v1.CreateSnapshotResponse") - proto.RegisterType((*Snapshot)(nil), "csi.v1.Snapshot") - proto.RegisterType((*DeleteSnapshotRequest)(nil), "csi.v1.DeleteSnapshotRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteSnapshotRequest.SecretsEntry") - proto.RegisterType((*DeleteSnapshotResponse)(nil), "csi.v1.DeleteSnapshotResponse") - proto.RegisterType((*ListSnapshotsRequest)(nil), "csi.v1.ListSnapshotsRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.ListSnapshotsRequest.SecretsEntry") - proto.RegisterType((*ListSnapshotsResponse)(nil), "csi.v1.ListSnapshotsResponse") - proto.RegisterType((*ListSnapshotsResponse_Entry)(nil), "csi.v1.ListSnapshotsResponse.Entry") - proto.RegisterType((*ControllerExpandVolumeRequest)(nil), "csi.v1.ControllerExpandVolumeRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerExpandVolumeRequest.SecretsEntry") - proto.RegisterType((*ControllerExpandVolumeResponse)(nil), "csi.v1.ControllerExpandVolumeResponse") - proto.RegisterType((*NodeStageVolumeRequest)(nil), "csi.v1.NodeStageVolumeRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.PublishContextEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.SecretsEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.VolumeContextEntry") - proto.RegisterType((*NodeStageVolumeResponse)(nil), "csi.v1.NodeStageVolumeResponse") - proto.RegisterType((*NodeUnstageVolumeRequest)(nil), "csi.v1.NodeUnstageVolumeRequest") - proto.RegisterType((*NodeUnstageVolumeResponse)(nil), "csi.v1.NodeUnstageVolumeResponse") - 
proto.RegisterType((*NodePublishVolumeRequest)(nil), "csi.v1.NodePublishVolumeRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.PublishContextEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.SecretsEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.VolumeContextEntry") - proto.RegisterType((*NodePublishVolumeResponse)(nil), "csi.v1.NodePublishVolumeResponse") - proto.RegisterType((*NodeUnpublishVolumeRequest)(nil), "csi.v1.NodeUnpublishVolumeRequest") - proto.RegisterType((*NodeUnpublishVolumeResponse)(nil), "csi.v1.NodeUnpublishVolumeResponse") - proto.RegisterType((*NodeGetVolumeStatsRequest)(nil), "csi.v1.NodeGetVolumeStatsRequest") - proto.RegisterType((*NodeGetVolumeStatsResponse)(nil), "csi.v1.NodeGetVolumeStatsResponse") - proto.RegisterType((*VolumeUsage)(nil), "csi.v1.VolumeUsage") - proto.RegisterType((*VolumeCondition)(nil), "csi.v1.VolumeCondition") - proto.RegisterType((*NodeGetCapabilitiesRequest)(nil), "csi.v1.NodeGetCapabilitiesRequest") - proto.RegisterType((*NodeGetCapabilitiesResponse)(nil), "csi.v1.NodeGetCapabilitiesResponse") - proto.RegisterType((*NodeServiceCapability)(nil), "csi.v1.NodeServiceCapability") - proto.RegisterType((*NodeServiceCapability_RPC)(nil), "csi.v1.NodeServiceCapability.RPC") - proto.RegisterType((*NodeGetInfoRequest)(nil), "csi.v1.NodeGetInfoRequest") - proto.RegisterType((*NodeGetInfoResponse)(nil), "csi.v1.NodeGetInfoResponse") - proto.RegisterType((*NodeExpandVolumeRequest)(nil), "csi.v1.NodeExpandVolumeRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeExpandVolumeRequest.SecretsEntry") - proto.RegisterType((*NodeExpandVolumeResponse)(nil), "csi.v1.NodeExpandVolumeResponse") - proto.RegisterType((*GroupControllerGetCapabilitiesRequest)(nil), "csi.v1.GroupControllerGetCapabilitiesRequest") - proto.RegisterType((*GroupControllerGetCapabilitiesResponse)(nil), "csi.v1.GroupControllerGetCapabilitiesResponse") - proto.RegisterType((*GroupControllerServiceCapability)(nil), "csi.v1.GroupControllerServiceCapability") - proto.RegisterType((*GroupControllerServiceCapability_RPC)(nil), "csi.v1.GroupControllerServiceCapability.RPC") - proto.RegisterType((*CreateVolumeGroupSnapshotRequest)(nil), "csi.v1.CreateVolumeGroupSnapshotRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeGroupSnapshotRequest.ParametersEntry") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeGroupSnapshotRequest.SecretsEntry") - proto.RegisterType((*CreateVolumeGroupSnapshotResponse)(nil), "csi.v1.CreateVolumeGroupSnapshotResponse") - proto.RegisterType((*VolumeGroupSnapshot)(nil), "csi.v1.VolumeGroupSnapshot") - proto.RegisterType((*DeleteVolumeGroupSnapshotRequest)(nil), "csi.v1.DeleteVolumeGroupSnapshotRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteVolumeGroupSnapshotRequest.SecretsEntry") - proto.RegisterType((*DeleteVolumeGroupSnapshotResponse)(nil), "csi.v1.DeleteVolumeGroupSnapshotResponse") - proto.RegisterType((*GetVolumeGroupSnapshotRequest)(nil), "csi.v1.GetVolumeGroupSnapshotRequest") - proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetVolumeGroupSnapshotRequest.SecretsEntry") - proto.RegisterType((*GetVolumeGroupSnapshotResponse)(nil), "csi.v1.GetVolumeGroupSnapshotResponse") - proto.RegisterExtension(E_AlphaEnum) - proto.RegisterExtension(E_AlphaEnumValue) - proto.RegisterExtension(E_CsiSecret) - proto.RegisterExtension(E_AlphaField) - 
proto.RegisterExtension(E_AlphaMessage) - proto.RegisterExtension(E_AlphaMethod) - proto.RegisterExtension(E_AlphaService) -} - -func init() { - proto.RegisterFile("github.com/container-storage-interface/spec/csi.proto", fileDescriptor_9cdb00adce470e01) -} - -var fileDescriptor_9cdb00adce470e01 = []byte{ - // 4182 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5c, 0x4d, 0x6c, 0x1b, 0x49, - 0x76, 0x56, 0xf3, 0x4f, 0xd2, 0xa3, 0x24, 0x53, 0xa5, 0x1f, 0xd3, 0x2d, 0x59, 0x96, 0xda, 0xe3, - 0x19, 0x8d, 0xc7, 0xa6, 0x67, 0xbc, 0x33, 0x83, 0x1d, 0x8d, 0x67, 0x77, 0x48, 0x89, 0x96, 0xb8, - 0xa6, 0x49, 0x6d, 0x93, 0xf2, 0xac, 0x9d, 0x0c, 0x7a, 0x5a, 0x64, 0x49, 0x6e, 0x0c, 0xd9, 0xcd, - 0xe9, 0x6e, 0x2a, 0xd6, 0xe6, 0x90, 0x64, 0x83, 0x20, 0x1b, 0xe4, 0x12, 0x24, 0x87, 0x4c, 0x4e, - 0x59, 0x24, 0x39, 0xee, 0x62, 0x0f, 0x41, 0x10, 0x20, 0x97, 0x00, 0xb9, 0x25, 0x40, 0x90, 0x1c, - 0x93, 0x5c, 0xf6, 0x10, 0x20, 0x87, 0x45, 0x02, 0x4c, 0x2e, 0x39, 0xe4, 0x10, 0x04, 0x5d, 0x55, - 0xfd, 0xff, 0x43, 0xd2, 0x92, 0x33, 0x01, 0xf6, 0x64, 0x75, 0xd5, 0xab, 0x57, 0xaf, 0xaa, 0xde, - 0x7b, 0xf5, 0xde, 0xf7, 0x8a, 0x86, 0xf7, 0x4e, 0x15, 0xf3, 0xf9, 0xf0, 0xb8, 0xd4, 0xd1, 0xfa, - 0xf7, 0x3a, 0x9a, 0x6a, 0xca, 0x8a, 0x8a, 0xf5, 0xbb, 0x86, 0xa9, 0xe9, 0xf2, 0x29, 0xbe, 0xab, - 0xa8, 0x26, 0xd6, 0x4f, 0xe4, 0x0e, 0xbe, 0x67, 0x0c, 0x70, 0xe7, 0x5e, 0xc7, 0x50, 0x4a, 0x03, - 0x5d, 0x33, 0x35, 0x94, 0xb3, 0xfe, 0x3c, 0x7b, 0x87, 0xdf, 0x3c, 0xd5, 0xb4, 0xd3, 0x1e, 0xbe, - 0x47, 0x5a, 0x8f, 0x87, 0x27, 0xf7, 0xba, 0xd8, 0xe8, 0xe8, 0xca, 0xc0, 0xd4, 0x74, 0x4a, 0xc9, - 0xdf, 0x08, 0x52, 0x98, 0x4a, 0x1f, 0x1b, 0xa6, 0xdc, 0x1f, 0x30, 0x82, 0x8d, 0x20, 0xc1, 0xaf, - 0xe8, 0xf2, 0x60, 0x80, 0x75, 0x83, 0xf6, 0x0b, 0xab, 0xb0, 0xbc, 0x8f, 0xcd, 0xc3, 0xde, 0xf0, - 0x54, 0x51, 0x6b, 0xea, 0x89, 0x26, 0xe2, 0x2f, 0x86, 0xd8, 0x30, 0x85, 0x7f, 0xe2, 0x60, 0x25, - 0xd0, 0x61, 0x0c, 0x34, 0xd5, 0xc0, 0x08, 0x41, 0x46, 0x95, 0xfb, 0xb8, 0xc8, 0x6d, 0x72, 0xdb, - 0xb3, 0x22, 0xf9, 0x1b, 0xdd, 0x82, 0x85, 0x33, 0xac, 0x76, 0x35, 0x5d, 0x3a, 0xc3, 0xba, 0xa1, - 0x68, 0x6a, 0x31, 0x45, 0x7a, 0xe7, 0x69, 0xeb, 0x13, 0xda, 0x88, 0xf6, 0x61, 0xa6, 0x2f, 0xab, - 0xca, 0x09, 0x36, 0xcc, 0x62, 0x7a, 0x33, 0xbd, 0x9d, 0xbf, 0xff, 0x56, 0x89, 0x2e, 0xb5, 0x14, - 0x39, 0x57, 0xe9, 0x31, 0xa3, 0xae, 0xaa, 0xa6, 0x7e, 0x2e, 0x3a, 0x83, 0xf9, 0x0f, 0x61, 0xde, - 0xd7, 0x85, 0x0a, 0x90, 0xfe, 0x1c, 0x9f, 0x33, 0x99, 0xac, 0x3f, 0xd1, 0x32, 0x64, 0xcf, 0xe4, - 0xde, 0x10, 0x33, 0x49, 0xe8, 0xc7, 0x4e, 0xea, 0x9b, 0x9c, 0xb0, 0x01, 0xeb, 0xce, 0x6c, 0xbb, - 0xf2, 0x40, 0x3e, 0x56, 0x7a, 0x8a, 0xa9, 0x60, 0xc3, 0x5e, 0xfa, 0xa7, 0x70, 0x3d, 0xa6, 0x9f, - 0xed, 0xc0, 0x03, 0x98, 0xeb, 0x78, 0xda, 0x8b, 0x1c, 0x59, 0x4a, 0xd1, 0x5e, 0x4a, 0x60, 0xe4, - 0xb9, 0xe8, 0xa3, 0x16, 0xfe, 0x33, 0x0d, 0x85, 0x20, 0x09, 0x7a, 0x00, 0xd3, 0x06, 0xd6, 0xcf, - 0x94, 0x0e, 0xdd, 0xd7, 0xfc, 0xfd, 0xcd, 0x38, 0x6e, 0xa5, 0x16, 0xa5, 0x3b, 0x98, 0x12, 0xed, - 0x21, 0xe8, 0x08, 0x0a, 0x67, 0x5a, 0x6f, 0xd8, 0xc7, 0x12, 0x7e, 0x31, 0x90, 0x55, 0xe7, 0x00, - 0xf2, 0xf7, 0xb7, 0x63, 0xd9, 0x3c, 0x21, 0x03, 0xaa, 0x36, 0xfd, 0xc1, 0x94, 0x78, 0xe5, 0xcc, - 0xdf, 0xc4, 0xff, 0x15, 0x07, 0xd3, 0x6c, 0x36, 0xf4, 0x01, 0x64, 0xcc, 0xf3, 0x01, 0x95, 0x6e, - 0xe1, 0xfe, 0xad, 0x51, 0xd2, 0x95, 0xda, 0xe7, 0x03, 0x2c, 0x92, 0x21, 0x82, 0x09, 0x19, 0xeb, - 0x0b, 0xe5, 0x61, 0xfa, 0xa8, 0xf1, 0xa8, 0xd1, 0xfc, 0xa4, 0x51, 0x98, 0x42, 0xab, 0x80, 0x76, - 0x9b, 0x8d, 0xb6, 0xd8, 0xac, 0xd7, 0xab, 0xa2, 0xd4, 0xaa, 
0x8a, 0x4f, 0x6a, 0xbb, 0xd5, 0x02, - 0x87, 0x5e, 0x83, 0xcd, 0x27, 0xcd, 0xfa, 0xd1, 0xe3, 0xaa, 0x54, 0xde, 0xdd, 0xad, 0xb6, 0x5a, - 0xb5, 0x4a, 0xad, 0x5e, 0x6b, 0x3f, 0x95, 0x76, 0x9b, 0x8d, 0x56, 0x5b, 0x2c, 0xd7, 0x1a, 0xed, - 0x56, 0x21, 0x85, 0xb6, 0xa0, 0xb8, 0x2f, 0x36, 0x8f, 0x0e, 0xa5, 0x08, 0x1e, 0x69, 0x3e, 0xfd, - 0xa3, 0x0a, 0xc7, 0xff, 0x80, 0x83, 0x2b, 0x81, 0x35, 0xa2, 0xb2, 0x6f, 0x11, 0x77, 0xc7, 0xdd, - 0x1b, 0xef, 0x62, 0xee, 0x44, 0x2d, 0x06, 0x20, 0xd7, 0x6c, 0xd4, 0x6b, 0x0d, 0x6b, 0x01, 0x79, - 0x98, 0x6e, 0x3e, 0x7c, 0x48, 0x3e, 0x52, 0x95, 0x1c, 0x9d, 0x50, 0x58, 0x80, 0xb9, 0x43, 0x5d, - 0x3b, 0xc6, 0xb6, 0x8a, 0x95, 0x61, 0x9e, 0x7d, 0x33, 0x95, 0x7a, 0x1b, 0xb2, 0x3a, 0x96, 0xbb, - 0xe7, 0xec, 0xf4, 0xf9, 0x12, 0x35, 0xdb, 0x92, 0x6d, 0xb6, 0xa5, 0x8a, 0xa6, 0xf5, 0x9e, 0x58, - 0x2a, 0x2c, 0x52, 0x42, 0xe1, 0xab, 0x0c, 0x2c, 0xed, 0xea, 0x58, 0x36, 0x31, 0x95, 0x96, 0xb1, - 0x8e, 0x34, 0xcf, 0x07, 0xb0, 0x60, 0xa9, 0x60, 0x47, 0x31, 0xcf, 0x25, 0x5d, 0x56, 0x4f, 0x31, - 0xd3, 0x8e, 0x15, 0x7b, 0x07, 0x76, 0x59, 0xaf, 0x68, 0x75, 0x8a, 0xf3, 0x1d, 0xef, 0x27, 0xaa, - 0xc1, 0x12, 0xd3, 0x2e, 0x9f, 0xd6, 0xa7, 0xfd, 0x5a, 0x4f, 0xa5, 0xf0, 0x68, 0x3d, 0x3a, 0xf3, - 0xb7, 0x28, 0xd8, 0x40, 0x8f, 0x00, 0x06, 0xb2, 0x2e, 0xf7, 0xb1, 0x89, 0x75, 0xa3, 0x98, 0xf1, - 0xbb, 0x80, 0x88, 0xd5, 0x94, 0x0e, 0x1d, 0x6a, 0xea, 0x02, 0x3c, 0xc3, 0xd1, 0xbe, 0x65, 0x33, - 0x1d, 0x1d, 0x9b, 0x46, 0x31, 0x4b, 0x38, 0x6d, 0x27, 0x71, 0x6a, 0x51, 0x52, 0xc2, 0xa6, 0x92, - 0xfe, 0xb2, 0xc2, 0x89, 0xf6, 0x68, 0xd4, 0x84, 0x15, 0x7b, 0x81, 0x9a, 0x6a, 0x62, 0xd5, 0x94, - 0x0c, 0x6d, 0xa8, 0x77, 0x70, 0x31, 0x47, 0x76, 0x69, 0x2d, 0xb0, 0x44, 0x4a, 0xd3, 0x22, 0x24, - 0x22, 0xdb, 0x1a, 0x5f, 0x23, 0x7a, 0x06, 0xbc, 0xdc, 0xe9, 0x60, 0xc3, 0x50, 0xe8, 0x5e, 0x48, - 0x3a, 0xfe, 0x62, 0xa8, 0xe8, 0xb8, 0x8f, 0x55, 0xd3, 0x28, 0x4e, 0xfb, 0xb9, 0xb6, 0xb5, 0x81, - 0xd6, 0xd3, 0x4e, 0xcf, 0x45, 0x97, 0x46, 0xbc, 0xe6, 0x1b, 0xee, 0xe9, 0x31, 0xf8, 0x8f, 0xe0, - 0x4a, 0x60, 0x53, 0x26, 0x71, 0x7e, 0xfc, 0x0e, 0xcc, 0x79, 0x77, 0x62, 0x22, 0xc7, 0xf9, 0xbb, - 0x29, 0x58, 0x8a, 0xd8, 0x03, 0x74, 0x00, 0x33, 0x86, 0x2a, 0x0f, 0x8c, 0xe7, 0x9a, 0xc9, 0xf4, - 0xf7, 0x76, 0xc2, 0x96, 0x95, 0x5a, 0x8c, 0x96, 0x7e, 0x1e, 0x4c, 0x89, 0xce, 0x68, 0x54, 0x81, - 0x1c, 0xdd, 0xcf, 0xa0, 0xfb, 0x8a, 0xe2, 0x43, 0xdb, 0x1c, 0x2e, 0x6c, 0x24, 0xff, 0x0e, 0x2c, - 0xf8, 0x67, 0x40, 0x37, 0x20, 0x6f, 0xcf, 0x20, 0x29, 0x5d, 0xb6, 0x56, 0xb0, 0x9b, 0x6a, 0x5d, - 0xfe, 0x2d, 0x98, 0xf3, 0x32, 0x43, 0x6b, 0x30, 0xcb, 0x14, 0xc2, 0x21, 0x9f, 0xa1, 0x0d, 0xb5, - 0xae, 0x63, 0xd3, 0xdf, 0x82, 0x65, 0xbf, 0x9e, 0x31, 0x53, 0x7e, 0xdd, 0x59, 0x03, 0xdd, 0x8b, - 0x05, 0xff, 0x1a, 0x6c, 0x39, 0x85, 0x3f, 0xce, 0x42, 0x21, 0x68, 0x34, 0xe8, 0x01, 0x64, 0x8f, - 0x7b, 0x5a, 0xe7, 0x73, 0x36, 0xf6, 0xb5, 0x38, 0xeb, 0x2a, 0x55, 0x2c, 0x2a, 0xda, 0x7a, 0x30, - 0x25, 0xd2, 0x41, 0xd6, 0xe8, 0xbe, 0x36, 0x54, 0x4d, 0xb6, 0x7b, 0xf1, 0xa3, 0x1f, 0x5b, 0x54, - 0xee, 0x68, 0x32, 0x08, 0xed, 0x41, 0x9e, 0xaa, 0x9d, 0xd4, 0xd7, 0xba, 0xb8, 0x98, 0x26, 0x3c, - 0x6e, 0xc6, 0xf2, 0x28, 0x13, 0xda, 0xc7, 0x5a, 0x17, 0x8b, 0x20, 0x3b, 0x7f, 0xf3, 0xf3, 0x90, - 0xf7, 0xc8, 0xc6, 0x0f, 0x21, 0xef, 0x99, 0x0c, 0x5d, 0x85, 0xe9, 0x13, 0x43, 0x72, 0x9c, 0xf0, - 0xac, 0x98, 0x3b, 0x31, 0x88, 0x3f, 0xbd, 0x01, 0x79, 0x22, 0x85, 0x74, 0xd2, 0x93, 0x4f, 0x8d, - 0x62, 0x6a, 0x33, 0x6d, 0x9d, 0x11, 0x69, 0x7a, 0x68, 0xb5, 0xa0, 0x3b, 0xc0, 0x1c, 0x8a, 0x44, - 0xe9, 0x4e, 0x75, 0x6d, 0x38, 0x20, 0x42, 0xce, 0x8a, 0xec, 0xf6, 0x23, 0x13, 0xed, 
0x5b, 0xed, - 0xfc, 0x9f, 0xa7, 0x00, 0x5c, 0x01, 0xd1, 0x03, 0xc8, 0x90, 0x35, 0x51, 0xc7, 0xbf, 0x3d, 0xc6, - 0x9a, 0x4a, 0x64, 0x61, 0x64, 0x94, 0xf0, 0x6f, 0x1c, 0x64, 0x08, 0x9b, 0xe0, 0x0d, 0xd6, 0xaa, - 0x35, 0xf6, 0xeb, 0x55, 0xa9, 0xd1, 0xdc, 0xab, 0x4a, 0x9f, 0x88, 0xb5, 0x76, 0x55, 0x2c, 0x70, - 0x68, 0x0d, 0xae, 0x7a, 0xdb, 0xc5, 0x6a, 0x79, 0xaf, 0x2a, 0x4a, 0xcd, 0x46, 0xfd, 0x69, 0x21, - 0x85, 0x78, 0x58, 0x7d, 0x7c, 0x54, 0x6f, 0xd7, 0xc2, 0x7d, 0x69, 0xb4, 0x0e, 0x45, 0x4f, 0x1f, - 0xe3, 0xc1, 0xd8, 0x66, 0x2c, 0xb6, 0x9e, 0x5e, 0xfa, 0x27, 0xeb, 0xcc, 0x22, 0x01, 0xae, 0x79, - 0xe7, 0xf4, 0x8f, 0xcd, 0x91, 0x0b, 0xd1, 0xba, 0x33, 0xbd, 0x34, 0x3e, 0x0e, 0xd3, 0x84, 0xa4, - 0x32, 0xef, 0x68, 0x00, 0xd1, 0xf0, 0x4f, 0x60, 0xde, 0x77, 0x31, 0x58, 0x61, 0x1e, 0xf3, 0x64, - 0x5d, 0xe9, 0xf8, 0xdc, 0x24, 0xa1, 0x0f, 0xb7, 0x9d, 0x16, 0xe7, 0xed, 0xd6, 0x8a, 0xd5, 0x68, - 0x9d, 0x65, 0x4f, 0xe9, 0x2b, 0x26, 0xa3, 0x49, 0x11, 0x1a, 0x20, 0x4d, 0x84, 0x40, 0xf8, 0x59, - 0x0a, 0x72, 0x4c, 0x21, 0x6e, 0x79, 0xae, 0x26, 0x1f, 0x4b, 0xbb, 0x95, 0xb2, 0xf4, 0x59, 0x64, - 0xca, 0x6f, 0x91, 0xe8, 0x00, 0x16, 0xbc, 0xfe, 0xfb, 0x85, 0x1d, 0x5c, 0x6e, 0xf9, 0xcf, 0xd9, - 0xeb, 0x44, 0x5e, 0xb0, 0x90, 0x72, 0xfe, 0xcc, 0xdb, 0x86, 0x2a, 0xb0, 0x10, 0xb8, 0x02, 0x32, - 0xa3, 0xaf, 0x80, 0xf9, 0x8e, 0xcf, 0x1b, 0x96, 0x61, 0xc9, 0xf6, 0xde, 0x3d, 0x2c, 0x99, 0xcc, - 0xbb, 0xb3, 0x2b, 0xaa, 0x10, 0xf2, 0xfa, 0xc8, 0x25, 0xb6, 0xdb, 0xf8, 0x8f, 0x01, 0x85, 0x65, - 0x9d, 0xc8, 0x55, 0x0f, 0x61, 0x29, 0xe2, 0x5e, 0x41, 0x25, 0x98, 0x25, 0x47, 0x65, 0x28, 0x26, - 0x66, 0x61, 0x6b, 0x58, 0x22, 0x97, 0xc4, 0xa2, 0x1f, 0xe8, 0xf8, 0x04, 0xeb, 0x3a, 0xee, 0x12, - 0x9b, 0x8c, 0xa4, 0x77, 0x48, 0x84, 0xdf, 0xe4, 0x60, 0xc6, 0x6e, 0x47, 0x3b, 0x30, 0x63, 0xe0, - 0x53, 0x7a, 0xe7, 0xd1, 0xb9, 0x36, 0x82, 0x63, 0x4b, 0x2d, 0x46, 0xc0, 0x02, 0x7c, 0x9b, 0xde, - 0x0a, 0xf0, 0x7d, 0x5d, 0x13, 0x2d, 0xfe, 0x2f, 0x39, 0x58, 0xda, 0xc3, 0x3d, 0x1c, 0x0c, 0x8d, - 0x92, 0xdc, 0xba, 0x37, 0x9a, 0x48, 0xf9, 0xa3, 0x89, 0x08, 0x56, 0x09, 0xd1, 0xc4, 0x85, 0x6e, - 0xd8, 0x55, 0x58, 0xf6, 0xcf, 0x46, 0xef, 0x14, 0xe1, 0x3f, 0xd2, 0xb0, 0x61, 0xe9, 0x82, 0xae, - 0xf5, 0x7a, 0x58, 0x3f, 0x1c, 0x1e, 0xf7, 0x14, 0xe3, 0xf9, 0x04, 0x8b, 0xbb, 0x0a, 0xd3, 0xaa, - 0xd6, 0xf5, 0x18, 0x4f, 0xce, 0xfa, 0xac, 0x75, 0x51, 0x15, 0x16, 0x83, 0xb1, 0xdd, 0x39, 0xf3, - 0xfc, 0xf1, 0x91, 0x5d, 0xe1, 0x2c, 0x78, 0x6d, 0xf1, 0x30, 0x63, 0x45, 0xa5, 0x9a, 0xda, 0x3b, - 0x27, 0x16, 0x33, 0x23, 0x3a, 0xdf, 0x48, 0x0c, 0x86, 0x69, 0xdf, 0x70, 0xc2, 0xb4, 0xc4, 0x15, - 0x25, 0x45, 0x6c, 0x9f, 0x85, 0x2c, 0x3e, 0x47, 0x58, 0x7f, 0x30, 0x26, 0xeb, 0x91, 0x9e, 0xe0, - 0x22, 0xa7, 0x78, 0x09, 0xe6, 0xfb, 0x77, 0x1c, 0xdc, 0x88, 0x5d, 0x02, 0x8b, 0x33, 0xba, 0x70, - 0x65, 0x40, 0x3b, 0x9c, 0x4d, 0xa0, 0x56, 0xf6, 0xe1, 0xc8, 0x4d, 0x60, 0xd9, 0x35, 0x6b, 0xf5, - 0x6d, 0xc3, 0xc2, 0xc0, 0xd7, 0xc8, 0x97, 0x61, 0x29, 0x82, 0x6c, 0xa2, 0xc5, 0xfc, 0x9c, 0x83, - 0x4d, 0x57, 0x94, 0x23, 0x75, 0x70, 0x79, 0xea, 0xdb, 0x76, 0x75, 0x8b, 0xba, 0xfc, 0xf7, 0xc2, - 0x6b, 0x8f, 0x9e, 0xf0, 0x55, 0x59, 0xf0, 0x4d, 0xd8, 0x4a, 0x98, 0x9a, 0x99, 0xf3, 0xcf, 0x32, - 0xb0, 0xf5, 0x44, 0xee, 0x29, 0x5d, 0x27, 0x7a, 0x8c, 0xc0, 0x21, 0x92, 0xb7, 0xa4, 0x13, 0xb2, - 0x00, 0xea, 0xb5, 0x1e, 0x38, 0x56, 0x3b, 0x8a, 0xff, 0x18, 0xd7, 0xe1, 0x25, 0x66, 0x7e, 0x4f, - 0x23, 0x32, 0xbf, 0x0f, 0xc6, 0x97, 0x35, 0x29, 0x0f, 0x3c, 0x0a, 0x3a, 0x98, 0xf7, 0xc7, 0xe7, - 0x9b, 0xa0, 0x05, 0x17, 0xb6, 0xe2, 0xaf, 0x33, 0x55, 0xfb, 0x9b, 0x0c, 0x08, 0x49, 0xab, 0x67, - 0x3e, 0x44, 
0x84, 0xd9, 0x8e, 0xa6, 0x9e, 0x28, 0x7a, 0x1f, 0x77, 0x59, 0xca, 0xf1, 0xee, 0x38, - 0x9b, 0xc7, 0x1c, 0xc8, 0xae, 0x3d, 0x56, 0x74, 0xd9, 0xa0, 0x22, 0x4c, 0xf7, 0xb1, 0x61, 0xc8, - 0xa7, 0xb6, 0x58, 0xf6, 0x27, 0xff, 0x93, 0x34, 0xcc, 0x3a, 0x43, 0x90, 0x1a, 0xd2, 0x60, 0xea, - 0xbe, 0xf6, 0x5f, 0x46, 0x80, 0x97, 0x57, 0xe6, 0xd4, 0x4b, 0x28, 0x73, 0xd7, 0xa7, 0xcc, 0xd4, - 0x1c, 0xf6, 0x5e, 0x4a, 0xec, 0x04, 0xbd, 0xfe, 0xda, 0x15, 0x50, 0xf8, 0x65, 0x40, 0x75, 0xc5, - 0x60, 0xa9, 0x9b, 0xe3, 0x96, 0xac, 0x4c, 0x4d, 0x7e, 0x21, 0x61, 0xd5, 0xd4, 0x15, 0x16, 0xae, - 0x67, 0x45, 0xe8, 0xcb, 0x2f, 0xaa, 0xb4, 0xc5, 0x0a, 0xe9, 0x0d, 0x53, 0xd6, 0x4d, 0x45, 0x3d, - 0x95, 0x4c, 0xed, 0x73, 0xec, 0x80, 0xc1, 0x76, 0x6b, 0xdb, 0x6a, 0x14, 0xfe, 0x3d, 0x05, 0x4b, - 0x3e, 0xf6, 0x4c, 0x27, 0x3f, 0x84, 0x69, 0x97, 0xb7, 0x2f, 0x8c, 0x8f, 0xa0, 0x2e, 0xd1, 0x6d, - 0xb3, 0x47, 0xa0, 0xeb, 0x00, 0x2a, 0x7e, 0x61, 0xfa, 0xe6, 0x9d, 0xb5, 0x5a, 0xc8, 0x9c, 0xfc, - 0x6f, 0x71, 0x4e, 0xa6, 0x6f, 0xca, 0xe6, 0x90, 0x64, 0x95, 0xcc, 0x45, 0xe3, 0xae, 0xc4, 0xee, - 0x18, 0x3a, 0xef, 0xac, 0x58, 0x70, 0x7a, 0x1a, 0xe4, 0xb6, 0x31, 0xd0, 0xbe, 0x83, 0xb3, 0x76, - 0x34, 0xb5, 0xab, 0x98, 0x2e, 0xce, 0x7a, 0x35, 0x94, 0x20, 0xd0, 0xee, 0x8a, 0x95, 0x57, 0xd9, - 0xc8, 0xaa, 0xd3, 0xca, 0x7f, 0x01, 0x59, 0x7a, 0x1c, 0x63, 0x82, 0x05, 0xe8, 0x63, 0xc8, 0x19, - 0x44, 0xe2, 0x20, 0x30, 0x12, 0xb5, 0x27, 0xde, 0x15, 0x8a, 0x6c, 0x9c, 0xf0, 0x2d, 0xe0, 0xdd, - 0x8b, 0x69, 0x1f, 0x9b, 0xe3, 0x5f, 0xbf, 0x3b, 0xd6, 0x1a, 0x84, 0x3f, 0x4c, 0xc1, 0x5a, 0x24, - 0x83, 0xc9, 0x60, 0x0f, 0x74, 0x10, 0x58, 0xc9, 0xdb, 0xe1, 0x1b, 0x3b, 0xc4, 0x3c, 0x72, 0x45, - 0xfc, 0xaf, 0x5f, 0xec, 0x30, 0x2b, 0x13, 0x1f, 0x66, 0xe8, 0x1c, 0xe9, 0xce, 0xfc, 0x24, 0x05, - 0x68, 0x1f, 0x9b, 0x4e, 0xaa, 0xcc, 0xb6, 0x34, 0xc6, 0xdf, 0x70, 0x2f, 0xe1, 0x6f, 0xbe, 0xe3, - 0xf3, 0x37, 0xd4, 0x63, 0xdd, 0xf6, 0x54, 0x4e, 0x02, 0x53, 0x27, 0xde, 0x96, 0x31, 0xe9, 0x29, - 0x8d, 0xf9, 0xc7, 0x4b, 0x4f, 0x2f, 0xe8, 0x56, 0xfe, 0x95, 0x83, 0x25, 0x9f, 0xd0, 0x4c, 0x83, - 0xee, 0x02, 0x92, 0xcf, 0x64, 0xa5, 0x27, 0x5b, 0x82, 0xd9, 0xe9, 0x3f, 0x83, 0x03, 0x16, 0x9d, - 0x1e, 0x7b, 0x18, 0x7a, 0x04, 0x4b, 0x7d, 0xf9, 0x85, 0xd2, 0x1f, 0xf6, 0x25, 0xb6, 0xcf, 0x86, - 0xf2, 0x7d, 0x1b, 0x38, 0x5c, 0x0b, 0x01, 0xe8, 0x35, 0xd5, 0x7c, 0xff, 0x5d, 0x8a, 0xa0, 0x2f, - 0xb2, 0x71, 0x4c, 0x79, 0x94, 0xef, 0x63, 0x74, 0x08, 0x4b, 0x7d, 0x45, 0x0d, 0x31, 0x4b, 0x8f, - 0x64, 0x46, 0x0d, 0x7c, 0x91, 0x0d, 0x76, 0x39, 0x0a, 0x82, 0x37, 0xe8, 0x65, 0xcb, 0x0d, 0x56, - 0x9a, 0x7a, 0xde, 0x60, 0x31, 0x44, 0xc3, 0xb6, 0x65, 0x3f, 0xb2, 0xda, 0x74, 0x33, 0x6c, 0x36, - 0xac, 0xf4, 0x12, 0x5b, 0x78, 0xfa, 0x9f, 0xb4, 0xd7, 0x82, 0x43, 0xd4, 0xe8, 0x43, 0x48, 0xeb, - 0x83, 0x0e, 0x33, 0xdf, 0x37, 0xc6, 0xe0, 0x5f, 0x12, 0x0f, 0x77, 0x0f, 0xa6, 0x44, 0x6b, 0x14, - 0xff, 0x47, 0x69, 0x48, 0x8b, 0x87, 0xbb, 0xe8, 0x63, 0x5f, 0x89, 0xe5, 0xce, 0x98, 0x5c, 0xbc, - 0x15, 0x96, 0x7f, 0x48, 0x45, 0x95, 0x58, 0x8a, 0xb0, 0xbc, 0x2b, 0x56, 0xcb, 0xed, 0xaa, 0xb4, - 0x57, 0xad, 0x57, 0xdb, 0x55, 0x89, 0x56, 0x89, 0x0a, 0x1c, 0x5a, 0x87, 0xe2, 0xe1, 0x51, 0xa5, - 0x5e, 0x6b, 0x1d, 0x48, 0x47, 0x0d, 0xfb, 0x2f, 0xd6, 0x9b, 0x42, 0x05, 0x98, 0xab, 0xd7, 0x5a, - 0x6d, 0xd6, 0xd0, 0x2a, 0xa4, 0xad, 0x96, 0xfd, 0x6a, 0x5b, 0xda, 0x2d, 0x1f, 0x96, 0x77, 0x6b, - 0xed, 0xa7, 0x85, 0x0c, 0xe2, 0x61, 0xd5, 0xcf, 0xbb, 0xd5, 0x28, 0x1f, 0xb6, 0x0e, 0x9a, 0xed, - 0x42, 0x16, 0x21, 0x58, 0x20, 0xe3, 0xed, 0xa6, 0x56, 0x21, 0x67, 0x71, 0xd8, 0xad, 0x37, 0x1b, - 0x8e, 0x0c, 0xd3, 0x68, 0x19, 0x0a, 
0xf6, 0xcc, 0x62, 0xb5, 0xbc, 0x47, 0x00, 0xbd, 0x19, 0xb4, - 0x08, 0xf3, 0xd5, 0xef, 0x1d, 0x96, 0x1b, 0x7b, 0x36, 0xe1, 0x2c, 0xda, 0x84, 0x75, 0xaf, 0x38, - 0x12, 0x1b, 0x55, 0xdd, 0x23, 0xa0, 0x5c, 0xab, 0x00, 0xe8, 0x1a, 0x14, 0x58, 0x01, 0x6c, 0xb7, - 0xd9, 0xd8, 0xab, 0xb5, 0x6b, 0xcd, 0x46, 0x21, 0x4f, 0x11, 0xbc, 0x25, 0x00, 0x4b, 0x72, 0xc6, - 0x6c, 0x6e, 0x34, 0xac, 0x37, 0x4f, 0x61, 0x3d, 0x1b, 0xb1, 0xfe, 0x79, 0x0a, 0x56, 0x28, 0x64, - 0x6d, 0x03, 0xe4, 0xb6, 0xaf, 0xda, 0x86, 0x02, 0xc5, 0xbb, 0xa4, 0xe0, 0x2d, 0xb0, 0x40, 0xdb, - 0x9f, 0xd8, 0x79, 0x87, 0x5d, 0x5e, 0x4a, 0x79, 0xca, 0x4b, 0xb5, 0x60, 0x16, 0x76, 0xdb, 0x5f, - 0x88, 0x09, 0xcc, 0x96, 0x94, 0xd8, 0x3f, 0x8e, 0x48, 0x13, 0xee, 0x26, 0x73, 0x4b, 0x0a, 0xa1, - 0x2e, 0x92, 0xc5, 0x5f, 0xd0, 0xcb, 0x3d, 0x84, 0xd5, 0xa0, 0xbc, 0xcc, 0xa0, 0xef, 0x84, 0xca, - 0x25, 0x8e, 0xdb, 0x75, 0x68, 0x1d, 0x0a, 0xe1, 0x87, 0x29, 0x98, 0xb1, 0x9b, 0xad, 0xf0, 0xc6, - 0xf2, 0x4b, 0x3e, 0xa4, 0x74, 0xd6, 0x6a, 0x71, 0x80, 0x57, 0x6f, 0xa1, 0x23, 0x15, 0x2c, 0x74, - 0x44, 0x9e, 0x73, 0x3a, 0xf2, 0x9c, 0xbf, 0x0d, 0xf3, 0x1d, 0x4b, 0x7c, 0x45, 0x53, 0x25, 0x53, - 0xe9, 0xdb, 0x40, 0x68, 0xb8, 0x30, 0xd9, 0xb6, 0x1f, 0x1c, 0x88, 0x73, 0xf6, 0x00, 0xab, 0x09, - 0x6d, 0xc2, 0x1c, 0x29, 0x54, 0x4a, 0xa6, 0x26, 0x0d, 0x0d, 0x5c, 0xcc, 0x12, 0x58, 0x08, 0x48, - 0x5b, 0x5b, 0x3b, 0x32, 0x30, 0xba, 0x07, 0x8b, 0x04, 0xc4, 0x97, 0xbc, 0x32, 0xe7, 0x2c, 0x69, - 0x58, 0xd4, 0x44, 0x7a, 0x5b, 0x8e, 0xf4, 0xc2, 0x5f, 0x73, 0xb0, 0x42, 0xe1, 0xb1, 0xa0, 0xfe, - 0x8e, 0xaa, 0xf0, 0x78, 0x55, 0x34, 0x70, 0x7d, 0x46, 0x32, 0x7c, 0x55, 0xe8, 0x40, 0x11, 0x56, - 0x83, 0xf3, 0x31, 0x48, 0xe0, 0xa7, 0x29, 0x58, 0xb6, 0x62, 0x39, 0xbb, 0xe3, 0xb2, 0xc3, 0xed, - 0x09, 0x8e, 0x3e, 0xb0, 0x99, 0x99, 0xd0, 0x66, 0x1e, 0x04, 0x13, 0xee, 0x37, 0xbd, 0xd1, 0x68, - 0x70, 0x05, 0xaf, 0x6a, 0x2f, 0x7f, 0xcc, 0xc1, 0x4a, 0x60, 0x3e, 0x66, 0x60, 0x1f, 0x05, 0x33, - 0x88, 0x9b, 0x31, 0xf2, 0xbd, 0x54, 0x0e, 0xf1, 0x9e, 0x1d, 0xbb, 0x4f, 0x66, 0xc7, 0xff, 0x98, - 0x82, 0xeb, 0xee, 0x2d, 0x48, 0xde, 0x16, 0x74, 0x27, 0x80, 0xc0, 0x2e, 0x56, 0xc2, 0xff, 0x6e, - 0xd0, 0x43, 0xdf, 0x0f, 0x5f, 0xcc, 0x11, 0x22, 0x25, 0x79, 0xea, 0x48, 0xe4, 0x38, 0x33, 0x29, - 0x72, 0x7c, 0x21, 0x0d, 0xf8, 0x35, 0x2f, 0x28, 0xee, 0x17, 0x9f, 0x69, 0xc2, 0x98, 0xd5, 0xa5, - 0xf7, 0xe1, 0x2a, 0x49, 0x17, 0x9c, 0xd7, 0x33, 0x76, 0xc1, 0x9e, 0xfa, 0xd0, 0x19, 0x71, 0xc5, - 0xea, 0x76, 0xde, 0x83, 0xb0, 0x8a, 0x4a, 0x57, 0xf8, 0x2a, 0x03, 0xab, 0x56, 0x3a, 0xd1, 0x32, - 0xe5, 0xd3, 0x49, 0x6a, 0x0d, 0xbf, 0x14, 0x86, 0x6e, 0x53, 0xfe, 0x63, 0x89, 0xe6, 0x3a, 0x0e, - 0x62, 0x8b, 0x4a, 0xb0, 0x64, 0x98, 0xf2, 0x29, 0x71, 0x07, 0xb2, 0x7e, 0x8a, 0x4d, 0x69, 0x20, - 0x9b, 0xcf, 0x99, 0xad, 0x2f, 0xb2, 0xae, 0x36, 0xe9, 0x39, 0x94, 0xcd, 0xe7, 0x97, 0x74, 0x90, - 0xe8, 0x3b, 0x41, 0xa7, 0xf0, 0xd6, 0x88, 0xb5, 0x24, 0xe8, 0xd6, 0xf7, 0x62, 0xe0, 0xfd, 0x77, - 0x46, 0xb0, 0x1c, 0x0d, 0xeb, 0x5f, 0x1c, 0xce, 0xfe, 0x9a, 0x2b, 0x03, 0xd7, 0xe0, 0x6a, 0x68, - 0xf1, 0xec, 0x0a, 0x39, 0x85, 0xa2, 0xd5, 0x75, 0xa4, 0x1a, 0x13, 0xaa, 0x63, 0x8c, 0xc6, 0xa4, - 0x62, 0x34, 0x46, 0x58, 0x83, 0x6b, 0x11, 0x13, 0x31, 0x29, 0xfe, 0x22, 0x4b, 0xc5, 0x98, 0xbc, - 0x48, 0xf5, 0x69, 0x9c, 0x55, 0xbc, 0xeb, 0x3d, 0xf6, 0xc8, 0x7a, 0xce, 0xab, 0xb0, 0x8b, 0x1b, - 0x90, 0xf7, 0xd2, 0xb1, 0x6b, 0xd0, 0x1c, 0x61, 0x38, 0xd9, 0x0b, 0xd5, 0xce, 0x72, 0x81, 0xda, - 0x59, 0xdd, 0x35, 0xaa, 0x69, 0x7f, 0x2c, 0x1c, 0xbb, 0x15, 0x09, 0x66, 0xf5, 0x2c, 0x64, 0x56, - 0x33, 0xfe, 0x82, 0x5c, 0x2c, 0xd3, 0x5f, 0x00, 0xc3, 0x62, 
0x4a, 0x1d, 0x59, 0x29, 0x13, 0x9e, - 0x01, 0x4f, 0x35, 0x7e, 0xf2, 0xda, 0x55, 0x40, 0x8d, 0x52, 0x41, 0x35, 0x12, 0xae, 0xc3, 0x5a, - 0x24, 0x6f, 0x36, 0xf5, 0xef, 0x70, 0x54, 0x30, 0x07, 0x14, 0x6b, 0x99, 0xb2, 0x69, 0x8c, 0x3b, - 0x35, 0xeb, 0xf4, 0x4e, 0x4d, 0x9b, 0x88, 0x06, 0x4f, 0x68, 0x12, 0xc2, 0xef, 0x71, 0x74, 0x1f, - 0x82, 0xb2, 0xb0, 0xdb, 0xf6, 0x4d, 0xc8, 0x0e, 0x09, 0xee, 0x4f, 0xa3, 0xae, 0x25, 0xbf, 0x11, - 0x1c, 0x59, 0x5d, 0x22, 0xa5, 0xb8, 0x34, 0x24, 0x55, 0xf8, 0x29, 0x07, 0x79, 0x0f, 0x7f, 0xb4, - 0x0e, 0xb3, 0x0e, 0x54, 0x64, 0x27, 0x48, 0x4e, 0x83, 0x75, 0xfc, 0xa6, 0x66, 0xca, 0x3d, 0xf6, - 0x26, 0x85, 0x7e, 0x58, 0x39, 0xed, 0xd0, 0xc0, 0x34, 0x1c, 0x4e, 0x8b, 0xe4, 0x6f, 0x74, 0x07, - 0x32, 0x43, 0x55, 0x31, 0x89, 0xd9, 0x2f, 0x04, 0xed, 0x99, 0x4c, 0x55, 0x3a, 0x52, 0x15, 0x53, - 0x24, 0x54, 0xc2, 0x6d, 0xc8, 0x58, 0x5f, 0x7e, 0xc8, 0x62, 0x16, 0xb2, 0x95, 0xa7, 0xed, 0x6a, - 0xab, 0xc0, 0x21, 0x80, 0x5c, 0x8d, 0x26, 0xf8, 0x29, 0xa1, 0x6e, 0xbf, 0x4b, 0x75, 0x16, 0x61, - 0xb9, 0x00, 0xf9, 0x58, 0xd5, 0xf4, 0xbe, 0xdc, 0x23, 0x32, 0xcf, 0x88, 0xce, 0x77, 0x7c, 0x39, - 0x85, 0x82, 0x8f, 0xeb, 0xce, 0x89, 0x44, 0x01, 0x4c, 0x9f, 0x51, 0xdd, 0x8a, 0x83, 0x96, 0xca, - 0x91, 0xd0, 0xd2, 0x75, 0xdf, 0x2d, 0x3b, 0x02, 0x54, 0xfa, 0xdb, 0x14, 0xac, 0x44, 0xd2, 0xa1, - 0xf7, 0xbc, 0x70, 0xd2, 0x56, 0x22, 0x4f, 0x2f, 0x90, 0xf4, 0x15, 0x47, 0x81, 0xa4, 0x1d, 0x1f, - 0x90, 0xf4, 0xfa, 0xc8, 0xf1, 0x5e, 0x08, 0xe9, 0xc7, 0x5c, 0x0c, 0x84, 0xd4, 0x6a, 0x97, 0xf7, - 0xab, 0xd2, 0x51, 0x83, 0xfe, 0xeb, 0x40, 0x48, 0xcb, 0x50, 0x70, 0x81, 0x15, 0xa9, 0xd5, 0x2e, - 0x93, 0x47, 0xc6, 0x21, 0xf8, 0x26, 0x1d, 0x09, 0xce, 0x64, 0x46, 0xe3, 0x30, 0x59, 0x4a, 0xb2, - 0x0a, 0x88, 0x8d, 0x7e, 0xdc, 0x3c, 0x6a, 0xb4, 0x25, 0xf2, 0x84, 0xb9, 0x90, 0x73, 0xf0, 0x99, - 0x65, 0x40, 0xec, 0xb4, 0xbc, 0x2f, 0xf1, 0xff, 0x84, 0x83, 0x25, 0x5f, 0x33, 0x3b, 0x3c, 0x4f, - 0x51, 0x9c, 0xf3, 0x15, 0xc5, 0xef, 0xc1, 0xb2, 0x95, 0x31, 0x52, 0x4b, 0x31, 0xa4, 0x01, 0xd6, - 0x09, 0x18, 0xce, 0x74, 0x7e, 0xb1, 0x2f, 0xbf, 0x60, 0x05, 0x83, 0x43, 0xac, 0x5b, 0x8c, 0x2f, - 0x01, 0x12, 0x16, 0xbe, 0x4c, 0xd3, 0xb8, 0x64, 0xe2, 0xbc, 0x66, 0xa4, 0x8f, 0x0a, 0x27, 0x3e, - 0xe9, 0x09, 0x12, 0x9f, 0x18, 0x0f, 0x97, 0x99, 0x28, 0x18, 0x9e, 0xfc, 0x4e, 0x6f, 0xb8, 0xf7, - 0x36, 0x8d, 0x5c, 0xef, 0x78, 0xf5, 0x77, 0x64, 0xa6, 0x95, 0xfb, 0xb2, 0xc2, 0xfd, 0xe8, 0xb2, - 0xf2, 0xe4, 0x32, 0x8d, 0xc7, 0x2e, 0x90, 0x1f, 0x09, 0x77, 0xe0, 0x16, 0x79, 0x56, 0x39, 0x0a, - 0xd0, 0xa6, 0x2e, 0xe9, 0x57, 0xe1, 0xf5, 0x51, 0xd4, 0x6c, 0xfa, 0x7a, 0xa4, 0xff, 0x71, 0x6a, - 0x5b, 0x01, 0x2e, 0x23, 0x5c, 0x11, 0x9d, 0xfc, 0xb7, 0x53, 0xb0, 0x39, 0x6a, 0x1c, 0xfa, 0xd8, - 0xeb, 0x9a, 0xee, 0x8c, 0x3b, 0x9d, 0xd7, 0x4b, 0xfd, 0x01, 0xf3, 0x52, 0x55, 0x9f, 0x97, 0x7a, - 0x67, 0x12, 0x56, 0x5e, 0x87, 0x55, 0x8d, 0xf2, 0x57, 0x6f, 0xc3, 0x1b, 0x7e, 0x58, 0xda, 0xe3, - 0xa3, 0xe8, 0xaf, 0x1f, 0x1c, 0x9c, 0x9a, 0x23, 0x0e, 0x66, 0xc7, 0x87, 0xf6, 0xfe, 0x7e, 0x1a, - 0x36, 0xbd, 0x0f, 0x94, 0xf7, 0xbd, 0x68, 0x5a, 0xd2, 0xaf, 0x05, 0x6e, 0xc3, 0x62, 0x10, 0x29, - 0xb2, 0x1f, 0xe4, 0x5e, 0xf1, 0x43, 0x45, 0x46, 0xd2, 0x03, 0x9c, 0x11, 0x53, 0x27, 0xe7, 0x7f, - 0x61, 0x14, 0xf8, 0x9b, 0x63, 0x33, 0xfe, 0xff, 0x09, 0x08, 0x53, 0xf5, 0xec, 0xc1, 0x56, 0x82, - 0xfc, 0xcc, 0x2c, 0x2a, 0xb0, 0xe0, 0x07, 0x46, 0x99, 0xa6, 0x06, 0x5e, 0xa1, 0xfa, 0x07, 0xcf, - 0xfb, 0xd0, 0x52, 0x3a, 0xdb, 0x3f, 0x73, 0xf6, 0x83, 0x7d, 0x1f, 0xad, 0x75, 0xc2, 0x61, 0xe4, - 0x95, 0x2e, 0x22, 0x08, 0xba, 0xa2, 0x12, 0xcc, 0xda, 0x54, 0x46, 0xf0, 0x09, 0xa8, 
0x33, 0xb9, - 0x4b, 0x12, 0x06, 0x8e, 0xd3, 0x17, 0x04, 0x8e, 0x33, 0x41, 0xe0, 0x98, 0xae, 0xed, 0x87, 0x29, - 0xd8, 0xf4, 0xbe, 0x95, 0x8c, 0x54, 0xef, 0x49, 0x16, 0xba, 0x05, 0x73, 0x1e, 0x2a, 0x5b, 0xe3, - 0xf3, 0x2e, 0xee, 0x99, 0xa4, 0xed, 0xa3, 0x24, 0x79, 0x45, 0x20, 0x28, 0xdd, 0x8a, 0x6d, 0xd8, - 0x4a, 0x98, 0x9f, 0x2a, 0x15, 0xa5, 0xfc, 0x41, 0x8a, 0xfc, 0xb6, 0xed, 0xff, 0x6e, 0xc7, 0xe2, - 0x81, 0xc7, 0x44, 0x31, 0x5e, 0xe9, 0x76, 0x29, 0xb0, 0x11, 0x37, 0xf9, 0x25, 0x1b, 0xe0, 0xfd, - 0xff, 0xe6, 0x60, 0xa6, 0xd6, 0xc5, 0xaa, 0x49, 0x83, 0x82, 0x79, 0xdf, 0xaf, 0x1c, 0xd1, 0x7a, - 0xcc, 0x8f, 0x1f, 0xc9, 0x16, 0xf0, 0xd7, 0x13, 0x7f, 0x1a, 0x29, 0x4c, 0xa1, 0x13, 0xcf, 0x2f, - 0x34, 0x7d, 0xcf, 0x05, 0x5e, 0x0b, 0x8d, 0x8c, 0xb8, 0xab, 0xf9, 0x5b, 0x23, 0xa8, 0x9c, 0x79, - 0xde, 0x87, 0x2c, 0xf9, 0xb1, 0x1a, 0x5a, 0x76, 0x7e, 0x30, 0xe7, 0xf9, 0x2d, 0x1b, 0xbf, 0x12, - 0x68, 0xb5, 0xc7, 0xdd, 0xff, 0xfb, 0x59, 0x00, 0xf7, 0x0e, 0x44, 0x8f, 0x60, 0xce, 0xeb, 0xfa, - 0xd0, 0x5a, 0xc2, 0xaf, 0xb5, 0xf8, 0xf5, 0xe8, 0x4e, 0x47, 0xa6, 0x47, 0x30, 0xe7, 0x55, 0x79, - 0x97, 0x59, 0xc4, 0x63, 0x6d, 0x97, 0x59, 0xe4, 0xdb, 0xea, 0x29, 0xd4, 0x83, 0xab, 0x31, 0x4f, - 0x65, 0xd1, 0xeb, 0xe3, 0x3d, 0x28, 0xe6, 0xdf, 0x18, 0xf3, 0xcd, 0xad, 0x30, 0x85, 0x74, 0xb8, - 0x16, 0xfb, 0x42, 0x14, 0x6d, 0x8f, 0xfb, 0x7e, 0x95, 0x7f, 0x73, 0x0c, 0x4a, 0x67, 0xce, 0x21, - 0xf0, 0xf1, 0xcf, 0xd2, 0xd0, 0x9b, 0x63, 0xbf, 0x97, 0xe4, 0x6f, 0x8f, 0xff, 0xca, 0x4d, 0x98, - 0x42, 0x07, 0x90, 0xf7, 0xbc, 0x4f, 0x42, 0x7c, 0xe4, 0xa3, 0x25, 0xca, 0x78, 0x2d, 0xe1, 0x41, - 0x13, 0xe5, 0xe4, 0x79, 0x32, 0xe2, 0x72, 0x0a, 0x3f, 0x7e, 0x71, 0x39, 0x45, 0xbc, 0x31, 0x09, - 0x6e, 0x7f, 0x20, 0x30, 0x8d, 0xda, 0xfe, 0xe8, 0x48, 0x37, 0x6a, 0xfb, 0x63, 0xa2, 0x5c, 0x61, - 0x0a, 0x7d, 0x17, 0x16, 0xfc, 0xb5, 0x60, 0x74, 0x3d, 0xb1, 0xa6, 0xcd, 0x6f, 0xc4, 0x75, 0x7b, - 0x59, 0xfa, 0x2b, 0x89, 0x2e, 0xcb, 0xc8, 0x8a, 0xa6, 0xcb, 0x32, 0xa6, 0x00, 0x39, 0x65, 0xf9, - 0x27, 0x5f, 0x7d, 0xcc, 0xf5, 0x4f, 0x51, 0x65, 0x3d, 0xd7, 0x3f, 0x45, 0x16, 0xd5, 0x84, 0x29, - 0xa4, 0xc0, 0x6a, 0x74, 0x79, 0x06, 0xdd, 0x1a, 0xab, 0xfa, 0xc4, 0xbf, 0x3e, 0x8a, 0xcc, 0x99, - 0xaa, 0x03, 0x4b, 0x11, 0xcf, 0xc7, 0x90, 0x90, 0xf8, 0xb6, 0x8c, 0x4e, 0x72, 0x73, 0x8c, 0xf7, - 0x67, 0x02, 0x71, 0xe6, 0xff, 0x95, 0x86, 0x2b, 0x81, 0xc0, 0x1e, 0xfd, 0x06, 0x07, 0x1b, 0xc9, - 0xc9, 0x0e, 0xba, 0x1b, 0x93, 0x14, 0xc4, 0x28, 0x56, 0x69, 0x5c, 0x72, 0x8f, 0x71, 0x5f, 0x8b, - 0x8d, 0x29, 0xd1, 0xf6, 0xb8, 0x61, 0xb3, 0x47, 0xa3, 0x47, 0x05, 0xa8, 0x64, 0x3b, 0xac, 0x69, - 0x63, 0xa3, 0x0e, 0xb4, 0x3d, 0x6e, 0x60, 0xe4, 0x4e, 0x3b, 0x32, 0x84, 0xa1, 0xd3, 0xf6, 0x60, - 0x35, 0xfa, 0xf6, 0x46, 0xb7, 0xc6, 0x0a, 0x2d, 0x5c, 0xad, 0x4a, 0x0e, 0x02, 0xc8, 0x6c, 0x24, - 0xad, 0xba, 0xff, 0x2f, 0x59, 0xc8, 0x10, 0xa0, 0xa4, 0x0d, 0x57, 0x02, 0xc5, 0x17, 0xb4, 0x91, - 0x5c, 0x92, 0xe2, 0x6f, 0xc4, 0xf6, 0x3b, 0xe7, 0xf7, 0x0c, 0x16, 0x43, 0xe5, 0x14, 0xb4, 0xe9, - 0x1d, 0x17, 0x55, 0xd2, 0xe1, 0xb7, 0x12, 0x28, 0x82, 0xbc, 0xfd, 0x97, 0xda, 0xe6, 0x28, 0xbc, - 0xdf, 0xcf, 0x3b, 0xee, 0x22, 0xfb, 0x8c, 0xe2, 0x52, 0xc1, 0x2b, 0x4c, 0xf0, 0xcb, 0x15, 0x79, - 0x79, 0xdd, 0x4c, 0xa4, 0x71, 0x66, 0xf8, 0xd4, 0x01, 0xc4, 0x3c, 0x70, 0x33, 0xf2, 0x09, 0x17, - 0x09, 0x8b, 0xf3, 0x42, 0x12, 0x89, 0xc3, 0xfe, 0x13, 0x28, 0x04, 0x91, 0x11, 0x74, 0x63, 0x04, - 0x50, 0xc3, 0x6f, 0xc6, 0x13, 0x04, 0x77, 0x26, 0xe8, 0x09, 0x82, 0x52, 0x45, 0x99, 0xff, 0xcd, - 0x44, 0x1a, 0xef, 0x7d, 0xe8, 0xc1, 0x04, 0xdd, 0xfb, 0x30, 0x8c, 0x1f, 0xba, 0xf7, 0x61, 0x04, - 0x88, 0x28, 
0x4c, 0xed, 0x3c, 0x00, 0x90, 0x7b, 0x83, 0xe7, 0xb2, 0x84, 0xd5, 0x61, 0x1f, 0xad, - 0x87, 0xd2, 0xb4, 0xaa, 0x3a, 0xec, 0x37, 0x07, 0x56, 0x76, 0x66, 0x14, 0xff, 0x6c, 0x86, 0xe4, - 0x62, 0xb3, 0x64, 0x80, 0xd5, 0xb1, 0x53, 0x87, 0x82, 0x3b, 0x5a, 0x22, 0x81, 0x36, 0xda, 0x8a, - 0xe4, 0x41, 0x5e, 0x4b, 0x06, 0x18, 0x2d, 0x38, 0x8c, 0x48, 0xef, 0xce, 0x47, 0x00, 0x1d, 0x43, - 0x91, 0x68, 0xa4, 0x8f, 0xae, 0x87, 0xf8, 0x3c, 0x54, 0x70, 0xaf, 0x6b, 0xf3, 0xf8, 0x53, 0x26, - 0x4c, 0xc7, 0x50, 0x68, 0x3e, 0xb0, 0xf3, 0x6d, 0xc8, 0x53, 0x61, 0x4e, 0x2c, 0xba, 0x51, 0xe3, - 0x99, 0x0c, 0x74, 0xf5, 0xa4, 0x67, 0xa7, 0x0a, 0xf3, 0x94, 0x01, 0x83, 0xd8, 0xd1, 0x8d, 0x10, - 0x8b, 0xc7, 0xb4, 0x27, 0xc0, 0x64, 0x8e, 0x0c, 0x63, 0x7d, 0x3b, 0x15, 0x98, 0xb3, 0xd9, 0x98, - 0xcf, 0xb5, 0x2e, 0xda, 0x88, 0xe0, 0x62, 0x75, 0x04, 0x98, 0xe4, 0x19, 0x13, 0xab, 0xcb, 0x15, - 0xc5, 0xfe, 0x3f, 0x3e, 0xc2, 0xa2, 0x30, 0x54, 0x29, 0x52, 0x14, 0xd6, 0x57, 0xc9, 0x3e, 0x4b, - 0x77, 0x0c, 0xe5, 0x38, 0x47, 0x06, 0x7d, 0xe3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x0c, 0x79, - 0xd8, 0xd8, 0x90, 0x46, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// IdentityClient is the client API for Identity service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type IdentityClient interface { - GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) - GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) - Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) -} - -type identityClient struct { - cc *grpc.ClientConn -} - -func NewIdentityClient(cc *grpc.ClientConn) IdentityClient { - return &identityClient{cc} -} - -func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) { - out := new(GetPluginInfoResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Identity/GetPluginInfo", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *identityClient) GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) { - out := new(GetPluginCapabilitiesResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Identity/GetPluginCapabilities", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *identityClient) Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) { - out := new(ProbeResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Identity/Probe", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use GetVolumeGroupSnapshotResponse.ProtoReflect.Descriptor instead. +func (*GetVolumeGroupSnapshotResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{71} } -// IdentityServer is the server API for Identity service. 
-type IdentityServer interface { - GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) - GetPluginCapabilities(context.Context, *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) - Probe(context.Context, *ProbeRequest) (*ProbeResponse, error) +func (x *GetVolumeGroupSnapshotResponse) GetGroupSnapshot() *VolumeGroupSnapshot { + if x != nil { + return x.GroupSnapshot + } + return nil } -// UnimplementedIdentityServer can be embedded to have forward compatible implementations. -type UnimplementedIdentityServer struct { -} +// BlockMetadata specifies a data range. +type BlockMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (*UnimplementedIdentityServer) GetPluginInfo(ctx context.Context, req *GetPluginInfoRequest) (*GetPluginInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPluginInfo not implemented") -} -func (*UnimplementedIdentityServer) GetPluginCapabilities(ctx context.Context, req *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPluginCapabilities not implemented") + // This is the zero based byte position in the volume or snapshot, + // measured from the start of the object. + // This field is REQUIRED. + ByteOffset int64 `protobuf:"varint,1,opt,name=byte_offset,json=byteOffset,proto3" json:"byte_offset,omitempty"` + // This is the size of the data range. + // size_bytes MUST be greater than zero. + // This field is REQUIRED. + SizeBytes int64 `protobuf:"varint,2,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` } -func (*UnimplementedIdentityServer) Probe(ctx context.Context, req *ProbeRequest) (*ProbeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Probe not implemented") + +func (x *BlockMetadata) Reset() { + *x = BlockMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) { - s.RegisterService(&_Identity_serviceDesc, srv) +func (x *BlockMetadata) String() string { + return protoimpl.X.MessageStringOf(x) } -func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPluginInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IdentityServer).GetPluginInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Identity/GetPluginInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest)) +func (*BlockMetadata) ProtoMessage() {} + +func (x *BlockMetadata) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[72] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -func _Identity_GetPluginCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPluginCapabilitiesRequest) - if err := dec(in); 
err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IdentityServer).GetPluginCapabilities(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Identity/GetPluginCapabilities", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IdentityServer).GetPluginCapabilities(ctx, req.(*GetPluginCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) +// Deprecated: Use BlockMetadata.ProtoReflect.Descriptor instead. +func (*BlockMetadata) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{72} } -func _Identity_Probe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ProbeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IdentityServer).Probe(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Identity/Probe", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IdentityServer).Probe(ctx, req.(*ProbeRequest)) +func (x *BlockMetadata) GetByteOffset() int64 { + if x != nil { + return x.ByteOffset } - return interceptor(ctx, in, info, handler) + return 0 } -var _Identity_serviceDesc = grpc.ServiceDesc{ - ServiceName: "csi.v1.Identity", - HandlerType: (*IdentityServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetPluginInfo", - Handler: _Identity_GetPluginInfo_Handler, - }, - { - MethodName: "GetPluginCapabilities", - Handler: _Identity_GetPluginCapabilities_Handler, - }, - { - MethodName: "Probe", - Handler: _Identity_Probe_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/container-storage-interface/spec/csi.proto", +func (x *BlockMetadata) GetSizeBytes() int64 { + if x != nil { + return x.SizeBytes + } + return 0 } -// ControllerClient is the client API for Controller service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
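The BlockMetadata message added above describes a single data range as a zero-based byte_offset plus a size_bytes that must be greater than zero. A minimal sketch, assuming the spec's generated Go package at its usual import path, of a hypothetical helper (not part of the vendored file) that checks those documented constraints:

package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// validBlockMetadata reports whether a single BlockMetadata entry satisfies
// the constraints documented above: byte_offset is a zero based position
// measured from the start of the object, and size_bytes must be greater
// than zero. The helper name is hypothetical, for illustration only.
func validBlockMetadata(b *csi.BlockMetadata) bool {
	return b != nil && b.GetByteOffset() >= 0 && b.GetSizeBytes() > 0
}

func main() {
	// A 4 KiB range starting 1 MiB into the snapshot (illustrative values).
	b := &csi.BlockMetadata{ByteOffset: 1 << 20, SizeBytes: 4096}
	fmt.Println(validBlockMetadata(b)) // true
}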
-type ControllerClient interface { - CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) - DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) - ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) - ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) - ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) - ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) - GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) - ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) - CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) - DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) - ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) - ControllerExpandVolume(ctx context.Context, in *ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*ControllerExpandVolumeResponse, error) - ControllerGetVolume(ctx context.Context, in *ControllerGetVolumeRequest, opts ...grpc.CallOption) (*ControllerGetVolumeResponse, error) +// The GetMetadataAllocatedRequest message is used to solicit metadata +// on the allocated blocks of a snapshot: i.e. this identifies the +// data ranges that have valid data as they were the target of some +// previous write operation on the volume. +type GetMetadataAllocatedRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This is the identifier of the snapshot. + // This field is REQUIRED. + SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + // This indicates the zero based starting byte position in the volume + // snapshot from which the result should be computed. + // It is intended to be used to continue a previously interrupted + // call. + // The CO SHOULD specify this value to be the offset of the byte + // position immediately after the last byte of the last data range + // received, if continuing an interrupted operation, or zero if not. + // The SP MUST ensure that the returned response stream does not + // contain BlockMetadata tuples that end before the requested + // starting_offset: i.e. if S is the requested starting_offset, and + // B0 is block_metadata[0] of the first message in the response + // stream, then (S < B0.byte_offset + B0.size_bytes) must be true. + // This field is REQUIRED. + StartingOffset int64 `protobuf:"varint,2,opt,name=starting_offset,json=startingOffset,proto3" json:"starting_offset,omitempty"` + // This is an optional parameter, and if non-zero it specifies the + // maximum number of tuples to be returned in each + // GetMetadataAllocatedResponse message returned by the RPC stream. 
+ // The plugin will determine an appropriate value if 0, and is + // always free to send less than the requested value. + // This field is OPTIONAL. + MaxResults int32 `protobuf:"varint,3,opt,name=max_results,json=maxResults,proto3" json:"max_results,omitempty"` + // Secrets required by plugin to complete the request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,4,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -type controllerClient struct { - cc *grpc.ClientConn +func (x *GetMetadataAllocatedRequest) Reset() { + *x = GetMetadataAllocatedRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func NewControllerClient(cc *grpc.ClientConn) ControllerClient { - return &controllerClient{cc} +func (x *GetMetadataAllocatedRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { - out := new(CreateVolumeResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Controller/CreateVolume", in, out, opts...) - if err != nil { - return nil, err +func (*GetMetadataAllocatedRequest) ProtoMessage() {} + +func (x *GetMetadataAllocatedRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[73] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) { - out := new(DeleteVolumeResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Controller/DeleteVolume", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use GetMetadataAllocatedRequest.ProtoReflect.Descriptor instead. +func (*GetMetadataAllocatedRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{73} } -func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { - out := new(ControllerPublishVolumeResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerPublishVolume", in, out, opts...) - if err != nil { - return nil, err +func (x *GetMetadataAllocatedRequest) GetSnapshotId() string { + if x != nil { + return x.SnapshotId } - return out, nil + return "" } -func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { - out := new(ControllerUnpublishVolumeResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerUnpublishVolume", in, out, opts...) 
- if err != nil { - return nil, err +func (x *GetMetadataAllocatedRequest) GetStartingOffset() int64 { + if x != nil { + return x.StartingOffset } - return out, nil + return 0 } -func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) { - out := new(ValidateVolumeCapabilitiesResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Controller/ValidateVolumeCapabilities", in, out, opts...) - if err != nil { - return nil, err +func (x *GetMetadataAllocatedRequest) GetMaxResults() int32 { + if x != nil { + return x.MaxResults } - return out, nil + return 0 } -func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) { - out := new(ListVolumesResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Controller/ListVolumes", in, out, opts...) - if err != nil { - return nil, err +func (x *GetMetadataAllocatedRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } - return out, nil + return nil } -func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) { - out := new(GetCapacityResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Controller/GetCapacity", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// GetMetadataAllocatedResponse messages are returned in a gRPC stream. +// Cumulatively, they provide information on the allocated data +// ranges in the snapshot. +type GetMetadataAllocatedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This specifies the style used in the BlockMetadata sequence. + // This value must be the same in all such messages returned by + // the stream. + // If block_metadata_type is FIXED_LENGTH, then the size_bytes field + // of each message in the block_metadata list MUST be constant. + // This field is REQUIRED. + BlockMetadataType BlockMetadataType `protobuf:"varint,1,opt,name=block_metadata_type,json=blockMetadataType,proto3,enum=csi.v1.BlockMetadataType" json:"block_metadata_type,omitempty"` + // This returns the capacity of the underlying volume in bytes. + // This value must be the same in all such messages returned by + // the stream. + // This field is REQUIRED. + VolumeCapacityBytes int64 `protobuf:"varint,2,opt,name=volume_capacity_bytes,json=volumeCapacityBytes,proto3" json:"volume_capacity_bytes,omitempty"` + // This is a list of data range tuples. + // If the value of max_results in the GetMetadataAllocatedRequest + // message is greater than zero, then the number of entries in this + // list MUST be less than or equal to that value. + // The SP MUST respect the value of starting_offset in the request. + // The byte_offset fields of adjacent BlockMetadata messages + // MUST be strictly increasing and messages MUST NOT overlap: + // i.e. for any two BlockMetadata messages, A and B, if A is returned + // before B, then (A.byte_offset + A.size_bytes <= B.byte_offset) + // MUST be true. + // This MUST also be true if A and B are from block_metadata lists in + // different GetMetadataAllocatedResponse messages in the gRPC stream. + // This field is OPTIONAL. 
+ BlockMetadata []*BlockMetadata `protobuf:"bytes,3,rep,name=block_metadata,json=blockMetadata,proto3" json:"block_metadata,omitempty"` } -func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) { - out := new(ControllerGetCapabilitiesResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerGetCapabilities", in, out, opts...) - if err != nil { - return nil, err +func (x *GetMetadataAllocatedResponse) Reset() { + *x = GetMetadataAllocatedResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return out, nil } -func (c *controllerClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) { - out := new(CreateSnapshotResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Controller/CreateSnapshot", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +func (x *GetMetadataAllocatedResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *controllerClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) { - out := new(DeleteSnapshotResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Controller/DeleteSnapshot", in, out, opts...) - if err != nil { - return nil, err +func (*GetMetadataAllocatedResponse) ProtoMessage() {} + +func (x *GetMetadataAllocatedResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[74] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *controllerClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { - out := new(ListSnapshotsResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Controller/ListSnapshots", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use GetMetadataAllocatedResponse.ProtoReflect.Descriptor instead. +func (*GetMetadataAllocatedResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{74} } -func (c *controllerClient) ControllerExpandVolume(ctx context.Context, in *ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*ControllerExpandVolumeResponse, error) { - out := new(ControllerExpandVolumeResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerExpandVolume", in, out, opts...) - if err != nil { - return nil, err +func (x *GetMetadataAllocatedResponse) GetBlockMetadataType() BlockMetadataType { + if x != nil { + return x.BlockMetadataType } - return out, nil + return BlockMetadataType_UNKNOWN } -func (c *controllerClient) ControllerGetVolume(ctx context.Context, in *ControllerGetVolumeRequest, opts ...grpc.CallOption) (*ControllerGetVolumeResponse, error) { - out := new(ControllerGetVolumeResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerGetVolume", in, out, opts...) - if err != nil { - return nil, err +func (x *GetMetadataAllocatedResponse) GetVolumeCapacityBytes() int64 { + if x != nil { + return x.VolumeCapacityBytes } - return out, nil + return 0 } -// ControllerServer is the server API for Controller service. 
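The GetMetadataAllocated request/response comments above define a stream contract: block_metadata ranges must be strictly increasing and non-overlapping across all responses in the stream, and an interrupted call can be resumed by setting starting_offset to the byte position immediately after the last range received. A minimal sketch, under the same assumptions as above and with a hypothetical helper name, of how a caller might verify that contract and compute the resume offset:

package snapshotmeta // hypothetical illustration package, not part of this module

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// nextStartingOffset walks a sequence of streamed responses, checks the
// documented invariant that adjacent ranges are strictly increasing and do
// not overlap, and returns the offset a caller would pass as starting_offset
// to resume an interrupted call.
func nextStartingOffset(resps []*csi.GetMetadataAllocatedResponse) (int64, error) {
	var end int64 // byte position immediately after the last range seen
	for _, r := range resps {
		for _, b := range r.GetBlockMetadata() {
			if b.GetSizeBytes() <= 0 {
				return 0, fmt.Errorf("size_bytes must be greater than zero")
			}
			if b.GetByteOffset() < end {
				return 0, fmt.Errorf("block_metadata ranges overlap or are out of order")
			}
			end = b.GetByteOffset() + b.GetSizeBytes()
		}
	}
	return end, nil
}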
-type ControllerServer interface { - CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) - DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error) - ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) - ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) - ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) - ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error) - GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error) - ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) - CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error) - DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) - ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) - ControllerExpandVolume(context.Context, *ControllerExpandVolumeRequest) (*ControllerExpandVolumeResponse, error) - ControllerGetVolume(context.Context, *ControllerGetVolumeRequest) (*ControllerGetVolumeResponse, error) +func (x *GetMetadataAllocatedResponse) GetBlockMetadata() []*BlockMetadata { + if x != nil { + return x.BlockMetadata + } + return nil } -// UnimplementedControllerServer can be embedded to have forward compatible implementations. -type UnimplementedControllerServer struct { -} +// The GetMetadataDeltaRequest message is used to solicit metadata on +// the data ranges that have changed between two snapshots. +type GetMetadataDeltaRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (*UnimplementedControllerServer) CreateVolume(ctx context.Context, req *CreateVolumeRequest) (*CreateVolumeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateVolume not implemented") -} -func (*UnimplementedControllerServer) DeleteVolume(ctx context.Context, req *DeleteVolumeRequest) (*DeleteVolumeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteVolume not implemented") -} -func (*UnimplementedControllerServer) ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ControllerPublishVolume not implemented") -} -func (*UnimplementedControllerServer) ControllerUnpublishVolume(ctx context.Context, req *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ControllerUnpublishVolume not implemented") -} -func (*UnimplementedControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ValidateVolumeCapabilities not implemented") -} -func (*UnimplementedControllerServer) ListVolumes(ctx context.Context, req *ListVolumesRequest) (*ListVolumesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListVolumes not implemented") -} -func (*UnimplementedControllerServer) GetCapacity(ctx context.Context, req *GetCapacityRequest) (*GetCapacityResponse, error) { - return nil, 
status.Errorf(codes.Unimplemented, "method GetCapacity not implemented") -} -func (*UnimplementedControllerServer) ControllerGetCapabilities(ctx context.Context, req *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ControllerGetCapabilities not implemented") -} -func (*UnimplementedControllerServer) CreateSnapshot(ctx context.Context, req *CreateSnapshotRequest) (*CreateSnapshotResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateSnapshot not implemented") -} -func (*UnimplementedControllerServer) DeleteSnapshot(ctx context.Context, req *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteSnapshot not implemented") + // This is the identifier of the snapshot against which changes + // are to be computed. + // This field is REQUIRED. + BaseSnapshotId string `protobuf:"bytes,1,opt,name=base_snapshot_id,json=baseSnapshotId,proto3" json:"base_snapshot_id,omitempty"` + // This is the identifier of a second snapshot in the same volume, + // created after the base snapshot. + // This field is REQUIRED. + TargetSnapshotId string `protobuf:"bytes,2,opt,name=target_snapshot_id,json=targetSnapshotId,proto3" json:"target_snapshot_id,omitempty"` + // This indicates the zero based starting byte position in the volume + // snapshot from which the result should be computed. + // It is intended to be used to continue a previously interrupted + // call. + // The CO SHOULD specify this value to be the offset of the byte + // position immediately after the last byte of the last data range + // received, if continuing an interrupted operation, or zero if not. + // The SP MUST ensure that the returned response stream does not + // contain BlockMetadata tuples that end before the requested + // starting_offset: i.e. if S is the requested starting_offset, and + // B0 is block_metadata[0] of the first message in the response + // stream, then (S < B0.byte_offset + B0.size_bytes) must be true. + // This field is REQUIRED. + StartingOffset int64 `protobuf:"varint,3,opt,name=starting_offset,json=startingOffset,proto3" json:"starting_offset,omitempty"` + // This is an optional parameter, and if non-zero it specifies the + // maximum number of tuples to be returned in each + // GetMetadataDeltaResponse message returned by the RPC stream. + // The plugin will determine an appropriate value if 0, and is + // always free to send less than the requested value. + // This field is OPTIONAL. + MaxResults int32 `protobuf:"varint,4,opt,name=max_results,json=maxResults,proto3" json:"max_results,omitempty"` + // Secrets required by plugin to complete the request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
+ Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (*UnimplementedControllerServer) ListSnapshots(ctx context.Context, req *ListSnapshotsRequest) (*ListSnapshotsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") + +func (x *GetMetadataDeltaRequest) Reset() { + *x = GetMetadataDeltaRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (*UnimplementedControllerServer) ControllerExpandVolume(ctx context.Context, req *ControllerExpandVolumeRequest) (*ControllerExpandVolumeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ControllerExpandVolume not implemented") + +func (x *GetMetadataDeltaRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (*UnimplementedControllerServer) ControllerGetVolume(ctx context.Context, req *ControllerGetVolumeRequest) (*ControllerGetVolumeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ControllerGetVolume not implemented") + +func (*GetMetadataDeltaRequest) ProtoMessage() {} + +func (x *GetMetadataDeltaRequest) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[75] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func RegisterControllerServer(s *grpc.Server, srv ControllerServer) { - s.RegisterService(&_Controller_serviceDesc, srv) +// Deprecated: Use GetMetadataDeltaRequest.ProtoReflect.Descriptor instead. 
+func (*GetMetadataDeltaRequest) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{75} } -func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).CreateVolume(ctx, in) +func (x *GetMetadataDeltaRequest) GetBaseSnapshotId() string { + if x != nil { + return x.BaseSnapshotId } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Controller/CreateVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest)) - } - return interceptor(ctx, in, info, handler) + return "" } -func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).DeleteVolume(ctx, in) +func (x *GetMetadataDeltaRequest) GetTargetSnapshotId() string { + if x != nil { + return x.TargetSnapshotId } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Controller/DeleteVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest)) - } - return interceptor(ctx, in, info, handler) + return "" } -func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerPublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ControllerPublishVolume(ctx, in) +func (x *GetMetadataDeltaRequest) GetStartingOffset() int64 { + if x != nil { + return x.StartingOffset } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Controller/ControllerPublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) + return 0 } -func _Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerUnpublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ControllerUnpublishVolume(ctx, in) +func (x *GetMetadataDeltaRequest) GetMaxResults() int32 { + if x != nil { + return x.MaxResults } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Controller/ControllerUnpublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) + return 0 } -func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ValidateVolumeCapabilitiesRequest) 
- if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in) +func (x *GetMetadataDeltaRequest) GetSecrets() map[string]string { + if x != nil { + return x.Secrets } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Controller/ValidateVolumeCapabilities", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) + return nil } -func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListVolumesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ListVolumes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Controller/ListVolumes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest)) - } - return interceptor(ctx, in, info, handler) +// GetMetadataDeltaResponse messages are returned in a gRPC stream. +// Cumulatively, they provide information on the data ranges that +// have changed between the base and target snapshots specified +// in the GetMetadataDeltaRequest message. +type GetMetadataDeltaResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This specifies the style used in the BlockMetadata sequence. + // This value must be the same in all such messages returned by + // the stream. + // If block_metadata_type is FIXED_LENGTH, then the size_bytes field + // of each message in the block_metadata list MUST be constant. + // This field is REQUIRED. + BlockMetadataType BlockMetadataType `protobuf:"varint,1,opt,name=block_metadata_type,json=blockMetadataType,proto3,enum=csi.v1.BlockMetadataType" json:"block_metadata_type,omitempty"` + // This returns the capacity of the underlying volume in bytes. + // This value must be the same in all such messages returned by + // the stream. + // This field is REQUIRED. + VolumeCapacityBytes int64 `protobuf:"varint,2,opt,name=volume_capacity_bytes,json=volumeCapacityBytes,proto3" json:"volume_capacity_bytes,omitempty"` + // This is a list of data range tuples. + // If the value of max_results in the GetMetadataDeltaRequest message + // is greater than zero, then the number of entries in this list MUST + // be less than or equal to that value. + // The SP MUST respect the value of starting_offset in the request. + // The byte_offset fields of adjacent BlockMetadata messages + // MUST be strictly increasing and messages MUST NOT overlap: + // i.e. for any two BlockMetadata messages, A and B, if A is returned + // before B, then (A.byte_offset + A.size_bytes <= B.byte_offset) + // MUST be true. + // This MUST also be true if A and B are from block_metadata lists in + // different GetMetadataDeltaResponse messages in the gRPC stream. + // This field is OPTIONAL. 
+ BlockMetadata []*BlockMetadata `protobuf:"bytes,3,rep,name=block_metadata,json=blockMetadata,proto3" json:"block_metadata,omitempty"` } -func _Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetCapacityRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).GetCapacity(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Controller/GetCapacity", +func (x *GetMetadataDeltaResponse) Reset() { + *x = GetMetadataDeltaResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest)) - } - return interceptor(ctx, in, info, handler) } -func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerGetCapabilitiesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ControllerGetCapabilities(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Controller/ControllerGetCapabilities", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) +func (x *GetMetadataDeltaResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func _Controller_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).CreateSnapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Controller/CreateSnapshot", +func (*GetMetadataDeltaResponse) ProtoMessage() {} + +func (x *GetMetadataDeltaResponse) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[76] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest)) + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadataDeltaResponse.ProtoReflect.Descriptor instead. 
+func (*GetMetadataDeltaResponse) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{76} +} + +func (x *GetMetadataDeltaResponse) GetBlockMetadataType() BlockMetadataType { + if x != nil { + return x.BlockMetadataType } - return interceptor(ctx, in, info, handler) + return BlockMetadataType_UNKNOWN } -func _Controller_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteSnapshotRequest) - if err := dec(in); err != nil { - return nil, err +func (x *GetMetadataDeltaResponse) GetVolumeCapacityBytes() int64 { + if x != nil { + return x.VolumeCapacityBytes } - if interceptor == nil { - return srv.(ControllerServer).DeleteSnapshot(ctx, in) + return 0 +} + +func (x *GetMetadataDeltaResponse) GetBlockMetadata() []*BlockMetadata { + if x != nil { + return x.BlockMetadata } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Controller/DeleteSnapshot", + return nil +} + +type PluginCapability_Service struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type PluginCapability_Service_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.PluginCapability_Service_Type" json:"type,omitempty"` +} + +func (x *PluginCapability_Service) Reset() { + *x = PluginCapability_Service{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[78] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest)) +} + +func (x *PluginCapability_Service) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PluginCapability_Service) ProtoMessage() {} + +func (x *PluginCapability_Service) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[78] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) +} + +// Deprecated: Use PluginCapability_Service.ProtoReflect.Descriptor instead. 
+func (*PluginCapability_Service) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{4, 0} } -func _Controller_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListSnapshotsRequest) - if err := dec(in); err != nil { - return nil, err +func (x *PluginCapability_Service) GetType() PluginCapability_Service_Type { + if x != nil { + return x.Type } - if interceptor == nil { - return srv.(ControllerServer).ListSnapshots(ctx, in) + return PluginCapability_Service_UNKNOWN +} + +type PluginCapability_VolumeExpansion struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type PluginCapability_VolumeExpansion_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.PluginCapability_VolumeExpansion_Type" json:"type,omitempty"` +} + +func (x *PluginCapability_VolumeExpansion) Reset() { + *x = PluginCapability_VolumeExpansion{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[79] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Controller/ListSnapshots", +} + +func (x *PluginCapability_VolumeExpansion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PluginCapability_VolumeExpansion) ProtoMessage() {} + +func (x *PluginCapability_VolumeExpansion) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[79] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest)) + return mi.MessageOf(x) +} + +// Deprecated: Use PluginCapability_VolumeExpansion.ProtoReflect.Descriptor instead. +func (*PluginCapability_VolumeExpansion) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *PluginCapability_VolumeExpansion) GetType() PluginCapability_VolumeExpansion_Type { + if x != nil { + return x.Type } - return interceptor(ctx, in, info, handler) + return PluginCapability_VolumeExpansion_UNKNOWN } -func _Controller_ControllerExpandVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerExpandVolumeRequest) - if err := dec(in); err != nil { - return nil, err +type VolumeContentSource_SnapshotSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Contains identity information for the existing source snapshot. + // This field is REQUIRED. Plugin is REQUIRED to support creating + // volume from snapshot if it supports the capability + // CREATE_DELETE_SNAPSHOT. 
+ SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` +} + +func (x *VolumeContentSource_SnapshotSource) Reset() { + *x = VolumeContentSource_SnapshotSource{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[83] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - if interceptor == nil { - return srv.(ControllerServer).ControllerExpandVolume(ctx, in) +} + +func (x *VolumeContentSource_SnapshotSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeContentSource_SnapshotSource) ProtoMessage() {} + +func (x *VolumeContentSource_SnapshotSource) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[83] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Controller/ControllerExpandVolume", + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeContentSource_SnapshotSource.ProtoReflect.Descriptor instead. +func (*VolumeContentSource_SnapshotSource) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *VolumeContentSource_SnapshotSource) GetSnapshotId() string { + if x != nil { + return x.SnapshotId } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerExpandVolume(ctx, req.(*ControllerExpandVolumeRequest)) + return "" +} + +type VolumeContentSource_VolumeSource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` +} + +func (x *VolumeContentSource_VolumeSource) Reset() { + *x = VolumeContentSource_VolumeSource{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[84] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return interceptor(ctx, in, info, handler) } -func _Controller_ControllerGetVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerGetVolumeRequest) - if err := dec(in); err != nil { - return nil, err +func (x *VolumeContentSource_VolumeSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeContentSource_VolumeSource) ProtoMessage() {} + +func (x *VolumeContentSource_VolumeSource) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[84] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - if interceptor == nil { - return srv.(ControllerServer).ControllerGetVolume(ctx, in) + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeContentSource_VolumeSource.ProtoReflect.Descriptor instead. 
+func (*VolumeContentSource_VolumeSource) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{8, 1} +} + +func (x *VolumeContentSource_VolumeSource) GetVolumeId() string { + if x != nil { + return x.VolumeId } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Controller/ControllerGetVolume", + return "" +} + +// Indicate that the volume will be accessed via the block device API. +type VolumeCapability_BlockVolume struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VolumeCapability_BlockVolume) Reset() { + *x = VolumeCapability_BlockVolume{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[85] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerGetVolume(ctx, req.(*ControllerGetVolumeRequest)) +} + +func (x *VolumeCapability_BlockVolume) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VolumeCapability_BlockVolume) ProtoMessage() {} + +func (x *VolumeCapability_BlockVolume) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[85] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -var _Controller_serviceDesc = grpc.ServiceDesc{ - ServiceName: "csi.v1.Controller", - HandlerType: (*ControllerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateVolume", - Handler: _Controller_CreateVolume_Handler, - }, - { - MethodName: "DeleteVolume", - Handler: _Controller_DeleteVolume_Handler, - }, - { - MethodName: "ControllerPublishVolume", - Handler: _Controller_ControllerPublishVolume_Handler, - }, - { - MethodName: "ControllerUnpublishVolume", - Handler: _Controller_ControllerUnpublishVolume_Handler, - }, - { - MethodName: "ValidateVolumeCapabilities", - Handler: _Controller_ValidateVolumeCapabilities_Handler, - }, - { - MethodName: "ListVolumes", - Handler: _Controller_ListVolumes_Handler, - }, - { - MethodName: "GetCapacity", - Handler: _Controller_GetCapacity_Handler, - }, - { - MethodName: "ControllerGetCapabilities", - Handler: _Controller_ControllerGetCapabilities_Handler, - }, - { - MethodName: "CreateSnapshot", - Handler: _Controller_CreateSnapshot_Handler, - }, - { - MethodName: "DeleteSnapshot", - Handler: _Controller_DeleteSnapshot_Handler, - }, - { - MethodName: "ListSnapshots", - Handler: _Controller_ListSnapshots_Handler, - }, - { - MethodName: "ControllerExpandVolume", - Handler: _Controller_ControllerExpandVolume_Handler, - }, - { - MethodName: "ControllerGetVolume", - Handler: _Controller_ControllerGetVolume_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/container-storage-interface/spec/csi.proto", +// Deprecated: Use VolumeCapability_BlockVolume.ProtoReflect.Descriptor instead. +func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{10, 0} } -// GroupControllerClient is the client API for GroupController service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type GroupControllerClient interface { - GroupControllerGetCapabilities(ctx context.Context, in *GroupControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*GroupControllerGetCapabilitiesResponse, error) - CreateVolumeGroupSnapshot(ctx context.Context, in *CreateVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*CreateVolumeGroupSnapshotResponse, error) - DeleteVolumeGroupSnapshot(ctx context.Context, in *DeleteVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*DeleteVolumeGroupSnapshotResponse, error) - GetVolumeGroupSnapshot(ctx context.Context, in *GetVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*GetVolumeGroupSnapshotResponse, error) +// Indicate that the volume will be accessed via the filesystem API. +type VolumeCapability_MountVolume struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The filesystem type. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType,proto3" json:"fs_type,omitempty"` + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. + MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags,proto3" json:"mount_flags,omitempty"` + // If SP has VOLUME_MOUNT_GROUP node capability and CO provides + // this field then SP MUST ensure that the volume_mount_group + // parameter is passed as the group identifier to the underlying + // operating system mount system call, with the understanding + // that the set of available mount call parameters and/or + // mount implementations may vary across operating systems. + // Additionally, new file and/or directory entries written to + // the underlying filesystem SHOULD be permission-labeled in such a + // manner, unless otherwise modified by a workload, that they are + // both readable and writable by said mount group identifier. + // This is an OPTIONAL field. + VolumeMountGroup string `protobuf:"bytes,3,opt,name=volume_mount_group,json=volumeMountGroup,proto3" json:"volume_mount_group,omitempty"` } -type groupControllerClient struct { - cc *grpc.ClientConn +func (x *VolumeCapability_MountVolume) Reset() { + *x = VolumeCapability_MountVolume{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[86] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func NewGroupControllerClient(cc *grpc.ClientConn) GroupControllerClient { - return &groupControllerClient{cc} +func (x *VolumeCapability_MountVolume) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *groupControllerClient) GroupControllerGetCapabilities(ctx context.Context, in *GroupControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*GroupControllerGetCapabilitiesResponse, error) { - out := new(GroupControllerGetCapabilitiesResponse) - err := c.cc.Invoke(ctx, "/csi.v1.GroupController/GroupControllerGetCapabilities", in, out, opts...) 
- if err != nil { - return nil, err +func (*VolumeCapability_MountVolume) ProtoMessage() {} + +func (x *VolumeCapability_MountVolume) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[86] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) +} + +// Deprecated: Use VolumeCapability_MountVolume.ProtoReflect.Descriptor instead. +func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{10, 1} } -func (c *groupControllerClient) CreateVolumeGroupSnapshot(ctx context.Context, in *CreateVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*CreateVolumeGroupSnapshotResponse, error) { - out := new(CreateVolumeGroupSnapshotResponse) - err := c.cc.Invoke(ctx, "/csi.v1.GroupController/CreateVolumeGroupSnapshot", in, out, opts...) - if err != nil { - return nil, err +func (x *VolumeCapability_MountVolume) GetFsType() string { + if x != nil { + return x.FsType } - return out, nil + return "" } -func (c *groupControllerClient) DeleteVolumeGroupSnapshot(ctx context.Context, in *DeleteVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*DeleteVolumeGroupSnapshotResponse, error) { - out := new(DeleteVolumeGroupSnapshotResponse) - err := c.cc.Invoke(ctx, "/csi.v1.GroupController/DeleteVolumeGroupSnapshot", in, out, opts...) - if err != nil { - return nil, err +func (x *VolumeCapability_MountVolume) GetMountFlags() []string { + if x != nil { + return x.MountFlags } - return out, nil + return nil } -func (c *groupControllerClient) GetVolumeGroupSnapshot(ctx context.Context, in *GetVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*GetVolumeGroupSnapshotResponse, error) { - out := new(GetVolumeGroupSnapshotResponse) - err := c.cc.Invoke(ctx, "/csi.v1.GroupController/GetVolumeGroupSnapshot", in, out, opts...) - if err != nil { - return nil, err +func (x *VolumeCapability_MountVolume) GetVolumeMountGroup() string { + if x != nil { + return x.VolumeMountGroup } - return out, nil + return "" } -// GroupControllerServer is the server API for GroupController service. -type GroupControllerServer interface { - GroupControllerGetCapabilities(context.Context, *GroupControllerGetCapabilitiesRequest) (*GroupControllerGetCapabilitiesResponse, error) - CreateVolumeGroupSnapshot(context.Context, *CreateVolumeGroupSnapshotRequest) (*CreateVolumeGroupSnapshotResponse, error) - DeleteVolumeGroupSnapshot(context.Context, *DeleteVolumeGroupSnapshotRequest) (*DeleteVolumeGroupSnapshotResponse, error) - GetVolumeGroupSnapshot(context.Context, *GetVolumeGroupSnapshotRequest) (*GetVolumeGroupSnapshotResponse, error) +// Specify how a volume can be accessed. +type VolumeCapability_AccessMode struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field is REQUIRED. + Mode VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,proto3,enum=csi.v1.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"` } -// UnimplementedGroupControllerServer can be embedded to have forward compatible implementations. 
-type UnimplementedGroupControllerServer struct { +func (x *VolumeCapability_AccessMode) Reset() { + *x = VolumeCapability_AccessMode{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[87] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (*UnimplementedGroupControllerServer) GroupControllerGetCapabilities(ctx context.Context, req *GroupControllerGetCapabilitiesRequest) (*GroupControllerGetCapabilitiesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GroupControllerGetCapabilities not implemented") +func (x *VolumeCapability_AccessMode) String() string { + return protoimpl.X.MessageStringOf(x) } -func (*UnimplementedGroupControllerServer) CreateVolumeGroupSnapshot(ctx context.Context, req *CreateVolumeGroupSnapshotRequest) (*CreateVolumeGroupSnapshotResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateVolumeGroupSnapshot not implemented") + +func (*VolumeCapability_AccessMode) ProtoMessage() {} + +func (x *VolumeCapability_AccessMode) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[87] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (*UnimplementedGroupControllerServer) DeleteVolumeGroupSnapshot(ctx context.Context, req *DeleteVolumeGroupSnapshotRequest) (*DeleteVolumeGroupSnapshotResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteVolumeGroupSnapshot not implemented") + +// Deprecated: Use VolumeCapability_AccessMode.ProtoReflect.Descriptor instead. +func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{10, 2} } -func (*UnimplementedGroupControllerServer) GetVolumeGroupSnapshot(ctx context.Context, req *GetVolumeGroupSnapshotRequest) (*GetVolumeGroupSnapshotResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetVolumeGroupSnapshot not implemented") + +func (x *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode { + if x != nil { + return x.Mode + } + return VolumeCapability_AccessMode_UNKNOWN } -func RegisterGroupControllerServer(s *grpc.Server, srv GroupControllerServer) { - s.RegisterService(&_GroupController_serviceDesc, srv) +type ValidateVolumeCapabilitiesResponse_Confirmed struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Volume context validated by the plugin. + // This field is OPTIONAL. + VolumeContext map[string]string `protobuf:"bytes,1,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume capabilities supported by the plugin. + // This field is REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"` + // The volume creation parameters validated by the plugin. + // This field is OPTIONAL. + Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The volume creation mutable_parameters validated by the plugin. + // This field is OPTIONAL. 
+ MutableParameters map[string]string `protobuf:"bytes,4,rep,name=mutable_parameters,json=mutableParameters,proto3" json:"mutable_parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func _GroupController_GroupControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GroupControllerGetCapabilitiesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GroupControllerServer).GroupControllerGetCapabilities(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.GroupController/GroupControllerGetCapabilities", +func (x *ValidateVolumeCapabilitiesResponse_Confirmed) Reset() { + *x = ValidateVolumeCapabilitiesResponse_Confirmed{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[99] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GroupControllerServer).GroupControllerGetCapabilities(ctx, req.(*GroupControllerGetCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) } -func _GroupController_CreateVolumeGroupSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateVolumeGroupSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GroupControllerServer).CreateVolumeGroupSnapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.GroupController/CreateVolumeGroupSnapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GroupControllerServer).CreateVolumeGroupSnapshot(ctx, req.(*CreateVolumeGroupSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) +func (x *ValidateVolumeCapabilitiesResponse_Confirmed) String() string { + return protoimpl.X.MessageStringOf(x) } -func _GroupController_DeleteVolumeGroupSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteVolumeGroupSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GroupControllerServer).DeleteVolumeGroupSnapshot(ctx, in) +func (*ValidateVolumeCapabilitiesResponse_Confirmed) ProtoMessage() {} + +func (x *ValidateVolumeCapabilitiesResponse_Confirmed) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[99] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.GroupController/DeleteVolumeGroupSnapshot", + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateVolumeCapabilitiesResponse_Confirmed.ProtoReflect.Descriptor instead. 
+func (*ValidateVolumeCapabilitiesResponse_Confirmed) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{22, 0} +} + +func (x *ValidateVolumeCapabilitiesResponse_Confirmed) GetVolumeContext() map[string]string { + if x != nil { + return x.VolumeContext } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GroupControllerServer).DeleteVolumeGroupSnapshot(ctx, req.(*DeleteVolumeGroupSnapshotRequest)) + return nil +} + +func (x *ValidateVolumeCapabilitiesResponse_Confirmed) GetVolumeCapabilities() []*VolumeCapability { + if x != nil { + return x.VolumeCapabilities } - return interceptor(ctx, in, info, handler) + return nil } -func _GroupController_GetVolumeGroupSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetVolumeGroupSnapshotRequest) - if err := dec(in); err != nil { - return nil, err +func (x *ValidateVolumeCapabilitiesResponse_Confirmed) GetParameters() map[string]string { + if x != nil { + return x.Parameters } - if interceptor == nil { - return srv.(GroupControllerServer).GetVolumeGroupSnapshot(ctx, in) + return nil +} + +func (x *ValidateVolumeCapabilitiesResponse_Confirmed) GetMutableParameters() map[string]string { + if x != nil { + return x.MutableParameters } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.GroupController/GetVolumeGroupSnapshot", + return nil +} + +type ListVolumesResponse_VolumeStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of all `node_id` of nodes that the volume in this entry + // is controller published on. + // This field is OPTIONAL. If it is not specified and the SP has + // the LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO + // MAY assume the volume is not controller published to any nodes. + // If the field is not specified and the SP does not have the + // LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO MUST + // not interpret this field. + // published_node_ids MAY include nodes not published to or + // reported by the SP. The CO MUST be resilient to that. + PublishedNodeIds []string `protobuf:"bytes,1,rep,name=published_node_ids,json=publishedNodeIds,proto3" json:"published_node_ids,omitempty"` + // Information about the current condition of the volume. + // This field is OPTIONAL. + // This field MUST be specified if the + // VOLUME_CONDITION controller capability is supported. 
+ VolumeCondition *VolumeCondition `protobuf:"bytes,2,opt,name=volume_condition,json=volumeCondition,proto3" json:"volume_condition,omitempty"` +} + +func (x *ListVolumesResponse_VolumeStatus) Reset() { + *x = ListVolumesResponse_VolumeStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[103] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GroupControllerServer).GetVolumeGroupSnapshot(ctx, req.(*GetVolumeGroupSnapshotRequest)) +} + +func (x *ListVolumesResponse_VolumeStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListVolumesResponse_VolumeStatus) ProtoMessage() {} + +func (x *ListVolumesResponse_VolumeStatus) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[103] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -var _GroupController_serviceDesc = grpc.ServiceDesc{ - ServiceName: "csi.v1.GroupController", - HandlerType: (*GroupControllerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GroupControllerGetCapabilities", - Handler: _GroupController_GroupControllerGetCapabilities_Handler, - }, - { - MethodName: "CreateVolumeGroupSnapshot", - Handler: _GroupController_CreateVolumeGroupSnapshot_Handler, - }, - { - MethodName: "DeleteVolumeGroupSnapshot", - Handler: _GroupController_DeleteVolumeGroupSnapshot_Handler, - }, - { - MethodName: "GetVolumeGroupSnapshot", - Handler: _GroupController_GetVolumeGroupSnapshot_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/container-storage-interface/spec/csi.proto", +// Deprecated: Use ListVolumesResponse_VolumeStatus.ProtoReflect.Descriptor instead. +func (*ListVolumesResponse_VolumeStatus) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{24, 0} } -// NodeClient is the client API for Node service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type NodeClient interface { - NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) - NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) - NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) - NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) - NodeGetVolumeStats(ctx context.Context, in *NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*NodeGetVolumeStatsResponse, error) - NodeExpandVolume(ctx context.Context, in *NodeExpandVolumeRequest, opts ...grpc.CallOption) (*NodeExpandVolumeResponse, error) - NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) - NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) +func (x *ListVolumesResponse_VolumeStatus) GetPublishedNodeIds() []string { + if x != nil { + return x.PublishedNodeIds + } + return nil } -type nodeClient struct { - cc *grpc.ClientConn +func (x *ListVolumesResponse_VolumeStatus) GetVolumeCondition() *VolumeCondition { + if x != nil { + return x.VolumeCondition + } + return nil } -func NewNodeClient(cc *grpc.ClientConn) NodeClient { - return &nodeClient{cc} +type ListVolumesResponse_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field is REQUIRED + Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"` + // This field is OPTIONAL. This field MUST be specified if the + // LIST_VOLUMES_PUBLISHED_NODES controller capability is + // supported. + Status *ListVolumesResponse_VolumeStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` } -func (c *nodeClient) NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) { - out := new(NodeStageVolumeResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeStageVolume", in, out, opts...) - if err != nil { - return nil, err +func (x *ListVolumesResponse_Entry) Reset() { + *x = ListVolumesResponse_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[104] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return out, nil } -func (c *nodeClient) NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) { - out := new(NodeUnstageVolumeResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeUnstageVolume", in, out, opts...) - if err != nil { - return nil, err +func (x *ListVolumesResponse_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListVolumesResponse_Entry) ProtoMessage() {} + +func (x *ListVolumesResponse_Entry) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[104] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) +} + +// Deprecated: Use ListVolumesResponse_Entry.ProtoReflect.Descriptor instead. 
+func (*ListVolumesResponse_Entry) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{24, 1} } -func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) { - out := new(NodePublishVolumeResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Node/NodePublishVolume", in, out, opts...) - if err != nil { - return nil, err +func (x *ListVolumesResponse_Entry) GetVolume() *Volume { + if x != nil { + return x.Volume } - return out, nil + return nil } -func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) { - out := new(NodeUnpublishVolumeResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeUnpublishVolume", in, out, opts...) - if err != nil { - return nil, err +func (x *ListVolumesResponse_Entry) GetStatus() *ListVolumesResponse_VolumeStatus { + if x != nil { + return x.Status } - return out, nil + return nil } -func (c *nodeClient) NodeGetVolumeStats(ctx context.Context, in *NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*NodeGetVolumeStatsResponse, error) { - out := new(NodeGetVolumeStatsResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetVolumeStats", in, out, opts...) - if err != nil { - return nil, err +type ControllerGetVolumeResponse_VolumeStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of all the `node_id` of nodes that this volume is + // controller published on. + // This field is OPTIONAL. + // This field MUST be specified if the LIST_VOLUMES_PUBLISHED_NODES + // controller capability is supported. + // published_node_ids MAY include nodes not published to or + // reported by the SP. The CO MUST be resilient to that. + PublishedNodeIds []string `protobuf:"bytes,1,rep,name=published_node_ids,json=publishedNodeIds,proto3" json:"published_node_ids,omitempty"` + // Information about the current condition of the volume. + // This field is OPTIONAL. + // This field MUST be specified if the + // VOLUME_CONDITION controller capability is supported. + VolumeCondition *VolumeCondition `protobuf:"bytes,2,opt,name=volume_condition,json=volumeCondition,proto3" json:"volume_condition,omitempty"` +} + +func (x *ControllerGetVolumeResponse_VolumeStatus) Reset() { + *x = ControllerGetVolumeResponse_VolumeStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[105] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return out, nil } -func (c *nodeClient) NodeExpandVolume(ctx context.Context, in *NodeExpandVolumeRequest, opts ...grpc.CallOption) (*NodeExpandVolumeResponse, error) { - out := new(NodeExpandVolumeResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeExpandVolume", in, out, opts...) 
- if err != nil { - return nil, err +func (x *ControllerGetVolumeResponse_VolumeStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerGetVolumeResponse_VolumeStatus) ProtoMessage() {} + +func (x *ControllerGetVolumeResponse_VolumeStatus) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[105] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) { - out := new(NodeGetCapabilitiesResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetCapabilities", in, out, opts...) - if err != nil { - return nil, err +// Deprecated: Use ControllerGetVolumeResponse_VolumeStatus.ProtoReflect.Descriptor instead. +func (*ControllerGetVolumeResponse_VolumeStatus) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{26, 0} +} + +func (x *ControllerGetVolumeResponse_VolumeStatus) GetPublishedNodeIds() []string { + if x != nil { + return x.PublishedNodeIds } - return out, nil + return nil } -func (c *nodeClient) NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) { - out := new(NodeGetInfoResponse) - err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetInfo", in, out, opts...) - if err != nil { - return nil, err +func (x *ControllerGetVolumeResponse_VolumeStatus) GetVolumeCondition() *VolumeCondition { + if x != nil { + return x.VolumeCondition } - return out, nil + return nil } -// NodeServer is the server API for Node service. -type NodeServer interface { - NodeStageVolume(context.Context, *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) - NodeUnstageVolume(context.Context, *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) - NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) - NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) - NodeGetVolumeStats(context.Context, *NodeGetVolumeStatsRequest) (*NodeGetVolumeStatsResponse, error) - NodeExpandVolume(context.Context, *NodeExpandVolumeRequest) (*NodeExpandVolumeResponse, error) - NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) - NodeGetInfo(context.Context, *NodeGetInfoRequest) (*NodeGetInfoResponse, error) +type ControllerServiceCapability_RPC struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.ControllerServiceCapability_RPC_Type" json:"type,omitempty"` } -// UnimplementedNodeServer can be embedded to have forward compatible implementations. 
-type UnimplementedNodeServer struct { +func (x *ControllerServiceCapability_RPC) Reset() { + *x = ControllerServiceCapability_RPC{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[109] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (*UnimplementedNodeServer) NodeStageVolume(ctx context.Context, req *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeStageVolume not implemented") +func (x *ControllerServiceCapability_RPC) String() string { + return protoimpl.X.MessageStringOf(x) } -func (*UnimplementedNodeServer) NodeUnstageVolume(ctx context.Context, req *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeUnstageVolume not implemented") + +func (*ControllerServiceCapability_RPC) ProtoMessage() {} + +func (x *ControllerServiceCapability_RPC) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[109] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (*UnimplementedNodeServer) NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodePublishVolume not implemented") + +// Deprecated: Use ControllerServiceCapability_RPC.ProtoReflect.Descriptor instead. +func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{33, 0} } -func (*UnimplementedNodeServer) NodeUnpublishVolume(ctx context.Context, req *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeUnpublishVolume not implemented") + +func (x *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type { + if x != nil { + return x.Type + } + return ControllerServiceCapability_RPC_UNKNOWN } -func (*UnimplementedNodeServer) NodeGetVolumeStats(ctx context.Context, req *NodeGetVolumeStatsRequest) (*NodeGetVolumeStatsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeGetVolumeStats not implemented") + +type ListSnapshotsResponse_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` } -func (*UnimplementedNodeServer) NodeExpandVolume(ctx context.Context, req *NodeExpandVolumeRequest) (*NodeExpandVolumeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeExpandVolume not implemented") + +func (x *ListSnapshotsResponse_Entry) Reset() { + *x = ListSnapshotsResponse_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[114] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (*UnimplementedNodeServer) NodeGetCapabilities(ctx context.Context, req *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NodeGetCapabilities not implemented") + +func (x *ListSnapshotsResponse_Entry) String() string { + return protoimpl.X.MessageStringOf(x) } -func (*UnimplementedNodeServer) NodeGetInfo(ctx context.Context, req *NodeGetInfoRequest) (*NodeGetInfoResponse, error) { - return nil, 
status.Errorf(codes.Unimplemented, "method NodeGetInfo not implemented") + +func (*ListSnapshotsResponse_Entry) ProtoMessage() {} + +func (x *ListSnapshotsResponse_Entry) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[114] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func RegisterNodeServer(s *grpc.Server, srv NodeServer) { - s.RegisterService(&_Node_serviceDesc, srv) +// Deprecated: Use ListSnapshotsResponse_Entry.ProtoReflect.Descriptor instead. +func (*ListSnapshotsResponse_Entry) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{40, 0} } -func _Node_NodeStageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeStageVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeStageVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Node/NodeStageVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeStageVolume(ctx, req.(*NodeStageVolumeRequest)) +func (x *ListSnapshotsResponse_Entry) GetSnapshot() *Snapshot { + if x != nil { + return x.Snapshot } - return interceptor(ctx, in, info, handler) + return nil } -func _Node_NodeUnstageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeUnstageVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeUnstageVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Node/NodeUnstageVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeUnstageVolume(ctx, req.(*NodeUnstageVolumeRequest)) - } - return interceptor(ctx, in, info, handler) +type NodeServiceCapability_RPC struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.NodeServiceCapability_RPC_Type" json:"type,omitempty"` } -func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodePublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodePublishVolume(ctx, in) +func (x *NodeServiceCapability_RPC) Reset() { + *x = NodeServiceCapability_RPC{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[122] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Node/NodePublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) } -func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(NodeUnpublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeUnpublishVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Node/NodeUnpublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) +func (x *NodeServiceCapability_RPC) String() string { + return protoimpl.X.MessageStringOf(x) } -func _Node_NodeGetVolumeStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeGetVolumeStatsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeGetVolumeStats(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Node/NodeGetVolumeStats", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeGetVolumeStats(ctx, req.(*NodeGetVolumeStatsRequest)) +func (*NodeServiceCapability_RPC) ProtoMessage() {} + +func (x *NodeServiceCapability_RPC) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[122] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -func _Node_NodeExpandVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeExpandVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeExpandVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Node/NodeExpandVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeExpandVolume(ctx, req.(*NodeExpandVolumeRequest)) - } - return interceptor(ctx, in, info, handler) +// Deprecated: Use NodeServiceCapability_RPC.ProtoReflect.Descriptor instead. 
+func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{57, 0} } -func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeGetCapabilitiesRequest) - if err := dec(in); err != nil { - return nil, err +func (x *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type { + if x != nil { + return x.Type } - if interceptor == nil { - return srv.(NodeServer).NodeGetCapabilities(ctx, in) + return NodeServiceCapability_RPC_UNKNOWN +} + +type GroupControllerServiceCapability_RPC struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type GroupControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.GroupControllerServiceCapability_RPC_Type" json:"type,omitempty"` +} + +func (x *GroupControllerServiceCapability_RPC) Reset() { + *x = GroupControllerServiceCapability_RPC{} + if protoimpl.UnsafeEnabled { + mi := &file_csi_proto_msgTypes[124] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Node/NodeGetCapabilities", +} + +func (x *GroupControllerServiceCapability_RPC) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GroupControllerServiceCapability_RPC) ProtoMessage() {} + +func (x *GroupControllerServiceCapability_RPC) ProtoReflect() protoreflect.Message { + mi := &file_csi_proto_msgTypes[124] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest)) + return mi.MessageOf(x) +} + +// Deprecated: Use GroupControllerServiceCapability_RPC.ProtoReflect.Descriptor instead. 
+func (*GroupControllerServiceCapability_RPC) Descriptor() ([]byte, []int) { + return file_csi_proto_rawDescGZIP(), []int{64, 0} +} + +func (x *GroupControllerServiceCapability_RPC) GetType() GroupControllerServiceCapability_RPC_Type { + if x != nil { + return x.Type } - return interceptor(ctx, in, info, handler) + return GroupControllerServiceCapability_RPC_UNKNOWN } -func _Node_NodeGetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeGetInfoRequest) - if err := dec(in); err != nil { - return nil, err +var file_csi_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1060, + Name: "csi.v1.alpha_enum", + Tag: "varint,1060,opt,name=alpha_enum", + Filename: "csi.proto", + }, + { + ExtendedType: (*descriptorpb.EnumValueOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1060, + Name: "csi.v1.alpha_enum_value", + Tag: "varint,1060,opt,name=alpha_enum_value", + Filename: "csi.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1059, + Name: "csi.v1.csi_secret", + Tag: "varint,1059,opt,name=csi_secret", + Filename: "csi.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1060, + Name: "csi.v1.alpha_field", + Tag: "varint,1060,opt,name=alpha_field", + Filename: "csi.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1060, + Name: "csi.v1.alpha_message", + Tag: "varint,1060,opt,name=alpha_message", + Filename: "csi.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1060, + Name: "csi.v1.alpha_method", + Tag: "varint,1060,opt,name=alpha_method", + Filename: "csi.proto", + }, + { + ExtendedType: (*descriptorpb.ServiceOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1060, + Name: "csi.v1.alpha_service", + Tag: "varint,1060,opt,name=alpha_service", + Filename: "csi.proto", + }, +} + +// Extension fields to descriptorpb.EnumOptions. +var ( + // Indicates that this enum is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. + // + // optional bool alpha_enum = 1060; + E_AlphaEnum = &file_csi_proto_extTypes[0] +) + +// Extension fields to descriptorpb.EnumValueOptions. +var ( + // Indicates that this enum value is OPTIONAL and part of an + // experimental API that may be deprecated and eventually removed + // between minor releases. + // + // optional bool alpha_enum_value = 1060; + E_AlphaEnumValue = &file_csi_proto_extTypes[1] +) + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Indicates that a field MAY contain information that is sensitive + // and MUST be treated as such (e.g. not logged). + // + // optional bool csi_secret = 1059; + E_CsiSecret = &file_csi_proto_extTypes[2] + // Indicates that this field is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. + // + // optional bool alpha_field = 1060; + E_AlphaField = &file_csi_proto_extTypes[3] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // Indicates that this message is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. 
+ // + // optional bool alpha_message = 1060; + E_AlphaMessage = &file_csi_proto_extTypes[4] +) + +// Extension fields to descriptorpb.MethodOptions. +var ( + // Indicates that this method is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. + // + // optional bool alpha_method = 1060; + E_AlphaMethod = &file_csi_proto_extTypes[5] +) + +// Extension fields to descriptorpb.ServiceOptions. +var ( + // Indicates that this service is OPTIONAL and part of an experimental + // API that may be deprecated and eventually removed between minor + // releases. + // + // optional bool alpha_service = 1060; + E_AlphaService = &file_csi_proto_extTypes[6] +) + +var File_csi_proto protoreflect.FileDescriptor + +var file_csi_proto_rawDesc = []byte{ + 0x0a, 0x09, 0x63, 0x73, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x73, 0x69, + 0x2e, 0x76, 0x31, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x16, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xd8, + 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, + 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x08, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x08, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x0a, 0x0d, + 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x1e, 0x0a, 0x1c, 0x47, 0x65, 0x74, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5d, 0x0a, 0x1d, 0x47, 0x65, 0x74, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0c, 0x63, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 
0x0b, + 0x32, 0x18, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x63, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x96, 0x04, 0x0a, 0x10, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x3c, 0x0a, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x48, 0x00, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x55, 0x0a, 0x10, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x50, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, + 0x00, 0x52, 0x0f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x1a, 0xdf, 0x01, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x39, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x98, 0x01, 0x0a, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x16, 0x0a, 0x12, 0x43, 0x4f, 0x4e, 0x54, 0x52, 0x4f, 0x4c, 0x4c, 0x45, 0x52, 0x5f, 0x53, 0x45, + 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x01, 0x12, 0x24, 0x0a, 0x20, 0x56, 0x4f, 0x4c, 0x55, 0x4d, + 0x45, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x49, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, + 0x43, 0x4f, 0x4e, 0x53, 0x54, 0x52, 0x41, 0x49, 0x4e, 0x54, 0x53, 0x10, 0x02, 0x12, 0x21, 0x0a, + 0x18, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x52, 0x4f, 0x4c, 0x4c, 0x45, + 0x52, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x03, 0x1a, 0x03, 0xa0, 0x42, 0x01, + 0x12, 0x22, 0x0a, 0x19, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x5f, 0x4d, 0x45, 0x54, + 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x04, 0x1a, + 0x03, 0xa0, 0x42, 0x01, 0x1a, 0x82, 0x01, 0x0a, 0x0f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, + 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x2c, 0x0a, 0x04, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x4e, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, + 0x4f, 0x46, 0x46, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x22, 0x0e, 
0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x41, 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x72, + 0x65, 0x61, 0x64, 0x79, 0x22, 0x9e, 0x06, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x3c, 0x0a, 0x0e, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x61, 0x6e, + 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, + 0x31, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, + 0x0d, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x49, + 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x73, + 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x12, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0a, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x47, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, + 0x4f, 0x0a, 0x15, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x13, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x5a, 0x0a, 0x1a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, + 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x52, 0x19, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 
0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x66, 0x0a, 0x12, + 0x6d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, + 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xa0, 0x42, + 0x01, 0x52, 0x11, 0x6d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x44, 0x0a, 0x16, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x8b, 0x02, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x48, 0x0a, + 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2a, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x08, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x42, 0x0a, 0x06, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x48, 0x00, 0x52, 0x06, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x1a, 0x31, 0x0a, 0x0e, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x0a, + 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x64, 0x1a, 0x2b, + 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x42, 0x06, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x22, 0x3e, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 
0x74, 0x65, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x73, + 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x06, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x22, 0x9f, 0x05, 0x0a, 0x10, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x3c, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x79, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x48, 0x00, 0x52, + 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x3c, 0x0a, 0x05, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, + 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x48, 0x00, 0x52, 0x05, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x44, 0x0a, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6d, + 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x73, 0x69, 0x2e, + 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0a, + 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x1a, 0x0d, 0x0a, 0x0b, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x1a, 0x75, 0x0a, 0x0b, 0x4d, 0x6f, 0x75, + 0x6e, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x73, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x73, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x61, + 0x67, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x1a, 0xb3, 0x02, 0x0a, 0x0a, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x3c, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, + 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, 0x6f, + 0x64, 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x22, 0xe6, 0x01, + 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x4e, 0x4f, + 0x44, 0x45, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x53, + 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x45, + 0x52, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x4d, 0x55, 0x4c, 0x54, + 0x49, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x45, 0x52, 0x5f, 0x4f, 
0x4e, + 0x4c, 0x59, 0x10, 0x03, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x5f, 0x4e, 0x4f, + 0x44, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x52, + 0x10, 0x04, 0x12, 0x1b, 0x0a, 0x17, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x5f, 0x4e, 0x4f, 0x44, 0x45, + 0x5f, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x52, 0x10, 0x05, 0x12, + 0x22, 0x0a, 0x19, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x53, + 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x52, 0x10, 0x06, 0x1a, 0x03, + 0xa0, 0x42, 0x01, 0x12, 0x21, 0x0a, 0x18, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x4e, 0x4f, + 0x44, 0x45, 0x5f, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x52, 0x10, + 0x07, 0x1a, 0x03, 0xa0, 0x42, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x57, 0x0a, 0x0d, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, + 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x64, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xdf, + 0x02, 0x0a, 0x06, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x61, 0x70, + 0x61, 0x63, 0x69, 0x74, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0d, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x48, 0x0a, + 0x0e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x42, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0d, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x13, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, + 0x67, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x52, 0x12, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x1a, 0x40, + 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x75, 0x0a, 0x13, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x75, 0x69, + 0x73, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x73, 0x69, + 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x52, 0x09, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x73, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x66, 0x65, + 0x72, 0x72, 0x65, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x73, 0x69, + 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x52, 0x09, 0x70, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x08, 0x54, 0x6f, 0x70, 0x6f, + 0x6c, 0x6f, 0x67, 0x79, 0x12, 0x3a, 0x0a, 0x08, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x2e, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x1a, 0x3b, 0x0a, 0x0d, 0x53, 0x65, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb7, 0x01, + 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x47, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0x98, + 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0xed, 0x03, 0x0a, 0x1e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, + 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x11, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x63, 0x61, 0x70, 0x61, 
0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x10, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x12, 0x52, 0x0a, 0x07, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, + 0x60, 0x0a, 0x0e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, + 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0d, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x40, 0x0a, + 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xca, 0x01, 0x0a, 0x1f, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x0f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69, + 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x1a, 0x41, 0x0a, 0x13, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xea, 0x01, 0x0a, + 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x55, 0x6e, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x17, + 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x54, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, + 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x55, 0x6e, 0x70, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, + 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x1a, 0x3a, 0x0a, + 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x23, 0x0a, 0x21, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x55, 0x6e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, + 0x06, 0x0a, 0x21, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, + 0x64, 0x12, 0x63, 0x0a, 0x0e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x63, 0x73, 0x69, 0x2e, + 0x76, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x49, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x12, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x12, 0x59, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 
0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x55, 0x0a, 0x07, + 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, + 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x12, 0x74, 0x0a, 0x12, 0x6d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x40, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x42, 0x03, 0xa0, 0x42, 0x01, 0x52, 0x11, 0x6d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x44, 0x0a, 0x16, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x89, 0x06, 0x0a, + 0x22, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x56, 0x61, 0x6c, 
0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x65, 0x64, 0x52, 0x09, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x1a, 0xf4, 0x04, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x65, 0x64, 0x12, + 0x6e, 0x0a, 0x0e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, + 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, 0x65, 0x64, 0x2e, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0d, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, + 0x49, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x12, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x0a, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x72, 0x6d, 0x65, 0x64, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x7f, 0x0a, 0x12, 0x6d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x72, 0x6d, + 0x65, 0x64, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xa0, 0x42, 0x01, 0x52, 0x11, + 0x6d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 
0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x44, 0x0a, 0x16, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5c, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, + 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x69, 0x6e, + 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xec, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, + 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, + 0x65, 0x78, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x1a, 0x85, 0x01, 0x0a, 0x0c, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, + 0x65, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x73, 0x12, 0x47, 0x0a, 0x10, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xa0, 0x42, + 0x01, 0x52, 0x0f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x71, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x73, + 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x06, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 
0x75, 0x73, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x3e, 0x0a, 0x1a, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x6c, 0x65, 0x72, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, + 0x3a, 0x03, 0xa0, 0x42, 0x01, 0x22, 0x97, 0x02, 0x0a, 0x1b, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x65, 0x72, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x06, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x48, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, + 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x80, 0x01, 0x0a, 0x0c, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x4e, + 0x6f, 0x64, 0x65, 0x49, 0x64, 0x73, 0x12, 0x42, 0x0a, 0x10, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x03, 0xa0, 0x42, 0x01, 0x22, + 0x83, 0x03, 0x0a, 0x1d, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4d, 0x6f, + 0x64, 0x69, 0x66, 0x79, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x51, + 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x32, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x6c, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x12, 0x6b, 0x0a, 0x12, 0x6d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, + 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x61, 0x72, 
0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x6d, 0x75, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x3a, + 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x44, 0x0a, 0x16, 0x4d, 0x75, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x3a, 0x03, 0xa0, 0x42, 0x01, 0x22, 0x25, 0x0a, 0x1e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x6c, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x3a, 0x03, 0xa0, 0x42, 0x01, 0x22, 0xad, 0x02, 0x0a, + 0x12, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x12, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x4a, + 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x43, + 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x41, 0x0a, 0x13, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x52, 0x12, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x69, 0x62, 0x6c, 0x65, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x1a, 0x3d, 0x0a, + 0x0f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe3, 0x01, 0x0a, + 0x13, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, + 0x65, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x11, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x61, 0x70, 0x61, 0x63, + 0x69, 0x74, 0x79, 
0x12, 0x4b, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x6d, + 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x50, 0x0a, 0x13, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x03, 0xa0, 0x42, 0x01, 0x52, + 0x11, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x22, 0x22, 0x0a, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x6c, 0x0a, 0x21, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x65, 0x72, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0c, 0x63, + 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x22, 0x96, 0x04, 0x0a, 0x1b, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x6c, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x03, 0x72, 0x70, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x52, 0x50, 0x43, 0x48, 0x00, 0x52, 0x03, 0x72, 0x70, + 0x63, 0x1a, 0xb1, 0x03, 0x0a, 0x03, 0x52, 0x50, 0x43, 0x12, 0x40, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x52, 0x50, 0x43, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xe7, 0x02, 0x0a, 0x04, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, + 0x54, 0x45, 0x5f, 0x56, 0x4f, 0x4c, 0x55, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x50, + 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x55, 0x4e, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, + 0x5f, 0x56, 0x4f, 0x4c, 0x55, 0x4d, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x53, + 0x54, 0x5f, 0x56, 0x4f, 0x4c, 0x55, 0x4d, 0x45, 0x53, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x47, + 0x45, 0x54, 0x5f, 0x43, 0x41, 0x50, 0x41, 
0x43, 0x49, 0x54, 0x59, 0x10, 0x04, 0x12, 0x1a, 0x0a, + 0x16, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x53, + 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x49, 0x53, + 0x54, 0x5f, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x53, 0x10, 0x06, 0x12, 0x10, 0x0a, + 0x0c, 0x43, 0x4c, 0x4f, 0x4e, 0x45, 0x5f, 0x56, 0x4f, 0x4c, 0x55, 0x4d, 0x45, 0x10, 0x07, 0x12, + 0x14, 0x0a, 0x10, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x4f, + 0x4e, 0x4c, 0x59, 0x10, 0x08, 0x12, 0x11, 0x0a, 0x0d, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x5f, + 0x56, 0x4f, 0x4c, 0x55, 0x4d, 0x45, 0x10, 0x09, 0x12, 0x20, 0x0a, 0x1c, 0x4c, 0x49, 0x53, 0x54, + 0x5f, 0x56, 0x4f, 0x4c, 0x55, 0x4d, 0x45, 0x53, 0x5f, 0x50, 0x55, 0x42, 0x4c, 0x49, 0x53, 0x48, + 0x45, 0x44, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x53, 0x10, 0x0a, 0x12, 0x19, 0x0a, 0x10, 0x56, 0x4f, + 0x4c, 0x55, 0x4d, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x0b, + 0x1a, 0x03, 0xa0, 0x42, 0x01, 0x12, 0x13, 0x0a, 0x0a, 0x47, 0x45, 0x54, 0x5f, 0x56, 0x4f, 0x4c, + 0x55, 0x4d, 0x45, 0x10, 0x0c, 0x1a, 0x03, 0xa0, 0x42, 0x01, 0x12, 0x21, 0x0a, 0x18, 0x53, 0x49, + 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x5f, + 0x57, 0x52, 0x49, 0x54, 0x45, 0x52, 0x10, 0x0d, 0x1a, 0x03, 0xa0, 0x42, 0x01, 0x12, 0x16, 0x0a, + 0x0d, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x59, 0x5f, 0x56, 0x4f, 0x4c, 0x55, 0x4d, 0x45, 0x10, 0x0e, + 0x1a, 0x03, 0xa0, 0x42, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xea, 0x02, + 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, + 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x49, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, + 0x12, 0x4d, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, + 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 
0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x46, 0x0a, 0x16, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x22, 0x88, 0x02, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x64, 0x12, + 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x72, 0x65, + 0x61, 0x64, 0x79, 0x5f, 0x74, 0x6f, 0x5f, 0x75, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x6f, 0x55, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x11, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xa0, 0x42, 0x01, 0x52, 0x0f, 0x67, 0x72, + 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x64, 0x22, 0xbf, 0x01, + 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x63, 0x73, 0x69, 0x2e, + 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 
0x22, + 0x18, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x14, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x45, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x49, 0x64, 0x12, 0x48, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x1a, + 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xac, 0x01, 0x0a, 0x15, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, + 0x72, 0x69, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x1a, 0x35, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x2c, 0x0a, 0x08, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0xd0, 0x02, 0x0a, 0x1d, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x0e, 0x63, 0x61, 0x70, + 0x61, 0x63, 0x69, 
0x74, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x63, + 0x69, 0x74, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, + 0x74, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x51, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, + 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x45, 0x78, 0x70, 0x61, + 0x6e, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0x98, 0x42, + 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x11, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, + 0x10, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x79, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7f, 0x0a, + 0x1e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x45, 0x78, 0x70, 0x61, 0x6e, + 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, + 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x65, + 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x6e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x70, + 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0xf0, + 0x04, 0x0a, 0x16, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x5b, 0x0a, 0x0f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, + 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x32, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, + 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x74, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x11, 0x73, 0x74, 0x61, 0x67, 0x69, 
0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x45, 0x0a, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x10, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x07, 0x73, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x63, 0x73, + 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x0e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x67, + 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0d, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x1a, 0x41, 0x0a, 0x13, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x40, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x19, 0x0a, 0x17, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, 0x0a, 0x18, + 0x4e, 0x6f, 0x64, 0x65, 0x55, 0x6e, 0x73, 0x74, 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x74, 0x61, 0x67, 0x69, 0x6e, 0x67, + 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x11, 0x73, 0x74, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x50, 0x61, 0x74, 0x68, 0x22, 0x1b, 0x0a, 0x19, 0x4e, 0x6f, 
0x64, 0x65, 0x55, 0x6e, 0x73, + 0x74, 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0xb5, 0x05, 0x0a, 0x18, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, + 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x5d, 0x0a, 0x0f, + 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, + 0x6f, 0x64, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x73, + 0x74, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x74, 0x61, 0x67, 0x69, 0x6e, + 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x45, 0x0a, 0x11, + 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x79, 0x52, 0x10, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x12, + 0x4c, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, + 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x5a, 0x0a, + 0x0e, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, + 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, + 0x6f, 0x64, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x1a, 0x41, 0x0a, 0x13, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 
0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x40, 0x0a, 0x12, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x1b, 0x0a, 0x19, 0x4e, 0x6f, + 0x64, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5a, 0x0a, 0x1a, 0x4e, 0x6f, 0x64, 0x65, 0x55, + 0x6e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, + 0x61, 0x74, 0x68, 0x22, 0x1d, 0x0a, 0x1b, 0x4e, 0x6f, 0x64, 0x65, 0x55, 0x6e, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x89, 0x01, 0x0a, 0x19, 0x4e, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x74, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x12, 0x1f, 0x0a, + 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x2e, + 0x0a, 0x13, 0x73, 0x74, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x74, 0x61, + 0x67, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x22, 0x90, + 0x01, 0x0a, 0x1a, 0x4e, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, + 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x73, 0x61, 0x67, + 0x65, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xa0, 0x42, 0x01, + 0x52, 0x0f, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0xaf, 
0x01, 0x0a, 0x0b, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x04, 0x75, 0x6e, 0x69, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, + 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x55, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x55, 0x6e, 0x69, + 0x74, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0x2a, 0x0a, 0x04, 0x55, 0x6e, 0x69, 0x74, 0x12, + 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, + 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x4e, 0x4f, 0x44, 0x45, + 0x53, 0x10, 0x02, 0x22, 0x4c, 0x0a, 0x0f, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x62, 0x6e, 0x6f, 0x72, 0x6d, + 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x61, 0x62, 0x6e, 0x6f, 0x72, 0x6d, + 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x3a, 0x03, 0xa0, 0x42, + 0x01, 0x22, 0x1c, 0x0a, 0x1a, 0x4e, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x60, 0x0a, 0x1b, 0x4e, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, + 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x52, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x22, 0xc9, 0x02, 0x0a, 0x15, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x35, 0x0a, 0x03, 0x72, + 0x70, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, + 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x52, 0x50, 0x43, 0x48, 0x00, 0x52, 0x03, 0x72, + 0x70, 0x63, 0x1a, 0xf0, 0x01, 0x0a, 0x03, 0x52, 0x50, 0x43, 0x12, 0x3a, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, + 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x52, 0x50, 0x43, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xac, 0x01, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, + 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x56, 0x4f, + 0x4c, 0x55, 0x4d, 0x45, 0x10, 0x01, 0x12, 
0x14, 0x0a, 0x10, 0x47, 0x45, 0x54, 0x5f, 0x56, 0x4f, + 0x4c, 0x55, 0x4d, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x53, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, + 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x5f, 0x56, 0x4f, 0x4c, 0x55, 0x4d, 0x45, 0x10, 0x03, 0x12, + 0x19, 0x0a, 0x10, 0x56, 0x4f, 0x4c, 0x55, 0x4d, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x10, 0x04, 0x1a, 0x03, 0xa0, 0x42, 0x01, 0x12, 0x21, 0x0a, 0x18, 0x53, 0x49, + 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x4e, 0x4f, 0x44, 0x45, 0x5f, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x5f, + 0x57, 0x52, 0x49, 0x54, 0x45, 0x52, 0x10, 0x05, 0x1a, 0x03, 0xa0, 0x42, 0x01, 0x12, 0x16, 0x0a, + 0x12, 0x56, 0x4f, 0x4c, 0x55, 0x4d, 0x45, 0x5f, 0x4d, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x47, 0x52, + 0x4f, 0x55, 0x50, 0x10, 0x06, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x14, 0x0a, + 0x12, 0x4e, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0xa2, 0x01, 0x0a, 0x13, 0x4e, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, + 0x64, 0x65, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x50, 0x65, + 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x41, 0x0a, 0x13, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x69, + 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x6f, + 0x6c, 0x6f, 0x67, 0x79, 0x52, 0x12, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x69, 0x62, 0x6c, 0x65, + 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x22, 0x98, 0x03, 0x0a, 0x17, 0x4e, 0x6f, 0x64, + 0x65, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, + 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x3c, 0x0a, 0x0e, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x5f, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x73, 0x69, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x0d, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x74, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, + 0x74, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, + 0x12, 0x45, 0x0a, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x73, + 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x10, 0x76, 0x6f, 0x6c, 0x75, 
0x6d, 0x65, 0x43, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4e, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, + 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x06, 0x98, 0x42, 0x01, 0xa0, 0x42, 0x01, 0x52, 0x07, + 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x41, 0x0a, 0x18, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x70, 0x61, 0x6e, + 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, + 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x2c, 0x0a, 0x25, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x3a, + 0x03, 0xa0, 0x42, 0x01, 0x22, 0x7b, 0x0a, 0x26, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, + 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x0c, + 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x3a, 0x03, 0xa0, 0x42, + 0x01, 0x22, 0x87, 0x02, 0x0a, 0x20, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x40, 0x0a, 0x03, 0x72, 0x70, 0x63, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x52, 0x50, + 0x43, 0x48, 0x00, 0x52, 0x03, 0x72, 0x70, 0x63, 0x1a, 0x93, 0x01, 0x0a, 0x03, 0x52, 0x50, 0x43, + 0x12, 0x45, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x2e, 0x52, 0x50, 0x43, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 
0x12, + 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x30, 0x0a, 0x27, + 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x47, 0x45, + 0x54, 0x5f, 0x56, 0x4f, 0x4c, 0x55, 0x4d, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x53, + 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, 0x01, 0x1a, 0x03, 0xa0, 0x42, 0x01, 0x3a, 0x03, + 0xa0, 0x42, 0x01, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x92, 0x03, 0x0a, 0x20, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x49, 0x64, 0x73, + 0x12, 0x54, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x35, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x63, 0x73, 0x69, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x03, 0xa0, 0x42, 0x01, + 0x22, 0x6c, 0x0a, 0x21, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x0d, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x53, 0x6e, 
0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3a, 0x03, 0xa0, 0x42, 0x01, 0x22, 0xd9, + 0x01, 0x0a, 0x13, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x74, 0x6f, 0x5f, + 0x75, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x79, + 0x54, 0x6f, 0x55, 0x73, 0x65, 0x3a, 0x03, 0xa0, 0x42, 0x01, 0x22, 0x88, 0x02, 0x0a, 0x20, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2a, 0x0a, 0x11, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x64, 0x73, 0x12, 0x54, + 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x35, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x3a, 0x03, 0xa0, 0x42, 0x01, 0x22, 0x28, 0x0a, 0x21, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x3a, 0x03, 0xa0, 0x42, 0x01, 0x22, + 0x82, 0x02, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x67, 0x72, 
0x6f, 0x75, 0x70, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x67, 0x72, + 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, + 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x64, 0x73, + 0x12, 0x51, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x32, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, + 0x03, 0xa0, 0x42, 0x01, 0x22, 0x69, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x0d, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3a, 0x03, 0xa0, 0x42, 0x01, 0x22, + 0x4f, 0x0a, 0x0d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x22, 0x95, 0x02, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, + 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x6f, 0x66, + 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x69, 0x6e, 0x67, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x61, + 0x78, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0a, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x4f, 0x0a, 0x07, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 
0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, + 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xdb, 0x01, 0x0a, 0x1c, 0x47, 0x65, 0x74, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x13, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x11, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, 0x63, + 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x63, + 0x69, 0x74, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xc4, 0x02, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x61, + 0x73, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x12, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x4f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 
0x6e, + 0x74, 0x72, 0x79, 0x42, 0x03, 0x98, 0x42, 0x01, 0x52, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7, 0x01, + 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x44, 0x65, 0x6c, + 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x13, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, + 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x11, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x5f, + 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, + 0x63, 0x69, 0x74, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3c, 0x0a, 0x0e, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2a, 0x47, 0x0a, 0x11, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x49, 0x58, + 0x45, 0x44, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x56, + 0x41, 0x52, 0x49, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x02, + 0x32, 0xfa, 0x01, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x4e, 0x0a, + 0x0d, 0x47, 0x65, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, + 0x15, 0x47, 0x65, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x24, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x47, 0x65, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, + 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x05, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x12, 0x14, + 0x2e, 0x63, 0x73, 
0x69, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, + 0x6f, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0xbb, 0x0a, + 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x0c, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1b, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x73, 0x69, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0c, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1b, 0x2e, 0x63, 0x73, 0x69, 0x2e, + 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x12, 0x26, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x73, 0x69, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x65, 0x72, 0x55, 0x6e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x12, 0x28, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x55, 0x6e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x73, + 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x55, + 0x6e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x75, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x48, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 
0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x6f, 0x6c, 0x75, + 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x73, 0x69, + 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0b, 0x47, 0x65, 0x74, + 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x12, 0x1a, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, + 0x74, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x28, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x65, 0x72, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x73, 0x69, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x47, 0x65, + 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x1d, 0x2e, 0x63, 0x73, 0x69, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, + 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x1d, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x63, 0x73, + 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, + 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x1c, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, + 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x45, 0x78, 0x70, 0x61, 0x6e, + 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x25, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x45, 0x78, 0x70, 0x61, 0x6e, + 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x65, 0x72, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x56, 0x6f, 0x6c, 
0x75, 0x6d, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x13, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, + 0x22, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x6c, 0x65, 0x72, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0xa0, 0x42, 0x01, 0x12, 0x6c, 0x0a, + 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x69, 0x66, + 0x79, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x25, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, + 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x69, 0x66, + 0x79, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, + 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0xa0, 0x42, 0x01, 0x32, 0xf6, 0x03, 0x0a, 0x0f, + 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, + 0x81, 0x01, 0x0a, 0x1e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x6c, 0x65, 0x72, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x47, 0x65, 0x74, 0x43, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2e, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x75, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, + 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x12, 0x28, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x73, 0x69, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0xa0, 0x42, 0x01, 0x12, 0x75, 0x0a, 0x19, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x28, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 
0x74, + 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0xa0, 0x42, + 0x01, 0x12, 0x6c, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x25, 0x2e, 0x63, 0x73, + 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0xa0, 0x42, 0x01, 0x1a, + 0x03, 0xa0, 0x42, 0x01, 0x32, 0xd9, 0x01, 0x0a, 0x10, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x65, 0x0a, 0x14, 0x47, 0x65, 0x74, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x23, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x41, 0x6c, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, + 0x12, 0x59, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x44, + 0x65, 0x6c, 0x74, 0x61, 0x12, 0x1f, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, + 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x1a, 0x03, 0xa0, 0x42, 0x01, + 0x32, 0xda, 0x05, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x54, 0x0a, 0x0f, 0x4e, 0x6f, 0x64, + 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1e, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x56, + 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x5a, 0x0a, 0x11, 0x4e, 0x6f, 0x64, 0x65, 0x55, 0x6e, 0x73, 0x74, 0x61, 0x67, 0x65, 0x56, 0x6f, + 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 0x55, 0x6e, 0x73, 0x74, 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x4e, 0x6f, 0x64, 0x65, 0x55, 0x6e, 0x73, 0x74, 0x61, 0x67, 0x65, 0x56, 0x6f, 0x6c, 0x75, 0x6d, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x11, 0x4e, + 0x6f, 0x64, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, + 0x12, 0x20, 0x2e, 
0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x50, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x13, 0x4e, 0x6f, 0x64, 0x65, 0x55, + 0x6e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x22, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x55, 0x6e, 0x70, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, + 0x55, 0x6e, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x12, 0x4e, 0x6f, 0x64, + 0x65, 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, + 0x21, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x74, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, + 0x47, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x10, 0x4e, 0x6f, 0x64, 0x65, + 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1f, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x64, + 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, + 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x70, 0x61, 0x6e, + 0x64, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x60, 0x0a, 0x13, 0x4e, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, + 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x63, + 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x74, 0x43, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x74, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x1a, 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, + 0x47, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, + 0x2e, 0x63, 0x73, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x3a, 0x3c, 0x0a, + 0x0a, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xa4, 0x08, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x09, 0x61, 0x6c, 0x70, 0x68, 0x61, 
0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x4c, 0x0a, 0x10, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0xa4, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x3d, 0x0a, 0x0a, 0x63, 0x73, 0x69, + 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xa3, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, + 0x73, 0x69, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x3a, 0x3f, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xa4, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x3a, 0x45, 0x0a, 0x0d, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xa4, 0x08, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x3a, 0x42, 0x0a, 0x0c, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0xa4, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x3a, 0x45, 0x0a, 0x0d, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xa4, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x38, 0x5a, 0x36, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x2d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2d, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x66, 0x61, 0x63, 0x65, 0x2f, 0x73, 0x70, 0x65, 0x63, 0x2f, 0x6c, 0x69, 0x62, 0x2f, 0x67, + 0x6f, 0x2f, 0x63, 0x73, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_csi_proto_rawDescOnce sync.Once + file_csi_proto_rawDescData = file_csi_proto_rawDesc +) + +func file_csi_proto_rawDescGZIP() []byte { + file_csi_proto_rawDescOnce.Do(func() { + file_csi_proto_rawDescData = protoimpl.X.CompressGZIP(file_csi_proto_rawDescData) + }) + return file_csi_proto_rawDescData +} + +var file_csi_proto_enumTypes = make([]protoimpl.EnumInfo, 8) +var file_csi_proto_msgTypes = make([]protoimpl.MessageInfo, 131) +var file_csi_proto_goTypes = []interface{}{ + (BlockMetadataType)(0), // 0: csi.v1.BlockMetadataType + 
(PluginCapability_Service_Type)(0), // 1: csi.v1.PluginCapability.Service.Type + (PluginCapability_VolumeExpansion_Type)(0), // 2: csi.v1.PluginCapability.VolumeExpansion.Type + (VolumeCapability_AccessMode_Mode)(0), // 3: csi.v1.VolumeCapability.AccessMode.Mode + (ControllerServiceCapability_RPC_Type)(0), // 4: csi.v1.ControllerServiceCapability.RPC.Type + (VolumeUsage_Unit)(0), // 5: csi.v1.VolumeUsage.Unit + (NodeServiceCapability_RPC_Type)(0), // 6: csi.v1.NodeServiceCapability.RPC.Type + (GroupControllerServiceCapability_RPC_Type)(0), // 7: csi.v1.GroupControllerServiceCapability.RPC.Type + (*GetPluginInfoRequest)(nil), // 8: csi.v1.GetPluginInfoRequest + (*GetPluginInfoResponse)(nil), // 9: csi.v1.GetPluginInfoResponse + (*GetPluginCapabilitiesRequest)(nil), // 10: csi.v1.GetPluginCapabilitiesRequest + (*GetPluginCapabilitiesResponse)(nil), // 11: csi.v1.GetPluginCapabilitiesResponse + (*PluginCapability)(nil), // 12: csi.v1.PluginCapability + (*ProbeRequest)(nil), // 13: csi.v1.ProbeRequest + (*ProbeResponse)(nil), // 14: csi.v1.ProbeResponse + (*CreateVolumeRequest)(nil), // 15: csi.v1.CreateVolumeRequest + (*VolumeContentSource)(nil), // 16: csi.v1.VolumeContentSource + (*CreateVolumeResponse)(nil), // 17: csi.v1.CreateVolumeResponse + (*VolumeCapability)(nil), // 18: csi.v1.VolumeCapability + (*CapacityRange)(nil), // 19: csi.v1.CapacityRange + (*Volume)(nil), // 20: csi.v1.Volume + (*TopologyRequirement)(nil), // 21: csi.v1.TopologyRequirement + (*Topology)(nil), // 22: csi.v1.Topology + (*DeleteVolumeRequest)(nil), // 23: csi.v1.DeleteVolumeRequest + (*DeleteVolumeResponse)(nil), // 24: csi.v1.DeleteVolumeResponse + (*ControllerPublishVolumeRequest)(nil), // 25: csi.v1.ControllerPublishVolumeRequest + (*ControllerPublishVolumeResponse)(nil), // 26: csi.v1.ControllerPublishVolumeResponse + (*ControllerUnpublishVolumeRequest)(nil), // 27: csi.v1.ControllerUnpublishVolumeRequest + (*ControllerUnpublishVolumeResponse)(nil), // 28: csi.v1.ControllerUnpublishVolumeResponse + (*ValidateVolumeCapabilitiesRequest)(nil), // 29: csi.v1.ValidateVolumeCapabilitiesRequest + (*ValidateVolumeCapabilitiesResponse)(nil), // 30: csi.v1.ValidateVolumeCapabilitiesResponse + (*ListVolumesRequest)(nil), // 31: csi.v1.ListVolumesRequest + (*ListVolumesResponse)(nil), // 32: csi.v1.ListVolumesResponse + (*ControllerGetVolumeRequest)(nil), // 33: csi.v1.ControllerGetVolumeRequest + (*ControllerGetVolumeResponse)(nil), // 34: csi.v1.ControllerGetVolumeResponse + (*ControllerModifyVolumeRequest)(nil), // 35: csi.v1.ControllerModifyVolumeRequest + (*ControllerModifyVolumeResponse)(nil), // 36: csi.v1.ControllerModifyVolumeResponse + (*GetCapacityRequest)(nil), // 37: csi.v1.GetCapacityRequest + (*GetCapacityResponse)(nil), // 38: csi.v1.GetCapacityResponse + (*ControllerGetCapabilitiesRequest)(nil), // 39: csi.v1.ControllerGetCapabilitiesRequest + (*ControllerGetCapabilitiesResponse)(nil), // 40: csi.v1.ControllerGetCapabilitiesResponse + (*ControllerServiceCapability)(nil), // 41: csi.v1.ControllerServiceCapability + (*CreateSnapshotRequest)(nil), // 42: csi.v1.CreateSnapshotRequest + (*CreateSnapshotResponse)(nil), // 43: csi.v1.CreateSnapshotResponse + (*Snapshot)(nil), // 44: csi.v1.Snapshot + (*DeleteSnapshotRequest)(nil), // 45: csi.v1.DeleteSnapshotRequest + (*DeleteSnapshotResponse)(nil), // 46: csi.v1.DeleteSnapshotResponse + (*ListSnapshotsRequest)(nil), // 47: csi.v1.ListSnapshotsRequest + (*ListSnapshotsResponse)(nil), // 48: csi.v1.ListSnapshotsResponse + (*ControllerExpandVolumeRequest)(nil), 
// 49: csi.v1.ControllerExpandVolumeRequest + (*ControllerExpandVolumeResponse)(nil), // 50: csi.v1.ControllerExpandVolumeResponse + (*NodeStageVolumeRequest)(nil), // 51: csi.v1.NodeStageVolumeRequest + (*NodeStageVolumeResponse)(nil), // 52: csi.v1.NodeStageVolumeResponse + (*NodeUnstageVolumeRequest)(nil), // 53: csi.v1.NodeUnstageVolumeRequest + (*NodeUnstageVolumeResponse)(nil), // 54: csi.v1.NodeUnstageVolumeResponse + (*NodePublishVolumeRequest)(nil), // 55: csi.v1.NodePublishVolumeRequest + (*NodePublishVolumeResponse)(nil), // 56: csi.v1.NodePublishVolumeResponse + (*NodeUnpublishVolumeRequest)(nil), // 57: csi.v1.NodeUnpublishVolumeRequest + (*NodeUnpublishVolumeResponse)(nil), // 58: csi.v1.NodeUnpublishVolumeResponse + (*NodeGetVolumeStatsRequest)(nil), // 59: csi.v1.NodeGetVolumeStatsRequest + (*NodeGetVolumeStatsResponse)(nil), // 60: csi.v1.NodeGetVolumeStatsResponse + (*VolumeUsage)(nil), // 61: csi.v1.VolumeUsage + (*VolumeCondition)(nil), // 62: csi.v1.VolumeCondition + (*NodeGetCapabilitiesRequest)(nil), // 63: csi.v1.NodeGetCapabilitiesRequest + (*NodeGetCapabilitiesResponse)(nil), // 64: csi.v1.NodeGetCapabilitiesResponse + (*NodeServiceCapability)(nil), // 65: csi.v1.NodeServiceCapability + (*NodeGetInfoRequest)(nil), // 66: csi.v1.NodeGetInfoRequest + (*NodeGetInfoResponse)(nil), // 67: csi.v1.NodeGetInfoResponse + (*NodeExpandVolumeRequest)(nil), // 68: csi.v1.NodeExpandVolumeRequest + (*NodeExpandVolumeResponse)(nil), // 69: csi.v1.NodeExpandVolumeResponse + (*GroupControllerGetCapabilitiesRequest)(nil), // 70: csi.v1.GroupControllerGetCapabilitiesRequest + (*GroupControllerGetCapabilitiesResponse)(nil), // 71: csi.v1.GroupControllerGetCapabilitiesResponse + (*GroupControllerServiceCapability)(nil), // 72: csi.v1.GroupControllerServiceCapability + (*CreateVolumeGroupSnapshotRequest)(nil), // 73: csi.v1.CreateVolumeGroupSnapshotRequest + (*CreateVolumeGroupSnapshotResponse)(nil), // 74: csi.v1.CreateVolumeGroupSnapshotResponse + (*VolumeGroupSnapshot)(nil), // 75: csi.v1.VolumeGroupSnapshot + (*DeleteVolumeGroupSnapshotRequest)(nil), // 76: csi.v1.DeleteVolumeGroupSnapshotRequest + (*DeleteVolumeGroupSnapshotResponse)(nil), // 77: csi.v1.DeleteVolumeGroupSnapshotResponse + (*GetVolumeGroupSnapshotRequest)(nil), // 78: csi.v1.GetVolumeGroupSnapshotRequest + (*GetVolumeGroupSnapshotResponse)(nil), // 79: csi.v1.GetVolumeGroupSnapshotResponse + (*BlockMetadata)(nil), // 80: csi.v1.BlockMetadata + (*GetMetadataAllocatedRequest)(nil), // 81: csi.v1.GetMetadataAllocatedRequest + (*GetMetadataAllocatedResponse)(nil), // 82: csi.v1.GetMetadataAllocatedResponse + (*GetMetadataDeltaRequest)(nil), // 83: csi.v1.GetMetadataDeltaRequest + (*GetMetadataDeltaResponse)(nil), // 84: csi.v1.GetMetadataDeltaResponse + nil, // 85: csi.v1.GetPluginInfoResponse.ManifestEntry + (*PluginCapability_Service)(nil), // 86: csi.v1.PluginCapability.Service + (*PluginCapability_VolumeExpansion)(nil), // 87: csi.v1.PluginCapability.VolumeExpansion + nil, // 88: csi.v1.CreateVolumeRequest.ParametersEntry + nil, // 89: csi.v1.CreateVolumeRequest.SecretsEntry + nil, // 90: csi.v1.CreateVolumeRequest.MutableParametersEntry + (*VolumeContentSource_SnapshotSource)(nil), // 91: csi.v1.VolumeContentSource.SnapshotSource + (*VolumeContentSource_VolumeSource)(nil), // 92: csi.v1.VolumeContentSource.VolumeSource + (*VolumeCapability_BlockVolume)(nil), // 93: csi.v1.VolumeCapability.BlockVolume + (*VolumeCapability_MountVolume)(nil), // 94: csi.v1.VolumeCapability.MountVolume + 
(*VolumeCapability_AccessMode)(nil), // 95: csi.v1.VolumeCapability.AccessMode + nil, // 96: csi.v1.Volume.VolumeContextEntry + nil, // 97: csi.v1.Topology.SegmentsEntry + nil, // 98: csi.v1.DeleteVolumeRequest.SecretsEntry + nil, // 99: csi.v1.ControllerPublishVolumeRequest.SecretsEntry + nil, // 100: csi.v1.ControllerPublishVolumeRequest.VolumeContextEntry + nil, // 101: csi.v1.ControllerPublishVolumeResponse.PublishContextEntry + nil, // 102: csi.v1.ControllerUnpublishVolumeRequest.SecretsEntry + nil, // 103: csi.v1.ValidateVolumeCapabilitiesRequest.VolumeContextEntry + nil, // 104: csi.v1.ValidateVolumeCapabilitiesRequest.ParametersEntry + nil, // 105: csi.v1.ValidateVolumeCapabilitiesRequest.SecretsEntry + nil, // 106: csi.v1.ValidateVolumeCapabilitiesRequest.MutableParametersEntry + (*ValidateVolumeCapabilitiesResponse_Confirmed)(nil), // 107: csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed + nil, // 108: csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry + nil, // 109: csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry + nil, // 110: csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.MutableParametersEntry + (*ListVolumesResponse_VolumeStatus)(nil), // 111: csi.v1.ListVolumesResponse.VolumeStatus + (*ListVolumesResponse_Entry)(nil), // 112: csi.v1.ListVolumesResponse.Entry + (*ControllerGetVolumeResponse_VolumeStatus)(nil), // 113: csi.v1.ControllerGetVolumeResponse.VolumeStatus + nil, // 114: csi.v1.ControllerModifyVolumeRequest.SecretsEntry + nil, // 115: csi.v1.ControllerModifyVolumeRequest.MutableParametersEntry + nil, // 116: csi.v1.GetCapacityRequest.ParametersEntry + (*ControllerServiceCapability_RPC)(nil), // 117: csi.v1.ControllerServiceCapability.RPC + nil, // 118: csi.v1.CreateSnapshotRequest.SecretsEntry + nil, // 119: csi.v1.CreateSnapshotRequest.ParametersEntry + nil, // 120: csi.v1.DeleteSnapshotRequest.SecretsEntry + nil, // 121: csi.v1.ListSnapshotsRequest.SecretsEntry + (*ListSnapshotsResponse_Entry)(nil), // 122: csi.v1.ListSnapshotsResponse.Entry + nil, // 123: csi.v1.ControllerExpandVolumeRequest.SecretsEntry + nil, // 124: csi.v1.NodeStageVolumeRequest.PublishContextEntry + nil, // 125: csi.v1.NodeStageVolumeRequest.SecretsEntry + nil, // 126: csi.v1.NodeStageVolumeRequest.VolumeContextEntry + nil, // 127: csi.v1.NodePublishVolumeRequest.PublishContextEntry + nil, // 128: csi.v1.NodePublishVolumeRequest.SecretsEntry + nil, // 129: csi.v1.NodePublishVolumeRequest.VolumeContextEntry + (*NodeServiceCapability_RPC)(nil), // 130: csi.v1.NodeServiceCapability.RPC + nil, // 131: csi.v1.NodeExpandVolumeRequest.SecretsEntry + (*GroupControllerServiceCapability_RPC)(nil), // 132: csi.v1.GroupControllerServiceCapability.RPC + nil, // 133: csi.v1.CreateVolumeGroupSnapshotRequest.SecretsEntry + nil, // 134: csi.v1.CreateVolumeGroupSnapshotRequest.ParametersEntry + nil, // 135: csi.v1.DeleteVolumeGroupSnapshotRequest.SecretsEntry + nil, // 136: csi.v1.GetVolumeGroupSnapshotRequest.SecretsEntry + nil, // 137: csi.v1.GetMetadataAllocatedRequest.SecretsEntry + nil, // 138: csi.v1.GetMetadataDeltaRequest.SecretsEntry + (*wrapperspb.BoolValue)(nil), // 139: google.protobuf.BoolValue + (*wrapperspb.Int64Value)(nil), // 140: google.protobuf.Int64Value + (*timestamppb.Timestamp)(nil), // 141: google.protobuf.Timestamp + (*descriptorpb.EnumOptions)(nil), // 142: google.protobuf.EnumOptions + (*descriptorpb.EnumValueOptions)(nil), // 143: google.protobuf.EnumValueOptions + (*descriptorpb.FieldOptions)(nil), // 144: google.protobuf.FieldOptions 
+ (*descriptorpb.MessageOptions)(nil), // 145: google.protobuf.MessageOptions + (*descriptorpb.MethodOptions)(nil), // 146: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 147: google.protobuf.ServiceOptions +} +var file_csi_proto_depIdxs = []int32{ + 85, // 0: csi.v1.GetPluginInfoResponse.manifest:type_name -> csi.v1.GetPluginInfoResponse.ManifestEntry + 12, // 1: csi.v1.GetPluginCapabilitiesResponse.capabilities:type_name -> csi.v1.PluginCapability + 86, // 2: csi.v1.PluginCapability.service:type_name -> csi.v1.PluginCapability.Service + 87, // 3: csi.v1.PluginCapability.volume_expansion:type_name -> csi.v1.PluginCapability.VolumeExpansion + 139, // 4: csi.v1.ProbeResponse.ready:type_name -> google.protobuf.BoolValue + 19, // 5: csi.v1.CreateVolumeRequest.capacity_range:type_name -> csi.v1.CapacityRange + 18, // 6: csi.v1.CreateVolumeRequest.volume_capabilities:type_name -> csi.v1.VolumeCapability + 88, // 7: csi.v1.CreateVolumeRequest.parameters:type_name -> csi.v1.CreateVolumeRequest.ParametersEntry + 89, // 8: csi.v1.CreateVolumeRequest.secrets:type_name -> csi.v1.CreateVolumeRequest.SecretsEntry + 16, // 9: csi.v1.CreateVolumeRequest.volume_content_source:type_name -> csi.v1.VolumeContentSource + 21, // 10: csi.v1.CreateVolumeRequest.accessibility_requirements:type_name -> csi.v1.TopologyRequirement + 90, // 11: csi.v1.CreateVolumeRequest.mutable_parameters:type_name -> csi.v1.CreateVolumeRequest.MutableParametersEntry + 91, // 12: csi.v1.VolumeContentSource.snapshot:type_name -> csi.v1.VolumeContentSource.SnapshotSource + 92, // 13: csi.v1.VolumeContentSource.volume:type_name -> csi.v1.VolumeContentSource.VolumeSource + 20, // 14: csi.v1.CreateVolumeResponse.volume:type_name -> csi.v1.Volume + 93, // 15: csi.v1.VolumeCapability.block:type_name -> csi.v1.VolumeCapability.BlockVolume + 94, // 16: csi.v1.VolumeCapability.mount:type_name -> csi.v1.VolumeCapability.MountVolume + 95, // 17: csi.v1.VolumeCapability.access_mode:type_name -> csi.v1.VolumeCapability.AccessMode + 96, // 18: csi.v1.Volume.volume_context:type_name -> csi.v1.Volume.VolumeContextEntry + 16, // 19: csi.v1.Volume.content_source:type_name -> csi.v1.VolumeContentSource + 22, // 20: csi.v1.Volume.accessible_topology:type_name -> csi.v1.Topology + 22, // 21: csi.v1.TopologyRequirement.requisite:type_name -> csi.v1.Topology + 22, // 22: csi.v1.TopologyRequirement.preferred:type_name -> csi.v1.Topology + 97, // 23: csi.v1.Topology.segments:type_name -> csi.v1.Topology.SegmentsEntry + 98, // 24: csi.v1.DeleteVolumeRequest.secrets:type_name -> csi.v1.DeleteVolumeRequest.SecretsEntry + 18, // 25: csi.v1.ControllerPublishVolumeRequest.volume_capability:type_name -> csi.v1.VolumeCapability + 99, // 26: csi.v1.ControllerPublishVolumeRequest.secrets:type_name -> csi.v1.ControllerPublishVolumeRequest.SecretsEntry + 100, // 27: csi.v1.ControllerPublishVolumeRequest.volume_context:type_name -> csi.v1.ControllerPublishVolumeRequest.VolumeContextEntry + 101, // 28: csi.v1.ControllerPublishVolumeResponse.publish_context:type_name -> csi.v1.ControllerPublishVolumeResponse.PublishContextEntry + 102, // 29: csi.v1.ControllerUnpublishVolumeRequest.secrets:type_name -> csi.v1.ControllerUnpublishVolumeRequest.SecretsEntry + 103, // 30: csi.v1.ValidateVolumeCapabilitiesRequest.volume_context:type_name -> csi.v1.ValidateVolumeCapabilitiesRequest.VolumeContextEntry + 18, // 31: csi.v1.ValidateVolumeCapabilitiesRequest.volume_capabilities:type_name -> csi.v1.VolumeCapability + 104, // 32: 
csi.v1.ValidateVolumeCapabilitiesRequest.parameters:type_name -> csi.v1.ValidateVolumeCapabilitiesRequest.ParametersEntry + 105, // 33: csi.v1.ValidateVolumeCapabilitiesRequest.secrets:type_name -> csi.v1.ValidateVolumeCapabilitiesRequest.SecretsEntry + 106, // 34: csi.v1.ValidateVolumeCapabilitiesRequest.mutable_parameters:type_name -> csi.v1.ValidateVolumeCapabilitiesRequest.MutableParametersEntry + 107, // 35: csi.v1.ValidateVolumeCapabilitiesResponse.confirmed:type_name -> csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed + 112, // 36: csi.v1.ListVolumesResponse.entries:type_name -> csi.v1.ListVolumesResponse.Entry + 20, // 37: csi.v1.ControllerGetVolumeResponse.volume:type_name -> csi.v1.Volume + 113, // 38: csi.v1.ControllerGetVolumeResponse.status:type_name -> csi.v1.ControllerGetVolumeResponse.VolumeStatus + 114, // 39: csi.v1.ControllerModifyVolumeRequest.secrets:type_name -> csi.v1.ControllerModifyVolumeRequest.SecretsEntry + 115, // 40: csi.v1.ControllerModifyVolumeRequest.mutable_parameters:type_name -> csi.v1.ControllerModifyVolumeRequest.MutableParametersEntry + 18, // 41: csi.v1.GetCapacityRequest.volume_capabilities:type_name -> csi.v1.VolumeCapability + 116, // 42: csi.v1.GetCapacityRequest.parameters:type_name -> csi.v1.GetCapacityRequest.ParametersEntry + 22, // 43: csi.v1.GetCapacityRequest.accessible_topology:type_name -> csi.v1.Topology + 140, // 44: csi.v1.GetCapacityResponse.maximum_volume_size:type_name -> google.protobuf.Int64Value + 140, // 45: csi.v1.GetCapacityResponse.minimum_volume_size:type_name -> google.protobuf.Int64Value + 41, // 46: csi.v1.ControllerGetCapabilitiesResponse.capabilities:type_name -> csi.v1.ControllerServiceCapability + 117, // 47: csi.v1.ControllerServiceCapability.rpc:type_name -> csi.v1.ControllerServiceCapability.RPC + 118, // 48: csi.v1.CreateSnapshotRequest.secrets:type_name -> csi.v1.CreateSnapshotRequest.SecretsEntry + 119, // 49: csi.v1.CreateSnapshotRequest.parameters:type_name -> csi.v1.CreateSnapshotRequest.ParametersEntry + 44, // 50: csi.v1.CreateSnapshotResponse.snapshot:type_name -> csi.v1.Snapshot + 141, // 51: csi.v1.Snapshot.creation_time:type_name -> google.protobuf.Timestamp + 120, // 52: csi.v1.DeleteSnapshotRequest.secrets:type_name -> csi.v1.DeleteSnapshotRequest.SecretsEntry + 121, // 53: csi.v1.ListSnapshotsRequest.secrets:type_name -> csi.v1.ListSnapshotsRequest.SecretsEntry + 122, // 54: csi.v1.ListSnapshotsResponse.entries:type_name -> csi.v1.ListSnapshotsResponse.Entry + 19, // 55: csi.v1.ControllerExpandVolumeRequest.capacity_range:type_name -> csi.v1.CapacityRange + 123, // 56: csi.v1.ControllerExpandVolumeRequest.secrets:type_name -> csi.v1.ControllerExpandVolumeRequest.SecretsEntry + 18, // 57: csi.v1.ControllerExpandVolumeRequest.volume_capability:type_name -> csi.v1.VolumeCapability + 124, // 58: csi.v1.NodeStageVolumeRequest.publish_context:type_name -> csi.v1.NodeStageVolumeRequest.PublishContextEntry + 18, // 59: csi.v1.NodeStageVolumeRequest.volume_capability:type_name -> csi.v1.VolumeCapability + 125, // 60: csi.v1.NodeStageVolumeRequest.secrets:type_name -> csi.v1.NodeStageVolumeRequest.SecretsEntry + 126, // 61: csi.v1.NodeStageVolumeRequest.volume_context:type_name -> csi.v1.NodeStageVolumeRequest.VolumeContextEntry + 127, // 62: csi.v1.NodePublishVolumeRequest.publish_context:type_name -> csi.v1.NodePublishVolumeRequest.PublishContextEntry + 18, // 63: csi.v1.NodePublishVolumeRequest.volume_capability:type_name -> csi.v1.VolumeCapability + 128, // 64: 
csi.v1.NodePublishVolumeRequest.secrets:type_name -> csi.v1.NodePublishVolumeRequest.SecretsEntry + 129, // 65: csi.v1.NodePublishVolumeRequest.volume_context:type_name -> csi.v1.NodePublishVolumeRequest.VolumeContextEntry + 61, // 66: csi.v1.NodeGetVolumeStatsResponse.usage:type_name -> csi.v1.VolumeUsage + 62, // 67: csi.v1.NodeGetVolumeStatsResponse.volume_condition:type_name -> csi.v1.VolumeCondition + 5, // 68: csi.v1.VolumeUsage.unit:type_name -> csi.v1.VolumeUsage.Unit + 65, // 69: csi.v1.NodeGetCapabilitiesResponse.capabilities:type_name -> csi.v1.NodeServiceCapability + 130, // 70: csi.v1.NodeServiceCapability.rpc:type_name -> csi.v1.NodeServiceCapability.RPC + 22, // 71: csi.v1.NodeGetInfoResponse.accessible_topology:type_name -> csi.v1.Topology + 19, // 72: csi.v1.NodeExpandVolumeRequest.capacity_range:type_name -> csi.v1.CapacityRange + 18, // 73: csi.v1.NodeExpandVolumeRequest.volume_capability:type_name -> csi.v1.VolumeCapability + 131, // 74: csi.v1.NodeExpandVolumeRequest.secrets:type_name -> csi.v1.NodeExpandVolumeRequest.SecretsEntry + 72, // 75: csi.v1.GroupControllerGetCapabilitiesResponse.capabilities:type_name -> csi.v1.GroupControllerServiceCapability + 132, // 76: csi.v1.GroupControllerServiceCapability.rpc:type_name -> csi.v1.GroupControllerServiceCapability.RPC + 133, // 77: csi.v1.CreateVolumeGroupSnapshotRequest.secrets:type_name -> csi.v1.CreateVolumeGroupSnapshotRequest.SecretsEntry + 134, // 78: csi.v1.CreateVolumeGroupSnapshotRequest.parameters:type_name -> csi.v1.CreateVolumeGroupSnapshotRequest.ParametersEntry + 75, // 79: csi.v1.CreateVolumeGroupSnapshotResponse.group_snapshot:type_name -> csi.v1.VolumeGroupSnapshot + 44, // 80: csi.v1.VolumeGroupSnapshot.snapshots:type_name -> csi.v1.Snapshot + 141, // 81: csi.v1.VolumeGroupSnapshot.creation_time:type_name -> google.protobuf.Timestamp + 135, // 82: csi.v1.DeleteVolumeGroupSnapshotRequest.secrets:type_name -> csi.v1.DeleteVolumeGroupSnapshotRequest.SecretsEntry + 136, // 83: csi.v1.GetVolumeGroupSnapshotRequest.secrets:type_name -> csi.v1.GetVolumeGroupSnapshotRequest.SecretsEntry + 75, // 84: csi.v1.GetVolumeGroupSnapshotResponse.group_snapshot:type_name -> csi.v1.VolumeGroupSnapshot + 137, // 85: csi.v1.GetMetadataAllocatedRequest.secrets:type_name -> csi.v1.GetMetadataAllocatedRequest.SecretsEntry + 0, // 86: csi.v1.GetMetadataAllocatedResponse.block_metadata_type:type_name -> csi.v1.BlockMetadataType + 80, // 87: csi.v1.GetMetadataAllocatedResponse.block_metadata:type_name -> csi.v1.BlockMetadata + 138, // 88: csi.v1.GetMetadataDeltaRequest.secrets:type_name -> csi.v1.GetMetadataDeltaRequest.SecretsEntry + 0, // 89: csi.v1.GetMetadataDeltaResponse.block_metadata_type:type_name -> csi.v1.BlockMetadataType + 80, // 90: csi.v1.GetMetadataDeltaResponse.block_metadata:type_name -> csi.v1.BlockMetadata + 1, // 91: csi.v1.PluginCapability.Service.type:type_name -> csi.v1.PluginCapability.Service.Type + 2, // 92: csi.v1.PluginCapability.VolumeExpansion.type:type_name -> csi.v1.PluginCapability.VolumeExpansion.Type + 3, // 93: csi.v1.VolumeCapability.AccessMode.mode:type_name -> csi.v1.VolumeCapability.AccessMode.Mode + 108, // 94: csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.volume_context:type_name -> csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry + 18, // 95: csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.volume_capabilities:type_name -> csi.v1.VolumeCapability + 109, // 96: csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.parameters:type_name -> 
csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry + 110, // 97: csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.mutable_parameters:type_name -> csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.MutableParametersEntry + 62, // 98: csi.v1.ListVolumesResponse.VolumeStatus.volume_condition:type_name -> csi.v1.VolumeCondition + 20, // 99: csi.v1.ListVolumesResponse.Entry.volume:type_name -> csi.v1.Volume + 111, // 100: csi.v1.ListVolumesResponse.Entry.status:type_name -> csi.v1.ListVolumesResponse.VolumeStatus + 62, // 101: csi.v1.ControllerGetVolumeResponse.VolumeStatus.volume_condition:type_name -> csi.v1.VolumeCondition + 4, // 102: csi.v1.ControllerServiceCapability.RPC.type:type_name -> csi.v1.ControllerServiceCapability.RPC.Type + 44, // 103: csi.v1.ListSnapshotsResponse.Entry.snapshot:type_name -> csi.v1.Snapshot + 6, // 104: csi.v1.NodeServiceCapability.RPC.type:type_name -> csi.v1.NodeServiceCapability.RPC.Type + 7, // 105: csi.v1.GroupControllerServiceCapability.RPC.type:type_name -> csi.v1.GroupControllerServiceCapability.RPC.Type + 142, // 106: csi.v1.alpha_enum:extendee -> google.protobuf.EnumOptions + 143, // 107: csi.v1.alpha_enum_value:extendee -> google.protobuf.EnumValueOptions + 144, // 108: csi.v1.csi_secret:extendee -> google.protobuf.FieldOptions + 144, // 109: csi.v1.alpha_field:extendee -> google.protobuf.FieldOptions + 145, // 110: csi.v1.alpha_message:extendee -> google.protobuf.MessageOptions + 146, // 111: csi.v1.alpha_method:extendee -> google.protobuf.MethodOptions + 147, // 112: csi.v1.alpha_service:extendee -> google.protobuf.ServiceOptions + 8, // 113: csi.v1.Identity.GetPluginInfo:input_type -> csi.v1.GetPluginInfoRequest + 10, // 114: csi.v1.Identity.GetPluginCapabilities:input_type -> csi.v1.GetPluginCapabilitiesRequest + 13, // 115: csi.v1.Identity.Probe:input_type -> csi.v1.ProbeRequest + 15, // 116: csi.v1.Controller.CreateVolume:input_type -> csi.v1.CreateVolumeRequest + 23, // 117: csi.v1.Controller.DeleteVolume:input_type -> csi.v1.DeleteVolumeRequest + 25, // 118: csi.v1.Controller.ControllerPublishVolume:input_type -> csi.v1.ControllerPublishVolumeRequest + 27, // 119: csi.v1.Controller.ControllerUnpublishVolume:input_type -> csi.v1.ControllerUnpublishVolumeRequest + 29, // 120: csi.v1.Controller.ValidateVolumeCapabilities:input_type -> csi.v1.ValidateVolumeCapabilitiesRequest + 31, // 121: csi.v1.Controller.ListVolumes:input_type -> csi.v1.ListVolumesRequest + 37, // 122: csi.v1.Controller.GetCapacity:input_type -> csi.v1.GetCapacityRequest + 39, // 123: csi.v1.Controller.ControllerGetCapabilities:input_type -> csi.v1.ControllerGetCapabilitiesRequest + 42, // 124: csi.v1.Controller.CreateSnapshot:input_type -> csi.v1.CreateSnapshotRequest + 45, // 125: csi.v1.Controller.DeleteSnapshot:input_type -> csi.v1.DeleteSnapshotRequest + 47, // 126: csi.v1.Controller.ListSnapshots:input_type -> csi.v1.ListSnapshotsRequest + 49, // 127: csi.v1.Controller.ControllerExpandVolume:input_type -> csi.v1.ControllerExpandVolumeRequest + 33, // 128: csi.v1.Controller.ControllerGetVolume:input_type -> csi.v1.ControllerGetVolumeRequest + 35, // 129: csi.v1.Controller.ControllerModifyVolume:input_type -> csi.v1.ControllerModifyVolumeRequest + 70, // 130: csi.v1.GroupController.GroupControllerGetCapabilities:input_type -> csi.v1.GroupControllerGetCapabilitiesRequest + 73, // 131: csi.v1.GroupController.CreateVolumeGroupSnapshot:input_type -> csi.v1.CreateVolumeGroupSnapshotRequest + 76, // 132: 
csi.v1.GroupController.DeleteVolumeGroupSnapshot:input_type -> csi.v1.DeleteVolumeGroupSnapshotRequest + 78, // 133: csi.v1.GroupController.GetVolumeGroupSnapshot:input_type -> csi.v1.GetVolumeGroupSnapshotRequest + 81, // 134: csi.v1.SnapshotMetadata.GetMetadataAllocated:input_type -> csi.v1.GetMetadataAllocatedRequest + 83, // 135: csi.v1.SnapshotMetadata.GetMetadataDelta:input_type -> csi.v1.GetMetadataDeltaRequest + 51, // 136: csi.v1.Node.NodeStageVolume:input_type -> csi.v1.NodeStageVolumeRequest + 53, // 137: csi.v1.Node.NodeUnstageVolume:input_type -> csi.v1.NodeUnstageVolumeRequest + 55, // 138: csi.v1.Node.NodePublishVolume:input_type -> csi.v1.NodePublishVolumeRequest + 57, // 139: csi.v1.Node.NodeUnpublishVolume:input_type -> csi.v1.NodeUnpublishVolumeRequest + 59, // 140: csi.v1.Node.NodeGetVolumeStats:input_type -> csi.v1.NodeGetVolumeStatsRequest + 68, // 141: csi.v1.Node.NodeExpandVolume:input_type -> csi.v1.NodeExpandVolumeRequest + 63, // 142: csi.v1.Node.NodeGetCapabilities:input_type -> csi.v1.NodeGetCapabilitiesRequest + 66, // 143: csi.v1.Node.NodeGetInfo:input_type -> csi.v1.NodeGetInfoRequest + 9, // 144: csi.v1.Identity.GetPluginInfo:output_type -> csi.v1.GetPluginInfoResponse + 11, // 145: csi.v1.Identity.GetPluginCapabilities:output_type -> csi.v1.GetPluginCapabilitiesResponse + 14, // 146: csi.v1.Identity.Probe:output_type -> csi.v1.ProbeResponse + 17, // 147: csi.v1.Controller.CreateVolume:output_type -> csi.v1.CreateVolumeResponse + 24, // 148: csi.v1.Controller.DeleteVolume:output_type -> csi.v1.DeleteVolumeResponse + 26, // 149: csi.v1.Controller.ControllerPublishVolume:output_type -> csi.v1.ControllerPublishVolumeResponse + 28, // 150: csi.v1.Controller.ControllerUnpublishVolume:output_type -> csi.v1.ControllerUnpublishVolumeResponse + 30, // 151: csi.v1.Controller.ValidateVolumeCapabilities:output_type -> csi.v1.ValidateVolumeCapabilitiesResponse + 32, // 152: csi.v1.Controller.ListVolumes:output_type -> csi.v1.ListVolumesResponse + 38, // 153: csi.v1.Controller.GetCapacity:output_type -> csi.v1.GetCapacityResponse + 40, // 154: csi.v1.Controller.ControllerGetCapabilities:output_type -> csi.v1.ControllerGetCapabilitiesResponse + 43, // 155: csi.v1.Controller.CreateSnapshot:output_type -> csi.v1.CreateSnapshotResponse + 46, // 156: csi.v1.Controller.DeleteSnapshot:output_type -> csi.v1.DeleteSnapshotResponse + 48, // 157: csi.v1.Controller.ListSnapshots:output_type -> csi.v1.ListSnapshotsResponse + 50, // 158: csi.v1.Controller.ControllerExpandVolume:output_type -> csi.v1.ControllerExpandVolumeResponse + 34, // 159: csi.v1.Controller.ControllerGetVolume:output_type -> csi.v1.ControllerGetVolumeResponse + 36, // 160: csi.v1.Controller.ControllerModifyVolume:output_type -> csi.v1.ControllerModifyVolumeResponse + 71, // 161: csi.v1.GroupController.GroupControllerGetCapabilities:output_type -> csi.v1.GroupControllerGetCapabilitiesResponse + 74, // 162: csi.v1.GroupController.CreateVolumeGroupSnapshot:output_type -> csi.v1.CreateVolumeGroupSnapshotResponse + 77, // 163: csi.v1.GroupController.DeleteVolumeGroupSnapshot:output_type -> csi.v1.DeleteVolumeGroupSnapshotResponse + 79, // 164: csi.v1.GroupController.GetVolumeGroupSnapshot:output_type -> csi.v1.GetVolumeGroupSnapshotResponse + 82, // 165: csi.v1.SnapshotMetadata.GetMetadataAllocated:output_type -> csi.v1.GetMetadataAllocatedResponse + 84, // 166: csi.v1.SnapshotMetadata.GetMetadataDelta:output_type -> csi.v1.GetMetadataDeltaResponse + 52, // 167: csi.v1.Node.NodeStageVolume:output_type -> 
csi.v1.NodeStageVolumeResponse + 54, // 168: csi.v1.Node.NodeUnstageVolume:output_type -> csi.v1.NodeUnstageVolumeResponse + 56, // 169: csi.v1.Node.NodePublishVolume:output_type -> csi.v1.NodePublishVolumeResponse + 58, // 170: csi.v1.Node.NodeUnpublishVolume:output_type -> csi.v1.NodeUnpublishVolumeResponse + 60, // 171: csi.v1.Node.NodeGetVolumeStats:output_type -> csi.v1.NodeGetVolumeStatsResponse + 69, // 172: csi.v1.Node.NodeExpandVolume:output_type -> csi.v1.NodeExpandVolumeResponse + 64, // 173: csi.v1.Node.NodeGetCapabilities:output_type -> csi.v1.NodeGetCapabilitiesResponse + 67, // 174: csi.v1.Node.NodeGetInfo:output_type -> csi.v1.NodeGetInfoResponse + 144, // [144:175] is the sub-list for method output_type + 113, // [113:144] is the sub-list for method input_type + 113, // [113:113] is the sub-list for extension type_name + 106, // [106:113] is the sub-list for extension extendee + 0, // [0:106] is the sub-list for field type_name +} + +func init() { file_csi_proto_init() } +func file_csi_proto_init() { + if File_csi_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_csi_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPluginInfoRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPluginInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPluginCapabilitiesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPluginCapabilitiesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PluginCapability); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProbeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProbeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeContentSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateVolumeResponse); i { + case 0: + return &v.state + case 1: 
+ return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeCapability); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CapacityRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Volume); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TopologyRequirement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Topology); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerPublishVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerPublishVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerUnpublishVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerUnpublishVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVolumeCapabilitiesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVolumeCapabilitiesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*ListVolumesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListVolumesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerGetVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerGetVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerModifyVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerModifyVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCapacityRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCapacityResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerGetCapabilitiesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerGetCapabilitiesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerServiceCapability); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Snapshot); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_csi_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListSnapshotsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListSnapshotsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerExpandVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerExpandVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeStageVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeStageVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeUnstageVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeUnstageVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodePublishVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodePublishVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeUnpublishVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeUnpublishVolumeResponse); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeGetVolumeStatsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeGetVolumeStatsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeUsage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeCondition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeGetCapabilitiesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeGetCapabilitiesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeServiceCapability); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeGetInfoRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeGetInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeExpandVolumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeExpandVolumeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GroupControllerGetCapabilitiesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GroupControllerGetCapabilitiesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GroupControllerServiceCapability); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateVolumeGroupSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateVolumeGroupSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeGroupSnapshot); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteVolumeGroupSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteVolumeGroupSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVolumeGroupSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVolumeGroupSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlockMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadataAllocatedRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadataAllocatedResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadataDeltaRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadataDeltaResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PluginCapability_Service); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_csi_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PluginCapability_VolumeExpansion); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeContentSource_SnapshotSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeContentSource_VolumeSource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeCapability_BlockVolume); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeCapability_MountVolume); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VolumeCapability_AccessMode); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVolumeCapabilitiesResponse_Confirmed); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListVolumesResponse_VolumeStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListVolumesResponse_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerGetVolumeResponse_VolumeStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerServiceCapability_RPC); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[114].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListSnapshotsResponse_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[122].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeServiceCapability_RPC); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_csi_proto_msgTypes[124].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*GroupControllerServiceCapability_RPC); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_csi_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*PluginCapability_Service_)(nil), + (*PluginCapability_VolumeExpansion_)(nil), } - if interceptor == nil { - return srv.(NodeServer).NodeGetInfo(ctx, in) + file_csi_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*VolumeContentSource_Snapshot)(nil), + (*VolumeContentSource_Volume)(nil), } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.v1.Node/NodeGetInfo", + file_csi_proto_msgTypes[10].OneofWrappers = []interface{}{ + (*VolumeCapability_Block)(nil), + (*VolumeCapability_Mount)(nil), } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeGetInfo(ctx, req.(*NodeGetInfoRequest)) + file_csi_proto_msgTypes[33].OneofWrappers = []interface{}{ + (*ControllerServiceCapability_Rpc)(nil), } - return interceptor(ctx, in, info, handler) -} - -var _Node_serviceDesc = grpc.ServiceDesc{ - ServiceName: "csi.v1.Node", - HandlerType: (*NodeServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NodeStageVolume", - Handler: _Node_NodeStageVolume_Handler, - }, - { - MethodName: "NodeUnstageVolume", - Handler: _Node_NodeUnstageVolume_Handler, - }, - { - MethodName: "NodePublishVolume", - Handler: _Node_NodePublishVolume_Handler, - }, - { - MethodName: "NodeUnpublishVolume", - Handler: _Node_NodeUnpublishVolume_Handler, - }, - { - MethodName: "NodeGetVolumeStats", - Handler: _Node_NodeGetVolumeStats_Handler, - }, - { - MethodName: "NodeExpandVolume", - Handler: _Node_NodeExpandVolume_Handler, - }, - { - MethodName: "NodeGetCapabilities", - Handler: _Node_NodeGetCapabilities_Handler, - }, - { - MethodName: "NodeGetInfo", - Handler: _Node_NodeGetInfo_Handler, + file_csi_proto_msgTypes[57].OneofWrappers = []interface{}{ + (*NodeServiceCapability_Rpc)(nil), + } + file_csi_proto_msgTypes[64].OneofWrappers = []interface{}{ + (*GroupControllerServiceCapability_Rpc)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_csi_proto_rawDesc, + NumEnums: 8, + NumMessages: 131, + NumExtensions: 7, + NumServices: 5, }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/container-storage-interface/spec/csi.proto", + GoTypes: file_csi_proto_goTypes, + DependencyIndexes: file_csi_proto_depIdxs, + EnumInfos: file_csi_proto_enumTypes, + MessageInfos: file_csi_proto_msgTypes, + ExtensionInfos: file_csi_proto_extTypes, + }.Build() + File_csi_proto = out.File + file_csi_proto_rawDesc = nil + file_csi_proto_goTypes = nil + file_csi_proto_depIdxs = nil } diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi_grpc.pb.go b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi_grpc.pb.go new file mode 100644 index 00000000..8d709194 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi_grpc.pb.go @@ -0,0 +1,1487 @@ +// Code generated by make; DO NOT EDIT. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.25.2 +// source: csi.proto + +package csi + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + Identity_GetPluginInfo_FullMethodName = "/csi.v1.Identity/GetPluginInfo" + Identity_GetPluginCapabilities_FullMethodName = "/csi.v1.Identity/GetPluginCapabilities" + Identity_Probe_FullMethodName = "/csi.v1.Identity/Probe" +) + +// IdentityClient is the client API for Identity service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type IdentityClient interface { + GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) + GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) + Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) +} + +type identityClient struct { + cc grpc.ClientConnInterface +} + +func NewIdentityClient(cc grpc.ClientConnInterface) IdentityClient { + return &identityClient{cc} +} + +func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) { + out := new(GetPluginInfoResponse) + err := c.cc.Invoke(ctx, Identity_GetPluginInfo_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) { + out := new(GetPluginCapabilitiesResponse) + err := c.cc.Invoke(ctx, Identity_GetPluginCapabilities_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) { + out := new(ProbeResponse) + err := c.cc.Invoke(ctx, Identity_Probe_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IdentityServer is the server API for Identity service. +// All implementations must embed UnimplementedIdentityServer +// for forward compatibility +type IdentityServer interface { + GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) + GetPluginCapabilities(context.Context, *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) + Probe(context.Context, *ProbeRequest) (*ProbeResponse, error) + mustEmbedUnimplementedIdentityServer() +} + +// UnimplementedIdentityServer must be embedded to have forward compatible implementations. 
+type UnimplementedIdentityServer struct { +} + +func (UnimplementedIdentityServer) GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPluginInfo not implemented") +} +func (UnimplementedIdentityServer) GetPluginCapabilities(context.Context, *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPluginCapabilities not implemented") +} +func (UnimplementedIdentityServer) Probe(context.Context, *ProbeRequest) (*ProbeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Probe not implemented") +} +func (UnimplementedIdentityServer) mustEmbedUnimplementedIdentityServer() {} + +// UnsafeIdentityServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to IdentityServer will +// result in compilation errors. +type UnsafeIdentityServer interface { + mustEmbedUnimplementedIdentityServer() +} + +func RegisterIdentityServer(s grpc.ServiceRegistrar, srv IdentityServer) { + s.RegisterService(&Identity_ServiceDesc, srv) +} + +func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Identity_GetPluginInfo_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_GetPluginCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Identity_GetPluginCapabilities_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginCapabilities(ctx, req.(*GetPluginCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_Probe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProbeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).Probe(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Identity_Probe_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).Probe(ctx, req.(*ProbeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Identity_ServiceDesc is the grpc.ServiceDesc for Identity service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Identity_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.Identity", + HandlerType: (*IdentityServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetPluginInfo", + Handler: _Identity_GetPluginInfo_Handler, + }, + { + MethodName: "GetPluginCapabilities", + Handler: _Identity_GetPluginCapabilities_Handler, + }, + { + MethodName: "Probe", + Handler: _Identity_Probe_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +const ( + Controller_CreateVolume_FullMethodName = "/csi.v1.Controller/CreateVolume" + Controller_DeleteVolume_FullMethodName = "/csi.v1.Controller/DeleteVolume" + Controller_ControllerPublishVolume_FullMethodName = "/csi.v1.Controller/ControllerPublishVolume" + Controller_ControllerUnpublishVolume_FullMethodName = "/csi.v1.Controller/ControllerUnpublishVolume" + Controller_ValidateVolumeCapabilities_FullMethodName = "/csi.v1.Controller/ValidateVolumeCapabilities" + Controller_ListVolumes_FullMethodName = "/csi.v1.Controller/ListVolumes" + Controller_GetCapacity_FullMethodName = "/csi.v1.Controller/GetCapacity" + Controller_ControllerGetCapabilities_FullMethodName = "/csi.v1.Controller/ControllerGetCapabilities" + Controller_CreateSnapshot_FullMethodName = "/csi.v1.Controller/CreateSnapshot" + Controller_DeleteSnapshot_FullMethodName = "/csi.v1.Controller/DeleteSnapshot" + Controller_ListSnapshots_FullMethodName = "/csi.v1.Controller/ListSnapshots" + Controller_ControllerExpandVolume_FullMethodName = "/csi.v1.Controller/ControllerExpandVolume" + Controller_ControllerGetVolume_FullMethodName = "/csi.v1.Controller/ControllerGetVolume" + Controller_ControllerModifyVolume_FullMethodName = "/csi.v1.Controller/ControllerModifyVolume" +) + +// ControllerClient is the client API for Controller service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type ControllerClient interface { + CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) + DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) + ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) + GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) + ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) + CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) + DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) + ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) + ControllerExpandVolume(ctx context.Context, in *ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*ControllerExpandVolumeResponse, error) + ControllerGetVolume(ctx context.Context, in *ControllerGetVolumeRequest, opts ...grpc.CallOption) (*ControllerGetVolumeResponse, error) + ControllerModifyVolume(ctx context.Context, in *ControllerModifyVolumeRequest, opts ...grpc.CallOption) (*ControllerModifyVolumeResponse, error) +} + +type controllerClient struct { + cc grpc.ClientConnInterface +} + +func NewControllerClient(cc grpc.ClientConnInterface) ControllerClient { + return &controllerClient{cc} +} + +func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { + out := new(CreateVolumeResponse) + err := c.cc.Invoke(ctx, Controller_CreateVolume_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) { + out := new(DeleteVolumeResponse) + err := c.cc.Invoke(ctx, Controller_DeleteVolume_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { + out := new(ControllerPublishVolumeResponse) + err := c.cc.Invoke(ctx, Controller_ControllerPublishVolume_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { + out := new(ControllerUnpublishVolumeResponse) + err := c.cc.Invoke(ctx, Controller_ControllerUnpublishVolume_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) { + out := new(ValidateVolumeCapabilitiesResponse) + err := c.cc.Invoke(ctx, Controller_ValidateVolumeCapabilities_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) { + out := new(ListVolumesResponse) + err := c.cc.Invoke(ctx, Controller_ListVolumes_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) { + out := new(GetCapacityResponse) + err := c.cc.Invoke(ctx, Controller_GetCapacity_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) { + out := new(ControllerGetCapabilitiesResponse) + err := c.cc.Invoke(ctx, Controller_ControllerGetCapabilities_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) { + out := new(CreateSnapshotResponse) + err := c.cc.Invoke(ctx, Controller_CreateSnapshot_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) { + out := new(DeleteSnapshotResponse) + err := c.cc.Invoke(ctx, Controller_DeleteSnapshot_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { + out := new(ListSnapshotsResponse) + err := c.cc.Invoke(ctx, Controller_ListSnapshots_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerExpandVolume(ctx context.Context, in *ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*ControllerExpandVolumeResponse, error) { + out := new(ControllerExpandVolumeResponse) + err := c.cc.Invoke(ctx, Controller_ControllerExpandVolume_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerGetVolume(ctx context.Context, in *ControllerGetVolumeRequest, opts ...grpc.CallOption) (*ControllerGetVolumeResponse, error) { + out := new(ControllerGetVolumeResponse) + err := c.cc.Invoke(ctx, Controller_ControllerGetVolume_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerModifyVolume(ctx context.Context, in *ControllerModifyVolumeRequest, opts ...grpc.CallOption) (*ControllerModifyVolumeResponse, error) { + out := new(ControllerModifyVolumeResponse) + err := c.cc.Invoke(ctx, Controller_ControllerModifyVolume_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// ControllerServer is the server API for Controller service. +// All implementations must embed UnimplementedControllerServer +// for forward compatibility +type ControllerServer interface { + CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) + DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error) + ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error) + GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error) + ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) + CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error) + DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) + ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) + ControllerExpandVolume(context.Context, *ControllerExpandVolumeRequest) (*ControllerExpandVolumeResponse, error) + ControllerGetVolume(context.Context, *ControllerGetVolumeRequest) (*ControllerGetVolumeResponse, error) + ControllerModifyVolume(context.Context, *ControllerModifyVolumeRequest) (*ControllerModifyVolumeResponse, error) + mustEmbedUnimplementedControllerServer() +} + +// UnimplementedControllerServer must be embedded to have forward compatible implementations. 
+type UnimplementedControllerServer struct { +} + +func (UnimplementedControllerServer) CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateVolume not implemented") +} +func (UnimplementedControllerServer) DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteVolume not implemented") +} +func (UnimplementedControllerServer) ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerPublishVolume not implemented") +} +func (UnimplementedControllerServer) ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerUnpublishVolume not implemented") +} +func (UnimplementedControllerServer) ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateVolumeCapabilities not implemented") +} +func (UnimplementedControllerServer) ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListVolumes not implemented") +} +func (UnimplementedControllerServer) GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCapacity not implemented") +} +func (UnimplementedControllerServer) ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerGetCapabilities not implemented") +} +func (UnimplementedControllerServer) CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSnapshot not implemented") +} +func (UnimplementedControllerServer) DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteSnapshot not implemented") +} +func (UnimplementedControllerServer) ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") +} +func (UnimplementedControllerServer) ControllerExpandVolume(context.Context, *ControllerExpandVolumeRequest) (*ControllerExpandVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerExpandVolume not implemented") +} +func (UnimplementedControllerServer) ControllerGetVolume(context.Context, *ControllerGetVolumeRequest) (*ControllerGetVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerGetVolume not implemented") +} +func (UnimplementedControllerServer) ControllerModifyVolume(context.Context, *ControllerModifyVolumeRequest) (*ControllerModifyVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerModifyVolume not implemented") +} +func (UnimplementedControllerServer) mustEmbedUnimplementedControllerServer() {} + +// UnsafeControllerServer may be embedded to opt out of forward compatibility for this service. 
+// Use of this interface is not recommended, as added methods to ControllerServer will +// result in compilation errors. +type UnsafeControllerServer interface { + mustEmbedUnimplementedControllerServer() +} + +func RegisterControllerServer(s grpc.ServiceRegistrar, srv ControllerServer) { + s.RegisterService(&Controller_ServiceDesc, srv) +} + +func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).CreateVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Controller_CreateVolume_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Controller_DeleteVolume_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerPublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerPublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Controller_ControllerPublishVolume_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Controller_ControllerUnpublishVolume_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateVolumeCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + 
Server: srv, + FullMethod: Controller_ValidateVolumeCapabilities_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListVolumesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListVolumes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Controller_ListVolumes_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCapacityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).GetCapacity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Controller_GetCapacity_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Controller_ControllerGetCapabilities_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).CreateSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Controller_CreateSnapshot_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Controller_DeleteSnapshot_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(ControllerServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnapshotsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Controller_ListSnapshots_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerExpandVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerExpandVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerExpandVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Controller_ControllerExpandVolume_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerExpandVolume(ctx, req.(*ControllerExpandVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerGetVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerGetVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerGetVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Controller_ControllerGetVolume_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerGetVolume(ctx, req.(*ControllerGetVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerModifyVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerModifyVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerModifyVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Controller_ControllerModifyVolume_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerModifyVolume(ctx, req.(*ControllerModifyVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Controller_ServiceDesc is the grpc.ServiceDesc for Controller service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Controller_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.Controller", + HandlerType: (*ControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateVolume", + Handler: _Controller_CreateVolume_Handler, + }, + { + MethodName: "DeleteVolume", + Handler: _Controller_DeleteVolume_Handler, + }, + { + MethodName: "ControllerPublishVolume", + Handler: _Controller_ControllerPublishVolume_Handler, + }, + { + MethodName: "ControllerUnpublishVolume", + Handler: _Controller_ControllerUnpublishVolume_Handler, + }, + { + MethodName: "ValidateVolumeCapabilities", + Handler: _Controller_ValidateVolumeCapabilities_Handler, + }, + { + MethodName: "ListVolumes", + Handler: _Controller_ListVolumes_Handler, + }, + { + MethodName: "GetCapacity", + Handler: _Controller_GetCapacity_Handler, + }, + { + MethodName: "ControllerGetCapabilities", + Handler: _Controller_ControllerGetCapabilities_Handler, + }, + { + MethodName: "CreateSnapshot", + Handler: _Controller_CreateSnapshot_Handler, + }, + { + MethodName: "DeleteSnapshot", + Handler: _Controller_DeleteSnapshot_Handler, + }, + { + MethodName: "ListSnapshots", + Handler: _Controller_ListSnapshots_Handler, + }, + { + MethodName: "ControllerExpandVolume", + Handler: _Controller_ControllerExpandVolume_Handler, + }, + { + MethodName: "ControllerGetVolume", + Handler: _Controller_ControllerGetVolume_Handler, + }, + { + MethodName: "ControllerModifyVolume", + Handler: _Controller_ControllerModifyVolume_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +const ( + GroupController_GroupControllerGetCapabilities_FullMethodName = "/csi.v1.GroupController/GroupControllerGetCapabilities" + GroupController_CreateVolumeGroupSnapshot_FullMethodName = "/csi.v1.GroupController/CreateVolumeGroupSnapshot" + GroupController_DeleteVolumeGroupSnapshot_FullMethodName = "/csi.v1.GroupController/DeleteVolumeGroupSnapshot" + GroupController_GetVolumeGroupSnapshot_FullMethodName = "/csi.v1.GroupController/GetVolumeGroupSnapshot" +) + +// GroupControllerClient is the client API for GroupController service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type GroupControllerClient interface { + GroupControllerGetCapabilities(ctx context.Context, in *GroupControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*GroupControllerGetCapabilitiesResponse, error) + CreateVolumeGroupSnapshot(ctx context.Context, in *CreateVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*CreateVolumeGroupSnapshotResponse, error) + DeleteVolumeGroupSnapshot(ctx context.Context, in *DeleteVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*DeleteVolumeGroupSnapshotResponse, error) + GetVolumeGroupSnapshot(ctx context.Context, in *GetVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*GetVolumeGroupSnapshotResponse, error) +} + +type groupControllerClient struct { + cc grpc.ClientConnInterface +} + +func NewGroupControllerClient(cc grpc.ClientConnInterface) GroupControllerClient { + return &groupControllerClient{cc} +} + +func (c *groupControllerClient) GroupControllerGetCapabilities(ctx context.Context, in *GroupControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*GroupControllerGetCapabilitiesResponse, error) { + out := new(GroupControllerGetCapabilitiesResponse) + err := c.cc.Invoke(ctx, GroupController_GroupControllerGetCapabilities_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupControllerClient) CreateVolumeGroupSnapshot(ctx context.Context, in *CreateVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*CreateVolumeGroupSnapshotResponse, error) { + out := new(CreateVolumeGroupSnapshotResponse) + err := c.cc.Invoke(ctx, GroupController_CreateVolumeGroupSnapshot_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupControllerClient) DeleteVolumeGroupSnapshot(ctx context.Context, in *DeleteVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*DeleteVolumeGroupSnapshotResponse, error) { + out := new(DeleteVolumeGroupSnapshotResponse) + err := c.cc.Invoke(ctx, GroupController_DeleteVolumeGroupSnapshot_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *groupControllerClient) GetVolumeGroupSnapshot(ctx context.Context, in *GetVolumeGroupSnapshotRequest, opts ...grpc.CallOption) (*GetVolumeGroupSnapshotResponse, error) { + out := new(GetVolumeGroupSnapshotResponse) + err := c.cc.Invoke(ctx, GroupController_GetVolumeGroupSnapshot_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GroupControllerServer is the server API for GroupController service. +// All implementations must embed UnimplementedGroupControllerServer +// for forward compatibility +type GroupControllerServer interface { + GroupControllerGetCapabilities(context.Context, *GroupControllerGetCapabilitiesRequest) (*GroupControllerGetCapabilitiesResponse, error) + CreateVolumeGroupSnapshot(context.Context, *CreateVolumeGroupSnapshotRequest) (*CreateVolumeGroupSnapshotResponse, error) + DeleteVolumeGroupSnapshot(context.Context, *DeleteVolumeGroupSnapshotRequest) (*DeleteVolumeGroupSnapshotResponse, error) + GetVolumeGroupSnapshot(context.Context, *GetVolumeGroupSnapshotRequest) (*GetVolumeGroupSnapshotResponse, error) + mustEmbedUnimplementedGroupControllerServer() +} + +// UnimplementedGroupControllerServer must be embedded to have forward compatible implementations. 
+type UnimplementedGroupControllerServer struct { +} + +func (UnimplementedGroupControllerServer) GroupControllerGetCapabilities(context.Context, *GroupControllerGetCapabilitiesRequest) (*GroupControllerGetCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GroupControllerGetCapabilities not implemented") +} +func (UnimplementedGroupControllerServer) CreateVolumeGroupSnapshot(context.Context, *CreateVolumeGroupSnapshotRequest) (*CreateVolumeGroupSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateVolumeGroupSnapshot not implemented") +} +func (UnimplementedGroupControllerServer) DeleteVolumeGroupSnapshot(context.Context, *DeleteVolumeGroupSnapshotRequest) (*DeleteVolumeGroupSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteVolumeGroupSnapshot not implemented") +} +func (UnimplementedGroupControllerServer) GetVolumeGroupSnapshot(context.Context, *GetVolumeGroupSnapshotRequest) (*GetVolumeGroupSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetVolumeGroupSnapshot not implemented") +} +func (UnimplementedGroupControllerServer) mustEmbedUnimplementedGroupControllerServer() {} + +// UnsafeGroupControllerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to GroupControllerServer will +// result in compilation errors. +type UnsafeGroupControllerServer interface { + mustEmbedUnimplementedGroupControllerServer() +} + +func RegisterGroupControllerServer(s grpc.ServiceRegistrar, srv GroupControllerServer) { + s.RegisterService(&GroupController_ServiceDesc, srv) +} + +func _GroupController_GroupControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GroupControllerGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupControllerServer).GroupControllerGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: GroupController_GroupControllerGetCapabilities_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupControllerServer).GroupControllerGetCapabilities(ctx, req.(*GroupControllerGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupController_CreateVolumeGroupSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVolumeGroupSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupControllerServer).CreateVolumeGroupSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: GroupController_CreateVolumeGroupSnapshot_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupControllerServer).CreateVolumeGroupSnapshot(ctx, req.(*CreateVolumeGroupSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupController_DeleteVolumeGroupSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVolumeGroupSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } 
+ if interceptor == nil { + return srv.(GroupControllerServer).DeleteVolumeGroupSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: GroupController_DeleteVolumeGroupSnapshot_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupControllerServer).DeleteVolumeGroupSnapshot(ctx, req.(*DeleteVolumeGroupSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _GroupController_GetVolumeGroupSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetVolumeGroupSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GroupControllerServer).GetVolumeGroupSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: GroupController_GetVolumeGroupSnapshot_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GroupControllerServer).GetVolumeGroupSnapshot(ctx, req.(*GetVolumeGroupSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// GroupController_ServiceDesc is the grpc.ServiceDesc for GroupController service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var GroupController_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.GroupController", + HandlerType: (*GroupControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GroupControllerGetCapabilities", + Handler: _GroupController_GroupControllerGetCapabilities_Handler, + }, + { + MethodName: "CreateVolumeGroupSnapshot", + Handler: _GroupController_CreateVolumeGroupSnapshot_Handler, + }, + { + MethodName: "DeleteVolumeGroupSnapshot", + Handler: _GroupController_DeleteVolumeGroupSnapshot_Handler, + }, + { + MethodName: "GetVolumeGroupSnapshot", + Handler: _GroupController_GetVolumeGroupSnapshot_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +const ( + SnapshotMetadata_GetMetadataAllocated_FullMethodName = "/csi.v1.SnapshotMetadata/GetMetadataAllocated" + SnapshotMetadata_GetMetadataDelta_FullMethodName = "/csi.v1.SnapshotMetadata/GetMetadataDelta" +) + +// SnapshotMetadataClient is the client API for SnapshotMetadata service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type SnapshotMetadataClient interface { + GetMetadataAllocated(ctx context.Context, in *GetMetadataAllocatedRequest, opts ...grpc.CallOption) (SnapshotMetadata_GetMetadataAllocatedClient, error) + GetMetadataDelta(ctx context.Context, in *GetMetadataDeltaRequest, opts ...grpc.CallOption) (SnapshotMetadata_GetMetadataDeltaClient, error) +} + +type snapshotMetadataClient struct { + cc grpc.ClientConnInterface +} + +func NewSnapshotMetadataClient(cc grpc.ClientConnInterface) SnapshotMetadataClient { + return &snapshotMetadataClient{cc} +} + +func (c *snapshotMetadataClient) GetMetadataAllocated(ctx context.Context, in *GetMetadataAllocatedRequest, opts ...grpc.CallOption) (SnapshotMetadata_GetMetadataAllocatedClient, error) { + stream, err := c.cc.NewStream(ctx, &SnapshotMetadata_ServiceDesc.Streams[0], SnapshotMetadata_GetMetadataAllocated_FullMethodName, opts...) 
+ if err != nil { + return nil, err + } + x := &snapshotMetadataGetMetadataAllocatedClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SnapshotMetadata_GetMetadataAllocatedClient interface { + Recv() (*GetMetadataAllocatedResponse, error) + grpc.ClientStream +} + +type snapshotMetadataGetMetadataAllocatedClient struct { + grpc.ClientStream +} + +func (x *snapshotMetadataGetMetadataAllocatedClient) Recv() (*GetMetadataAllocatedResponse, error) { + m := new(GetMetadataAllocatedResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *snapshotMetadataClient) GetMetadataDelta(ctx context.Context, in *GetMetadataDeltaRequest, opts ...grpc.CallOption) (SnapshotMetadata_GetMetadataDeltaClient, error) { + stream, err := c.cc.NewStream(ctx, &SnapshotMetadata_ServiceDesc.Streams[1], SnapshotMetadata_GetMetadataDelta_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &snapshotMetadataGetMetadataDeltaClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SnapshotMetadata_GetMetadataDeltaClient interface { + Recv() (*GetMetadataDeltaResponse, error) + grpc.ClientStream +} + +type snapshotMetadataGetMetadataDeltaClient struct { + grpc.ClientStream +} + +func (x *snapshotMetadataGetMetadataDeltaClient) Recv() (*GetMetadataDeltaResponse, error) { + m := new(GetMetadataDeltaResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// SnapshotMetadataServer is the server API for SnapshotMetadata service. +// All implementations must embed UnimplementedSnapshotMetadataServer +// for forward compatibility +type SnapshotMetadataServer interface { + GetMetadataAllocated(*GetMetadataAllocatedRequest, SnapshotMetadata_GetMetadataAllocatedServer) error + GetMetadataDelta(*GetMetadataDeltaRequest, SnapshotMetadata_GetMetadataDeltaServer) error + mustEmbedUnimplementedSnapshotMetadataServer() +} + +// UnimplementedSnapshotMetadataServer must be embedded to have forward compatible implementations. +type UnimplementedSnapshotMetadataServer struct { +} + +func (UnimplementedSnapshotMetadataServer) GetMetadataAllocated(*GetMetadataAllocatedRequest, SnapshotMetadata_GetMetadataAllocatedServer) error { + return status.Errorf(codes.Unimplemented, "method GetMetadataAllocated not implemented") +} +func (UnimplementedSnapshotMetadataServer) GetMetadataDelta(*GetMetadataDeltaRequest, SnapshotMetadata_GetMetadataDeltaServer) error { + return status.Errorf(codes.Unimplemented, "method GetMetadataDelta not implemented") +} +func (UnimplementedSnapshotMetadataServer) mustEmbedUnimplementedSnapshotMetadataServer() {} + +// UnsafeSnapshotMetadataServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SnapshotMetadataServer will +// result in compilation errors. 
+type UnsafeSnapshotMetadataServer interface { + mustEmbedUnimplementedSnapshotMetadataServer() +} + +func RegisterSnapshotMetadataServer(s grpc.ServiceRegistrar, srv SnapshotMetadataServer) { + s.RegisterService(&SnapshotMetadata_ServiceDesc, srv) +} + +func _SnapshotMetadata_GetMetadataAllocated_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetMetadataAllocatedRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SnapshotMetadataServer).GetMetadataAllocated(m, &snapshotMetadataGetMetadataAllocatedServer{stream}) +} + +type SnapshotMetadata_GetMetadataAllocatedServer interface { + Send(*GetMetadataAllocatedResponse) error + grpc.ServerStream +} + +type snapshotMetadataGetMetadataAllocatedServer struct { + grpc.ServerStream +} + +func (x *snapshotMetadataGetMetadataAllocatedServer) Send(m *GetMetadataAllocatedResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _SnapshotMetadata_GetMetadataDelta_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetMetadataDeltaRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SnapshotMetadataServer).GetMetadataDelta(m, &snapshotMetadataGetMetadataDeltaServer{stream}) +} + +type SnapshotMetadata_GetMetadataDeltaServer interface { + Send(*GetMetadataDeltaResponse) error + grpc.ServerStream +} + +type snapshotMetadataGetMetadataDeltaServer struct { + grpc.ServerStream +} + +func (x *snapshotMetadataGetMetadataDeltaServer) Send(m *GetMetadataDeltaResponse) error { + return x.ServerStream.SendMsg(m) +} + +// SnapshotMetadata_ServiceDesc is the grpc.ServiceDesc for SnapshotMetadata service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var SnapshotMetadata_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.SnapshotMetadata", + HandlerType: (*SnapshotMetadataServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "GetMetadataAllocated", + Handler: _SnapshotMetadata_GetMetadataAllocated_Handler, + ServerStreams: true, + }, + { + StreamName: "GetMetadataDelta", + Handler: _SnapshotMetadata_GetMetadataDelta_Handler, + ServerStreams: true, + }, + }, + Metadata: "csi.proto", +} + +const ( + Node_NodeStageVolume_FullMethodName = "/csi.v1.Node/NodeStageVolume" + Node_NodeUnstageVolume_FullMethodName = "/csi.v1.Node/NodeUnstageVolume" + Node_NodePublishVolume_FullMethodName = "/csi.v1.Node/NodePublishVolume" + Node_NodeUnpublishVolume_FullMethodName = "/csi.v1.Node/NodeUnpublishVolume" + Node_NodeGetVolumeStats_FullMethodName = "/csi.v1.Node/NodeGetVolumeStats" + Node_NodeExpandVolume_FullMethodName = "/csi.v1.Node/NodeExpandVolume" + Node_NodeGetCapabilities_FullMethodName = "/csi.v1.Node/NodeGetCapabilities" + Node_NodeGetInfo_FullMethodName = "/csi.v1.Node/NodeGetInfo" +) + +// NodeClient is the client API for Node service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type NodeClient interface { + NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) + NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) + NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) + NodeGetVolumeStats(ctx context.Context, in *NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*NodeGetVolumeStatsResponse, error) + NodeExpandVolume(ctx context.Context, in *NodeExpandVolumeRequest, opts ...grpc.CallOption) (*NodeExpandVolumeResponse, error) + NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) + NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) +} + +type nodeClient struct { + cc grpc.ClientConnInterface +} + +func NewNodeClient(cc grpc.ClientConnInterface) NodeClient { + return &nodeClient{cc} +} + +func (c *nodeClient) NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) { + out := new(NodeStageVolumeResponse) + err := c.cc.Invoke(ctx, Node_NodeStageVolume_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) { + out := new(NodeUnstageVolumeResponse) + err := c.cc.Invoke(ctx, Node_NodeUnstageVolume_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) { + out := new(NodePublishVolumeResponse) + err := c.cc.Invoke(ctx, Node_NodePublishVolume_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) { + out := new(NodeUnpublishVolumeResponse) + err := c.cc.Invoke(ctx, Node_NodeUnpublishVolume_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetVolumeStats(ctx context.Context, in *NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*NodeGetVolumeStatsResponse, error) { + out := new(NodeGetVolumeStatsResponse) + err := c.cc.Invoke(ctx, Node_NodeGetVolumeStats_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeExpandVolume(ctx context.Context, in *NodeExpandVolumeRequest, opts ...grpc.CallOption) (*NodeExpandVolumeResponse, error) { + out := new(NodeExpandVolumeResponse) + err := c.cc.Invoke(ctx, Node_NodeExpandVolume_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) { + out := new(NodeGetCapabilitiesResponse) + err := c.cc.Invoke(ctx, Node_NodeGetCapabilities_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) { + out := new(NodeGetInfoResponse) + err := c.cc.Invoke(ctx, Node_NodeGetInfo_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NodeServer is the server API for Node service. +// All implementations must embed UnimplementedNodeServer +// for forward compatibility +type NodeServer interface { + NodeStageVolume(context.Context, *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) + NodeUnstageVolume(context.Context, *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) + NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) + NodeGetVolumeStats(context.Context, *NodeGetVolumeStatsRequest) (*NodeGetVolumeStatsResponse, error) + NodeExpandVolume(context.Context, *NodeExpandVolumeRequest) (*NodeExpandVolumeResponse, error) + NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) + NodeGetInfo(context.Context, *NodeGetInfoRequest) (*NodeGetInfoResponse, error) + mustEmbedUnimplementedNodeServer() +} + +// UnimplementedNodeServer must be embedded to have forward compatible implementations. 
+type UnimplementedNodeServer struct { +} + +func (UnimplementedNodeServer) NodeStageVolume(context.Context, *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeStageVolume not implemented") +} +func (UnimplementedNodeServer) NodeUnstageVolume(context.Context, *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeUnstageVolume not implemented") +} +func (UnimplementedNodeServer) NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodePublishVolume not implemented") +} +func (UnimplementedNodeServer) NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeUnpublishVolume not implemented") +} +func (UnimplementedNodeServer) NodeGetVolumeStats(context.Context, *NodeGetVolumeStatsRequest) (*NodeGetVolumeStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeGetVolumeStats not implemented") +} +func (UnimplementedNodeServer) NodeExpandVolume(context.Context, *NodeExpandVolumeRequest) (*NodeExpandVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeExpandVolume not implemented") +} +func (UnimplementedNodeServer) NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeGetCapabilities not implemented") +} +func (UnimplementedNodeServer) NodeGetInfo(context.Context, *NodeGetInfoRequest) (*NodeGetInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeGetInfo not implemented") +} +func (UnimplementedNodeServer) mustEmbedUnimplementedNodeServer() {} + +// UnsafeNodeServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to NodeServer will +// result in compilation errors. 
+type UnsafeNodeServer interface { + mustEmbedUnimplementedNodeServer() +} + +func RegisterNodeServer(s grpc.ServiceRegistrar, srv NodeServer) { + s.RegisterService(&Node_ServiceDesc, srv) +} + +func _Node_NodeStageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeStageVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeStageVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Node_NodeStageVolume_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeStageVolume(ctx, req.(*NodeStageVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnstageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnstageVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnstageVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Node_NodeUnstageVolume_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnstageVolume(ctx, req.(*NodeUnstageVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodePublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodePublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Node_NodePublishVolume_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Node_NodeUnpublishVolume_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetVolumeStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetVolumeStatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetVolumeStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Node_NodeGetVolumeStats_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetVolumeStats(ctx, req.(*NodeGetVolumeStatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Node_NodeExpandVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeExpandVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeExpandVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Node_NodeExpandVolume_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeExpandVolume(ctx, req.(*NodeExpandVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Node_NodeGetCapabilities_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Node_NodeGetInfo_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetInfo(ctx, req.(*NodeGetInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Node_ServiceDesc is the grpc.ServiceDesc for Node service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Node_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.Node", + HandlerType: (*NodeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "NodeStageVolume", + Handler: _Node_NodeStageVolume_Handler, + }, + { + MethodName: "NodeUnstageVolume", + Handler: _Node_NodeUnstageVolume_Handler, + }, + { + MethodName: "NodePublishVolume", + Handler: _Node_NodePublishVolume_Handler, + }, + { + MethodName: "NodeUnpublishVolume", + Handler: _Node_NodeUnpublishVolume_Handler, + }, + { + MethodName: "NodeGetVolumeStats", + Handler: _Node_NodeGetVolumeStats_Handler, + }, + { + MethodName: "NodeExpandVolume", + Handler: _Node_NodeExpandVolume_Handler, + }, + { + MethodName: "NodeGetCapabilities", + Handler: _Node_NodeGetCapabilities_Handler, + }, + { + MethodName: "NodeGetInfo", + Handler: _Node_NodeGetInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go new file mode 100644 index 00000000..147f756f --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go @@ -0,0 +1,266 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ +package dbus + +import ( + "context" + "encoding/hex" + "fmt" + "os" + "strconv" + "strings" + "sync" + + "github.com/godbus/dbus/v5" +) + +const ( + alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` + num = `0123456789` + alphanum = alpha + num + signalBuffer = 100 +) + +// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped +func needsEscape(i int, b byte) bool { + // Escape everything that is not a-z-A-Z-0-9 + // Also escape 0-9 if it's the first character + return strings.IndexByte(alphanum, b) == -1 || + (i == 0 && strings.IndexByte(num, b) != -1) +} + +// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the +// rules that systemd uses for serializing special characters. +func PathBusEscape(path string) string { + // Special case the empty string + if len(path) == 0 { + return "_" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if needsEscape(i, c) { + e := fmt.Sprintf("_%x", c) + n = append(n, []byte(e)...) + } else { + n = append(n, c) + } + } + return string(n) +} + +// pathBusUnescape is the inverse of PathBusEscape. +func pathBusUnescape(path string) string { + if path == "_" { + return "" + } + n := []byte{} + for i := 0; i < len(path); i++ { + c := path[i] + if c == '_' && i+2 < len(path) { + res, err := hex.DecodeString(path[i+1 : i+3]) + if err == nil { + n = append(n, res...) + } + i += 2 + } else { + n = append(n, c) + } + } + return string(n) +} + +// Conn is a connection to systemd's dbus endpoint. +type Conn struct { + // sysconn/sysobj are only used to call dbus methods + sysconn *dbus.Conn + sysobj dbus.BusObject + + // sigconn/sigobj are only used to receive dbus signals + sigconn *dbus.Conn + sigobj dbus.BusObject + + jobListener struct { + jobs map[dbus.ObjectPath]chan<- string + sync.Mutex + } + subStateSubscriber struct { + updateCh chan<- *SubStateUpdate + errCh chan<- error + sync.Mutex + ignore map[dbus.ObjectPath]int64 + cleanIgnore int64 + } + propertiesSubscriber struct { + updateCh chan<- *PropertiesUpdate + errCh chan<- error + sync.Mutex + } +} + +// Deprecated: use NewWithContext instead. +func New() (*Conn, error) { + return NewWithContext(context.Background()) +} + +// NewWithContext establishes a connection to any available bus and authenticates. +// Callers should call Close() when done with the connection. +func NewWithContext(ctx context.Context) (*Conn, error) { + conn, err := NewSystemConnectionContext(ctx) + if err != nil && os.Geteuid() == 0 { + return NewSystemdConnectionContext(ctx) + } + return conn, err +} + +// Deprecated: use NewSystemConnectionContext instead. +func NewSystemConnection() (*Conn, error) { + return NewSystemConnectionContext(context.Background()) +} + +// NewSystemConnectionContext establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection. 
+func NewSystemConnectionContext(ctx context.Context) (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(ctx, dbus.SystemBusPrivate) + }) +} + +// Deprecated: use NewUserConnectionContext instead. +func NewUserConnection() (*Conn, error) { + return NewUserConnectionContext(context.Background()) +} + +// NewUserConnectionContext establishes a connection to the session bus and +// authenticates. This can be used to connect to systemd user instances. +// Callers should call Close() when done with the connection. +func NewUserConnectionContext(ctx context.Context) (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + return dbusAuthHelloConnection(ctx, dbus.SessionBusPrivate) + }) +} + +// Deprecated: use NewSystemdConnectionContext instead. +func NewSystemdConnection() (*Conn, error) { + return NewSystemdConnectionContext(context.Background()) +} + +// NewSystemdConnectionContext establishes a private, direct connection to systemd. +// This can be used for communicating with systemd without a dbus daemon. +// Callers should call Close() when done with the connection. +func NewSystemdConnectionContext(ctx context.Context) (*Conn, error) { + return NewConnection(func() (*dbus.Conn, error) { + // We skip Hello when talking directly to systemd. + return dbusAuthConnection(ctx, func(opts ...dbus.ConnOption) (*dbus.Conn, error) { + return dbus.Dial("unix:path=/run/systemd/private", opts...) + }) + }) +} + +// Close closes an established connection. +func (c *Conn) Close() { + c.sysconn.Close() + c.sigconn.Close() +} + +// Connected returns whether conn is connected +func (c *Conn) Connected() bool { + return c.sysconn.Connected() && c.sigconn.Connected() +} + +// NewConnection establishes a connection to a bus using a caller-supplied function. +// This allows connecting to remote buses through a user-supplied mechanism. +// The supplied function may be called multiple times, and should return independent connections. +// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, +// and any authentication should be handled by the function. +func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { + sysconn, err := dialBus() + if err != nil { + return nil, err + } + + sigconn, err := dialBus() + if err != nil { + sysconn.Close() + return nil, err + } + + c := &Conn{ + sysconn: sysconn, + sysobj: systemdObject(sysconn), + sigconn: sigconn, + sigobj: systemdObject(sigconn), + } + + c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64) + c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) + + // Setup the listeners on jobs so that we can get completions + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") + + c.dispatch() + return c, nil +} + +// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager +// interface. The value is returned in its string representation, as defined at +// https://developer.gnome.org/glib/unstable/gvariant-text.html. +func (c *Conn) GetManagerProperty(prop string) (string, error) { + variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." 
+ prop) + if err != nil { + return "", err + } + return variant.String(), nil +} + +func dbusAuthConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus(dbus.WithContext(ctx)) + if err != nil { + return nil, err + } + + // Only use EXTERNAL method, and hardcode the uid (not username) + // to avoid a username lookup (which requires a dynamically linked + // libc) + methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} + + err = conn.Auth(methods) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func dbusAuthHelloConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := dbusAuthConnection(ctx, createBus) + if err != nil { + return nil, err + } + + if err = conn.Hello(); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +func systemdObject(conn *dbus.Conn) dbus.BusObject { + return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go new file mode 100644 index 00000000..074148cb --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go @@ -0,0 +1,864 @@ +// Copyright 2015, 2018 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "context" + "errors" + "fmt" + "path" + "strconv" + + "github.com/godbus/dbus/v5" +) + +// Who can be used to specify which process to kill in the unit via the KillUnitWithTarget API +type Who string + +const ( + // All sends the signal to all processes in the unit + All Who = "all" + // Main sends the signal to the main process of the unit + Main Who = "main" + // Control sends the signal to the control process of the unit + Control Who = "control" +) + +func (c *Conn) jobComplete(signal *dbus.Signal) { + var id uint32 + var job dbus.ObjectPath + var unit string + var result string + dbus.Store(signal.Body, &id, &job, &unit, &result) + c.jobListener.Lock() + out, ok := c.jobListener.jobs[job] + if ok { + out <- result + delete(c.jobListener.jobs, job) + } + c.jobListener.Unlock() +} + +func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args ...interface{}) (int, error) { + if ch != nil { + c.jobListener.Lock() + defer c.jobListener.Unlock() + } + + var p dbus.ObjectPath + err := c.sysobj.CallWithContext(ctx, job, 0, args...).Store(&p) + if err != nil { + return 0, err + } + + if ch != nil { + c.jobListener.jobs[p] = ch + } + + // ignore error since 0 is fine if conversion fails + jobID, _ := strconv.Atoi(path.Base(string(p))) + + return jobID, nil +} + +// Deprecated: use StartUnitContext instead. 
+func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.StartUnitContext(context.Background(), name, mode, ch) +} + +// StartUnitContext enqueues a start job and depending jobs, if any (unless otherwise +// specified by the mode string). +// +// Takes the unit to activate, plus a mode string. The mode needs to be one of +// replace, fail, isolate, ignore-dependencies, ignore-requirements. If +// "replace" the call will start the unit and its dependencies, possibly +// replacing already queued jobs that conflict with this. If "fail" the call +// will start the unit and its dependencies, but will fail if this would change +// an already queued job. If "isolate" the call will start the unit in question +// and terminate all units that aren't dependencies of it. If +// "ignore-dependencies" it will start a unit but ignore all its dependencies. +// If "ignore-requirements" it will start a unit but only ignore the +// requirement dependencies. It is not recommended to make use of the latter +// two options. +// +// If the provided channel is non-nil, a result string will be sent to it upon +// job completion: one of done, canceled, timeout, failed, dependency, skipped. +// done indicates successful execution of a job. canceled indicates that a job +// has been canceled before it finished execution. timeout indicates that the +// job timeout was reached. failed indicates that the job failed. dependency +// indicates that a job this job has been depending on failed and the job hence +// has been removed too. skipped indicates that a job was skipped because it +// didn't apply to the units current state. +// +// If no error occurs, the ID of the underlying systemd job will be returned. There +// does exist the possibility for no error to be returned, but for the returned job +// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint +// should not be considered authoritative. +// +// If an error does occur, it will be returned to the user alongside a job ID of 0. +func (c *Conn) StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) +} + +// Deprecated: use StopUnitContext instead. +func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { + return c.StopUnitContext(context.Background(), name, mode, ch) +} + +// StopUnitContext is similar to StartUnitContext, but stops the specified unit +// rather than starting it. +func (c *Conn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) +} + +// Deprecated: use ReloadUnitContext instead. +func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { + return c.ReloadUnitContext(context.Background(), name, mode, ch) +} + +// ReloadUnitContext reloads a unit. Reloading is done only if the unit +// is already running, and fails otherwise. +func (c *Conn) ReloadUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) +} + +// Deprecated: use RestartUnitContext instead. +func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.RestartUnitContext(context.Background(), name, mode, ch) +} + +// RestartUnitContext restarts a service. 
If a service is restarted that isn't +// running it will be started. +func (c *Conn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) +} + +// Deprecated: use TryRestartUnitContext instead. +func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.TryRestartUnitContext(context.Background(), name, mode, ch) +} + +// TryRestartUnitContext is like RestartUnitContext, except that a service that +// isn't running is not affected by the restart. +func (c *Conn) TryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) +} + +// Deprecated: use ReloadOrRestartUnitContext instead. +func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.ReloadOrRestartUnitContext(context.Background(), name, mode, ch) +} + +// ReloadOrRestartUnitContext attempts a reload if the unit supports it and use +// a restart otherwise. +func (c *Conn) ReloadOrRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) +} + +// Deprecated: use ReloadOrTryRestartUnitContext instead. +func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.ReloadOrTryRestartUnitContext(context.Background(), name, mode, ch) +} + +// ReloadOrTryRestartUnitContext attempts a reload if the unit supports it, +// and use a "Try" flavored restart otherwise. +func (c *Conn) ReloadOrTryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) +} + +// Deprecated: use StartTransientUnitContext instead. +func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.StartTransientUnitContext(context.Background(), name, mode, properties, ch) +} + +// StartTransientUnitContext may be used to create and start a transient unit, which +// will be released as soon as it is not running or referenced anymore or the +// system is rebooted. name is the unit name including suffix, and must be +// unique. mode is the same as in StartUnitContext, properties contains properties +// of the unit. +func (c *Conn) StartTransientUnitContext(ctx context.Context, name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) +} + +// Deprecated: use KillUnitContext instead. +func (c *Conn) KillUnit(name string, signal int32) { + c.KillUnitContext(context.Background(), name, signal) +} + +// KillUnitContext takes the unit name and a UNIX signal number to send. +// All of the unit's processes are killed. +func (c *Conn) KillUnitContext(ctx context.Context, name string, signal int32) { + c.KillUnitWithTarget(ctx, name, All, signal) +} + +// KillUnitWithTarget is like KillUnitContext, but allows you to specify which +// process in the unit to send the signal to. 
+func (c *Conn) KillUnitWithTarget(ctx context.Context, name string, target Who, signal int32) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.KillUnit", 0, name, string(target), signal).Store() +} + +// Deprecated: use ResetFailedUnitContext instead. +func (c *Conn) ResetFailedUnit(name string) error { + return c.ResetFailedUnitContext(context.Background(), name) +} + +// ResetFailedUnitContext resets the "failed" state of a specific unit. +func (c *Conn) ResetFailedUnitContext(ctx context.Context, name string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() +} + +// Deprecated: use SystemStateContext instead. +func (c *Conn) SystemState() (*Property, error) { + return c.SystemStateContext(context.Background()) +} + +// SystemStateContext returns the systemd state. Equivalent to +// systemctl is-system-running. +func (c *Conn) SystemStateContext(ctx context.Context) (*Property, error) { + var err error + var prop dbus.Variant + + obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: "SystemState", Value: prop}, nil +} + +// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface. +func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { + var err error + var props map[string]dbus.Variant + + if !path.IsValid() { + return nil, fmt.Errorf("invalid unit name: %v", path) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) + if err != nil { + return nil, err + } + + out := make(map[string]interface{}, len(props)) + for k, v := range props { + out[k] = v.Value() + } + + return out, nil +} + +// Deprecated: use GetUnitPropertiesContext instead. +func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { + return c.GetUnitPropertiesContext(context.Background(), unit) +} + +// GetUnitPropertiesContext takes the (unescaped) unit name and returns all of +// its dbus object properties. +func (c *Conn) GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") +} + +// Deprecated: use GetUnitPathPropertiesContext instead. +func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { + return c.GetUnitPathPropertiesContext(context.Background(), path) +} + +// GetUnitPathPropertiesContext takes the (escaped) unit path and returns all +// of its dbus object properties. +func (c *Conn) GetUnitPathPropertiesContext(ctx context.Context, path dbus.ObjectPath) (map[string]interface{}, error) { + return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") +} + +// Deprecated: use GetAllPropertiesContext instead. +func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) { + return c.GetAllPropertiesContext(context.Background(), unit) +} + +// GetAllPropertiesContext takes the (unescaped) unit name and returns all of +// its dbus object properties. 
+func (c *Conn) GetAllPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(ctx, path, "") +} + +func (c *Conn) getProperty(ctx context.Context, unit string, dbusInterface string, propertyName string) (*Property, error) { + var err error + var prop dbus.Variant + + path := unitPath(unit) + if !path.IsValid() { + return nil, errors.New("invalid unit name: " + unit) + } + + obj := c.sysconn.Object("org.freedesktop.systemd1", path) + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) + if err != nil { + return nil, err + } + + return &Property{Name: propertyName, Value: prop}, nil +} + +// Deprecated: use GetUnitPropertyContext instead. +func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { + return c.GetUnitPropertyContext(context.Background(), unit, propertyName) +} + +// GetUnitPropertyContext takes an (unescaped) unit name, and a property name, +// and returns the property value. +func (c *Conn) GetUnitPropertyContext(ctx context.Context, unit string, propertyName string) (*Property, error) { + return c.getProperty(ctx, unit, "org.freedesktop.systemd1.Unit", propertyName) +} + +// Deprecated: use GetServicePropertyContext instead. +func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) { + return c.GetServicePropertyContext(context.Background(), service, propertyName) +} + +// GetServiceProperty returns property for given service name and property name. +func (c *Conn) GetServicePropertyContext(ctx context.Context, service string, propertyName string) (*Property, error) { + return c.getProperty(ctx, service, "org.freedesktop.systemd1.Service", propertyName) +} + +// Deprecated: use GetUnitTypePropertiesContext instead. +func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { + return c.GetUnitTypePropertiesContext(context.Background(), unit, unitType) +} + +// GetUnitTypePropertiesContext returns the extra properties for a unit, specific to the unit type. +// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope. +// Returns "dbus.Error: Unknown interface" error if the unitType is not the correct type of the unit. +func (c *Conn) GetUnitTypePropertiesContext(ctx context.Context, unit string, unitType string) (map[string]interface{}, error) { + path := unitPath(unit) + return c.getProperties(ctx, path, "org.freedesktop.systemd1."+unitType) +} + +// Deprecated: use SetUnitPropertiesContext instead. +func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { + return c.SetUnitPropertiesContext(context.Background(), name, runtime, properties...) +} + +// SetUnitPropertiesContext may be used to modify certain unit properties at runtime. +// Not all properties may be changed at runtime, but many resource management +// settings (primarily those in systemd.cgroup(5)) may. The changes are applied +// instantly, and stored on disk for future boots, unless runtime is true, in which +// case the settings only apply until the next reboot. name is the name of the unit +// to modify. properties are the settings to set, encoded as an array of property +// name and value pairs. 
+func (c *Conn) SetUnitPropertiesContext(ctx context.Context, name string, runtime bool, properties ...Property) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() +} + +// Deprecated: use GetUnitTypePropertyContext instead. +func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { + return c.GetUnitTypePropertyContext(context.Background(), unit, unitType, propertyName) +} + +// GetUnitTypePropertyContext takes a property name, a unit name, and a unit type, +// and returns a property value. For valid values of unitType, see GetUnitTypePropertiesContext. +func (c *Conn) GetUnitTypePropertyContext(ctx context.Context, unit string, unitType string, propertyName string) (*Property, error) { + return c.getProperty(ctx, unit, "org.freedesktop.systemd1."+unitType, propertyName) +} + +type UnitStatus struct { + Name string // The primary unit name as string + Description string // The human readable description string + LoadState string // The load state (i.e. whether the unit file has been loaded successfully) + ActiveState string // The active state (i.e. whether the unit is currently started or not) + SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) + Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. + Path dbus.ObjectPath // The unit object path + JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise + JobType string // The job type as string + JobPath dbus.ObjectPath // The job object path +} + +type storeFunc func(retvalues ...interface{}) error + +func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { + result := make([][]interface{}, 0) + err := f(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]UnitStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + err = dbus.Store(resultInterface, statusInterface...) + if err != nil { + return nil, err + } + + return status, nil +} + +// GetUnitByPID returns the unit object path of the unit a process ID +// belongs to. It takes a UNIX PID and returns the object path. The PID must +// refer to an existing system process +func (c *Conn) GetUnitByPID(ctx context.Context, pid uint32) (dbus.ObjectPath, error) { + var result dbus.ObjectPath + + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.GetUnitByPID", 0, pid).Store(&result) + + return result, err +} + +// GetUnitNameByPID returns the name of the unit a process ID belongs to. It +// takes a UNIX PID and returns the object path. The PID must refer to an +// existing system process +func (c *Conn) GetUnitNameByPID(ctx context.Context, pid uint32) (string, error) { + path, err := c.GetUnitByPID(ctx, pid) + if err != nil { + return "", err + } + + return unitName(path), nil +} + +// Deprecated: use ListUnitsContext instead. +func (c *Conn) ListUnits() ([]UnitStatus, error) { + return c.ListUnitsContext(context.Background()) +} + +// ListUnitsContext returns an array with all currently loaded units. 
Note that +// units may be known by multiple names at the same time, and hence there might +// be more unit names loaded than actual units behind them. +// Also note that a unit is only loaded if it is active and/or enabled. +// Units that are both disabled and inactive will thus not be returned. +func (c *Conn) ListUnitsContext(ctx context.Context) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnits", 0).Store) +} + +// Deprecated: use ListUnitsFilteredContext instead. +func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { + return c.ListUnitsFilteredContext(context.Background(), states) +} + +// ListUnitsFilteredContext returns an array with units filtered by state. +// It takes a list of units' statuses to filter. +func (c *Conn) ListUnitsFilteredContext(ctx context.Context, states []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) +} + +// Deprecated: use ListUnitsByPatternsContext instead. +func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) { + return c.ListUnitsByPatternsContext(context.Background(), states, patterns) +} + +// ListUnitsByPatternsContext returns an array with units. +// It takes a list of units' statuses and names to filter. +// Note that units may be known by multiple names at the same time, +// and hence there might be more unit names loaded than actual units behind them. +func (c *Conn) ListUnitsByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) +} + +// Deprecated: use ListUnitsByNamesContext instead. +func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { + return c.ListUnitsByNamesContext(context.Background(), units) +} + +// ListUnitsByNamesContext returns an array with units. It takes a list of units' +// names and returns an UnitStatus array. Comparing to ListUnitsByPatternsContext +// method, this method returns statuses even for inactive or non-existing +// units. Input array should contain exact unit names, but not patterns. +// +// Requires systemd v230 or higher. +func (c *Conn) ListUnitsByNamesContext(ctx context.Context, units []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) +} + +type UnitFile struct { + Path string + Type string +} + +func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { + result := make([][]interface{}, 0) + err := f(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + files := make([]UnitFile, len(result)) + fileInterface := make([]interface{}, len(files)) + for i := range files { + fileInterface[i] = &files[i] + } + + err = dbus.Store(resultInterface, fileInterface...) + if err != nil { + return nil, err + } + + return files, nil +} + +// Deprecated: use ListUnitFilesContext instead. +func (c *Conn) ListUnitFiles() ([]UnitFile, error) { + return c.ListUnitFilesContext(context.Background()) +} + +// ListUnitFiles returns an array of all available units on disk. 
+func (c *Conn) ListUnitFilesContext(ctx context.Context) ([]UnitFile, error) { + return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) +} + +// Deprecated: use ListUnitFilesByPatternsContext instead. +func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) { + return c.ListUnitFilesByPatternsContext(context.Background(), states, patterns) +} + +// ListUnitFilesByPatternsContext returns an array of all available units on disk matched the patterns. +func (c *Conn) ListUnitFilesByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitFile, error) { + return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) +} + +type LinkUnitFileChange EnableUnitFileChange + +// Deprecated: use LinkUnitFilesContext instead. +func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + return c.LinkUnitFilesContext(context.Background(), files, runtime, force) +} + +// LinkUnitFilesContext links unit files (that are located outside of the +// usual unit search paths) into the unit search path. +// +// It takes a list of absolute paths to unit files to link and two +// booleans. +// +// The first boolean controls whether the unit shall be +// enabled for runtime only (true, /run), or persistently (false, +// /etc). +// +// The second controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns a list of the changes made. The list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]LinkUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +// Deprecated: use EnableUnitFilesContext instead. +func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { + return c.EnableUnitFilesContext(context.Background(), files, runtime, force) +} + +// EnableUnitFilesContext may be used to enable one or more units in the system +// (by creating symlinks to them in /etc or /run). +// +// It takes a list of unit files to enable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and two booleans: the first controls whether the unit shall +// be enabled for runtime only (true, /run), or persistently (false, /etc). +// The second one controls whether symlinks pointing to other units shall +// be replaced if necessary. +// +// This call returns one boolean and an array with the changes made. The +// boolean signals whether the unit files contained any enablement +// information (i.e. 
an [Install]) section. The changes list consists of +// structures with three strings: the type of the change (one of symlink +// or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) EnableUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { + var carries_install_info bool + + result := make([][]interface{}, 0) + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) + if err != nil { + return false, nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]EnableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return false, nil, err + } + + return carries_install_info, changes, nil +} + +type EnableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Deprecated: use DisableUnitFilesContext instead. +func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { + return c.DisableUnitFilesContext(context.Background(), files, runtime) +} + +// DisableUnitFilesContext may be used to disable one or more units in the +// system (by removing symlinks to them from /etc or /run). +// +// It takes a list of unit files to disable (either just file names or full +// absolute paths if the unit files are residing outside the usual unit +// search paths), and one boolean: whether the unit was enabled for runtime +// only (true, /run), or persistently (false, /etc). +// +// This call returns an array with the changes made. The changes list +// consists of structures with three strings: the type of the change (one of +// symlink or unlink), the file name of the symlink and the destination of the +// symlink. +func (c *Conn) DisableUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]DisableUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]DisableUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type DisableUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Deprecated: use MaskUnitFilesContext instead. +func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { + return c.MaskUnitFilesContext(context.Background(), files, runtime, force) +} + +// MaskUnitFilesContext masks one or more units in the system. 
+// +// The files argument contains a list of units to mask (either just file names +// or full absolute paths if the unit files are residing outside the usual unit +// search paths). +// +// The runtime argument is used to specify whether the unit was enabled for +// runtime only (true, /run/systemd/..), or persistently (false, +// /etc/systemd/..). +func (c *Conn) MaskUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]MaskUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type MaskUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Deprecated: use UnmaskUnitFilesContext instead. +func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) { + return c.UnmaskUnitFilesContext(context.Background(), files, runtime) +} + +// UnmaskUnitFilesContext unmasks one or more units in the system. +// +// It takes the list of unit files to mask (either just file names or full +// absolute paths if the unit files are residing outside the usual unit search +// paths), and a boolean runtime flag to specify whether the unit was enabled +// for runtime only (true, /run/systemd/..), or persistently (false, +// /etc/systemd/..). +func (c *Conn) UnmaskUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]UnmaskUnitFileChange, error) { + result := make([][]interface{}, 0) + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) + if err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + changes := make([]UnmaskUnitFileChange, len(result)) + changesInterface := make([]interface{}, len(changes)) + for i := range changes { + changesInterface[i] = &changes[i] + } + + err = dbus.Store(resultInterface, changesInterface...) + if err != nil { + return nil, err + } + + return changes, nil +} + +type UnmaskUnitFileChange struct { + Type string // Type of the change (one of symlink or unlink) + Filename string // File name of the symlink + Destination string // Destination of the symlink +} + +// Deprecated: use ReloadContext instead. +func (c *Conn) Reload() error { + return c.ReloadContext(context.Background()) +} + +// ReloadContext instructs systemd to scan for and reload unit files. This is +// an equivalent to systemctl daemon-reload. +func (c *Conn) ReloadContext(ctx context.Context) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.Reload", 0).Store() +} + +func unitPath(name string) dbus.ObjectPath { + return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) +} + +// unitName returns the unescaped base element of the supplied escaped path. 
+func unitName(dpath dbus.ObjectPath) string { + return pathBusUnescape(path.Base(string(dpath))) +} + +// JobStatus holds a currently queued job definition. +type JobStatus struct { + Id uint32 // The numeric job id + Unit string // The primary unit name for this job + JobType string // The job type as string + Status string // The job state as string + JobPath dbus.ObjectPath // The job object path + UnitPath dbus.ObjectPath // The unit object path +} + +// Deprecated: use ListJobsContext instead. +func (c *Conn) ListJobs() ([]JobStatus, error) { + return c.ListJobsContext(context.Background()) +} + +// ListJobsContext returns an array with all currently queued jobs. +func (c *Conn) ListJobsContext(ctx context.Context) ([]JobStatus, error) { + return c.listJobsInternal(ctx) +} + +func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) { + result := make([][]interface{}, 0) + if err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListJobs", 0).Store(&result); err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]JobStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + if err := dbus.Store(resultInterface, statusInterface...); err != nil { + return nil, err + } + + return status, nil +} + +// Freeze the cgroup associated with the unit. +// Note that FreezeUnit and ThawUnit are only supported on systems running with cgroup v2. +func (c *Conn) FreezeUnit(ctx context.Context, unit string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.FreezeUnit", 0, unit).Store() +} + +// Unfreeze the cgroup associated with the unit. +func (c *Conn) ThawUnit(ctx context.Context, unit string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ThawUnit", 0, unit).Store() +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go b/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go new file mode 100644 index 00000000..fb42b627 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/properties.go @@ -0,0 +1,237 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "github.com/godbus/dbus/v5" +) + +// From the systemd docs: +// +// The properties array of StartTransientUnit() may take many of the settings +// that may also be configured in unit files. Not all parameters are currently +// accepted though, but we plan to cover more properties with future release. 
+// Currently you may set the Description, Slice and all dependency types of +// units, as well as RemainAfterExit, ExecStart for service units, +// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, +// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, +// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, +// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map +// directly to their counterparts in unit files and as normal D-Bus object +// properties. The exception here is the PIDs field of scope units which is +// used for construction of the scope only and specifies the initial PIDs to +// add to the scope object. + +type Property struct { + Name string + Value dbus.Variant +} + +type PropertyCollection struct { + Name string + Properties []Property +} + +type execStart struct { + Path string // the binary path to execute + Args []string // an array with all arguments to pass to the executed command, starting with argument 0 + UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly +} + +// PropExecStart sets the ExecStart service property. The first argument is a +// slice with the binary path to execute followed by the arguments to pass to +// the executed command. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= +func PropExecStart(command []string, uncleanIsFailure bool) Property { + execStarts := []execStart{ + { + Path: command[0], + Args: command, + UncleanIsFailure: uncleanIsFailure, + }, + } + + return Property{ + Name: "ExecStart", + Value: dbus.MakeVariant(execStarts), + } +} + +// PropRemainAfterExit sets the RemainAfterExit service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= +func PropRemainAfterExit(b bool) Property { + return Property{ + Name: "RemainAfterExit", + Value: dbus.MakeVariant(b), + } +} + +// PropType sets the Type service property. See +// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type= +func PropType(t string) Property { + return Property{ + Name: "Type", + Value: dbus.MakeVariant(t), + } +} + +// PropDescription sets the Description unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= +func PropDescription(desc string) Property { + return Property{ + Name: "Description", + Value: dbus.MakeVariant(desc), + } +} + +func propDependency(name string, units []string) Property { + return Property{ + Name: name, + Value: dbus.MakeVariant(units), + } +} + +// PropRequires sets the Requires unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= +func PropRequires(units ...string) Property { + return propDependency("Requires", units) +} + +// PropRequiresOverridable sets the RequiresOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= +func PropRequiresOverridable(units ...string) Property { + return propDependency("RequiresOverridable", units) +} + +// PropRequisite sets the Requisite unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= +func PropRequisite(units ...string) Property { + return propDependency("Requisite", units) +} + +// PropRequisiteOverridable sets the RequisiteOverridable unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= +func PropRequisiteOverridable(units ...string) Property { + return propDependency("RequisiteOverridable", units) +} + +// PropWants sets the Wants unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= +func PropWants(units ...string) Property { + return propDependency("Wants", units) +} + +// PropBindsTo sets the BindsTo unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= +func PropBindsTo(units ...string) Property { + return propDependency("BindsTo", units) +} + +// PropRequiredBy sets the RequiredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= +func PropRequiredBy(units ...string) Property { + return propDependency("RequiredBy", units) +} + +// PropRequiredByOverridable sets the RequiredByOverridable unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= +func PropRequiredByOverridable(units ...string) Property { + return propDependency("RequiredByOverridable", units) +} + +// PropWantedBy sets the WantedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= +func PropWantedBy(units ...string) Property { + return propDependency("WantedBy", units) +} + +// PropBoundBy sets the BoundBy unit property. See +// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy= +func PropBoundBy(units ...string) Property { + return propDependency("BoundBy", units) +} + +// PropConflicts sets the Conflicts unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= +func PropConflicts(units ...string) Property { + return propDependency("Conflicts", units) +} + +// PropConflictedBy sets the ConflictedBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= +func PropConflictedBy(units ...string) Property { + return propDependency("ConflictedBy", units) +} + +// PropBefore sets the Before unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= +func PropBefore(units ...string) Property { + return propDependency("Before", units) +} + +// PropAfter sets the After unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= +func PropAfter(units ...string) Property { + return propDependency("After", units) +} + +// PropOnFailure sets the OnFailure unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= +func PropOnFailure(units ...string) Property { + return propDependency("OnFailure", units) +} + +// PropTriggers sets the Triggers unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= +func PropTriggers(units ...string) Property { + return propDependency("Triggers", units) +} + +// PropTriggeredBy sets the TriggeredBy unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= +func PropTriggeredBy(units ...string) Property { + return propDependency("TriggeredBy", units) +} + +// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. 
See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= +func PropPropagatesReloadTo(units ...string) Property { + return propDependency("PropagatesReloadTo", units) +} + +// PropRequiresMountsFor sets the RequiresMountsFor unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= +func PropRequiresMountsFor(units ...string) Property { + return propDependency("RequiresMountsFor", units) +} + +// PropSlice sets the Slice unit property. See +// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= +func PropSlice(slice string) Property { + return Property{ + Name: "Slice", + Value: dbus.MakeVariant(slice), + } +} + +// PropPids sets the PIDs field of scope units used in the initial construction +// of the scope only and specifies the initial PIDs to add to the scope object. +// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties +func PropPids(pids ...uint32) Property { + return Property{ + Name: "PIDs", + Value: dbus.MakeVariant(pids), + } +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/set.go b/vendor/github.com/coreos/go-systemd/v22/dbus/set.go new file mode 100644 index 00000000..17c5d485 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/set.go @@ -0,0 +1,47 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +type set struct { + data map[string]bool +} + +func (s *set) Add(value string) { + s.data[value] = true +} + +func (s *set) Remove(value string) { + delete(s.data, value) +} + +func (s *set) Contains(value string) (exists bool) { + _, exists = s.data[value] + return +} + +func (s *set) Length() int { + return len(s.data) +} + +func (s *set) Values() (values []string) { + for val := range s.data { + values = append(values, val) + } + return +} + +func newSet() *set { + return &set{make(map[string]bool)} +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go new file mode 100644 index 00000000..7e370fea --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription.go @@ -0,0 +1,333 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package dbus + +import ( + "errors" + "log" + "time" + + "github.com/godbus/dbus/v5" +) + +const ( + cleanIgnoreInterval = int64(10 * time.Second) + ignoreInterval = int64(30 * time.Millisecond) +) + +// Subscribe sets up this connection to subscribe to all systemd dbus events. +// This is required before calling SubscribeUnits. When the connection closes +// systemd will automatically stop sending signals so there is no need to +// explicitly call Unsubscribe(). +func (c *Conn) Subscribe() error { + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") + c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, + "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") + + return c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() +} + +// Unsubscribe this connection from systemd dbus events. +func (c *Conn) Unsubscribe() error { + return c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() +} + +func (c *Conn) dispatch() { + ch := make(chan *dbus.Signal, signalBuffer) + + c.sigconn.Signal(ch) + + go func() { + for { + signal, ok := <-ch + if !ok { + return + } + + if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { + c.jobComplete(signal) + } + + if c.subStateSubscriber.updateCh == nil && + c.propertiesSubscriber.updateCh == nil { + continue + } + + var unitPath dbus.ObjectPath + switch signal.Name { + case "org.freedesktop.systemd1.Manager.JobRemoved": + unitName := signal.Body[2].(string) + c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) + case "org.freedesktop.systemd1.Manager.UnitNew": + unitPath = signal.Body[1].(dbus.ObjectPath) + case "org.freedesktop.DBus.Properties.PropertiesChanged": + if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { + unitPath = signal.Path + + if len(signal.Body) >= 2 { + if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok { + c.sendPropertiesUpdate(unitPath, changed) + } + } + } + } + + if unitPath == dbus.ObjectPath("") { + continue + } + + c.sendSubStateUpdate(unitPath) + } + }() +} + +// SubscribeUnits returns two unbuffered channels which will receive all changed units every +// interval. Deleted units are sent as nil. +func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { + return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) +} + +// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer +// size of the channels, the comparison function for detecting changes and a filter +// function for cutting down on the noise that your channel receives. 
+func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { + old := make(map[string]*UnitStatus) + statusChan := make(chan map[string]*UnitStatus, buffer) + errChan := make(chan error, buffer) + + go func() { + for { + timerChan := time.After(interval) + + units, err := c.ListUnits() + if err == nil { + cur := make(map[string]*UnitStatus) + for i := range units { + if filterUnit != nil && filterUnit(units[i].Name) { + continue + } + cur[units[i].Name] = &units[i] + } + + // add all new or changed units + changed := make(map[string]*UnitStatus) + for n, u := range cur { + if oldU, ok := old[n]; !ok || isChanged(oldU, u) { + changed[n] = u + } + delete(old, n) + } + + // add all deleted units + for oldN := range old { + changed[oldN] = nil + } + + old = cur + + if len(changed) != 0 { + statusChan <- changed + } + } else { + errChan <- err + } + + <-timerChan + } + }() + + return statusChan, errChan +} + +type SubStateUpdate struct { + UnitName string + SubState string +} + +// SetSubStateSubscriber writes to updateCh when any unit's substate changes. +// Although this writes to updateCh on every state change, the reported state +// may be more recent than the change that generated it (due to an unavoidable +// race in the systemd dbus interface). That is, this method provides a good +// way to keep a current view of all units' states, but is not guaranteed to +// show every state transition they go through. Furthermore, state changes +// will only be written to the channel with non-blocking writes. If updateCh +// is full, it attempts to write an error to errCh; if errCh is full, the error +// passes silently. +func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { + if c == nil { + msg := "nil receiver" + select { + case errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } + + c.subStateSubscriber.Lock() + defer c.subStateSubscriber.Unlock() + c.subStateSubscriber.updateCh = updateCh + c.subStateSubscriber.errCh = errCh +} + +func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) { + c.subStateSubscriber.Lock() + defer c.subStateSubscriber.Unlock() + + if c.subStateSubscriber.updateCh == nil { + return + } + + isIgnored := c.shouldIgnore(unitPath) + defer c.cleanIgnore() + if isIgnored { + return + } + + info, err := c.GetUnitPathProperties(unitPath) + if err != nil { + select { + case c.subStateSubscriber.errCh <- err: + default: + log.Printf("full error channel while reporting: %s\n", err) + } + return + } + defer c.updateIgnore(unitPath, info) + + name, ok := info["Id"].(string) + if !ok { + msg := "failed to cast info.Id" + select { + case c.subStateSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", err) + } + return + } + substate, ok := info["SubState"].(string) + if !ok { + msg := "failed to cast info.SubState" + select { + case c.subStateSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } + + update := &SubStateUpdate{name, substate} + select { + case c.subStateSubscriber.updateCh <- update: + default: + msg := "update channel is full" + select { + case c.subStateSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } +} + +// The ignore functions 
work around a wart in the systemd dbus interface. +// Requesting the properties of an unloaded unit will cause systemd to send a +// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's +// properties on UnitNew (as that's the only indication of a new unit coming up +// for the first time), we would enter an infinite loop if we did not attempt +// to detect and ignore these spurious signals. The signal themselves are +// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an +// unloaded unit's signals for a short time after requesting its properties. +// This means that we will miss e.g. a transient unit being restarted +// *immediately* upon failure and also a transient unit being started +// immediately after requesting its status (with systemctl status, for example, +// because this causes a UnitNew signal to be sent which then causes us to fetch +// the properties). + +func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { + t, ok := c.subStateSubscriber.ignore[path] + return ok && t >= time.Now().UnixNano() +} + +func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { + loadState, ok := info["LoadState"].(string) + if !ok { + return + } + + // unit is unloaded - it will trigger bad systemd dbus behavior + if loadState == "not-found" { + c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval + } +} + +// without this, ignore would grow unboundedly over time +func (c *Conn) cleanIgnore() { + now := time.Now().UnixNano() + if c.subStateSubscriber.cleanIgnore < now { + c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval + + for p, t := range c.subStateSubscriber.ignore { + if t < now { + delete(c.subStateSubscriber.ignore, p) + } + } + } +} + +// PropertiesUpdate holds a map of a unit's changed properties +type PropertiesUpdate struct { + UnitName string + Changed map[string]dbus.Variant +} + +// SetPropertiesSubscriber writes to updateCh when any unit's properties +// change. Every property change reported by systemd will be sent; that is, no +// transitions will be "missed" (as they might be with SetSubStateSubscriber). +// However, state changes will only be written to the channel with non-blocking +// writes. If updateCh is full, it attempts to write an error to errCh; if +// errCh is full, the error passes silently. 
+func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) { + c.propertiesSubscriber.Lock() + defer c.propertiesSubscriber.Unlock() + c.propertiesSubscriber.updateCh = updateCh + c.propertiesSubscriber.errCh = errCh +} + +// we don't need to worry about shouldIgnore() here because +// sendPropertiesUpdate doesn't call GetProperties() +func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) { + c.propertiesSubscriber.Lock() + defer c.propertiesSubscriber.Unlock() + + if c.propertiesSubscriber.updateCh == nil { + return + } + + update := &PropertiesUpdate{unitName(unitPath), changedProps} + + select { + case c.propertiesSubscriber.updateCh <- update: + default: + msg := "update channel is full" + select { + case c.propertiesSubscriber.errCh <- errors.New(msg): + default: + log.Printf("full error channel while reporting: %s\n", msg) + } + return + } +} diff --git a/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go new file mode 100644 index 00000000..5b408d58 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/dbus/subscription_set.go @@ -0,0 +1,57 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dbus + +import ( + "time" +) + +// SubscriptionSet returns a subscription set which is like conn.Subscribe but +// can filter to only return events for a set of units. +type SubscriptionSet struct { + *set + conn *Conn +} + +func (s *SubscriptionSet) filter(unit string) bool { + return !s.Contains(unit) +} + +// Subscribe starts listening for dbus events for all of the units in the set. +// Returns channels identical to conn.SubscribeUnits. +func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { + // TODO: Make fully evented by using systemd 209 with properties changed values + return s.conn.SubscribeUnitsCustom(time.Second, 0, + mismatchUnitStatus, + func(unit string) bool { return s.filter(unit) }, + ) +} + +// NewSubscriptionSet returns a new subscription set. +func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { + return &SubscriptionSet{newSet(), conn} +} + +// mismatchUnitStatus returns true if the provided UnitStatus objects +// are not equivalent. false is returned if the objects are equivalent. +// Only the Name, Description and state-related fields are used in +// the comparison. 
+func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
+	return u1.Name != u2.Name ||
+		u1.Description != u2.Description ||
+		u1.LoadState != u2.LoadState ||
+		u1.ActiveState != u2.ActiveState ||
+		u1.SubState != u2.SubState
+}
diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
index 5edd5a7c..92b78048 100644
--- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
+++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
@@ -1,6 +1,24 @@
 # Change history of go-restful
 
-## [v3.11.0] - 2023-08-19
+
+## [v3.12.1] - 2024-05-28
+
+- fix misroute when dealing multiple webservice with regex (#549) (thanks Haitao Chen)
+
+## [v3.12.0] - 2024-03-11
+
+- add Flush method #529 (#538)
+- fix: Improper handling of empty POST requests (#543)
+
+## [v3.11.3] - 2024-01-09
+
+- better not have 2 tags on one commit
+
+## [v3.11.1, v3.11.2] - 2024-01-09
+
+- fix by restoring custom JSON handler functions (Mike Beaumont #540)
+
+## [v3.11.0] - 2023-08-19
 
 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled.
 
diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md
index e3e30080..7234604e 100644
--- a/vendor/github.com/emicklei/go-restful/v3/README.md
+++ b/vendor/github.com/emicklei/go-restful/v3/README.md
@@ -2,7 +2,6 @@ go-restful
 ==========
 package for building REST-style Web Services using Google Go
 
-[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful)
 [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
 [![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful)
 [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful)
@@ -95,8 +94,7 @@ There are several hooks to customize the behavior of the go-restful package.
 - Trace logging
 - Compression
 - Encoders for other serializers
-- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .`
-- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/`
+- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/`
 
 ## Resources
 
diff --git a/vendor/github.com/emicklei/go-restful/v3/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go
index 1ff239f9..80adf55f 100644
--- a/vendor/github.com/emicklei/go-restful/v3/compress.go
+++ b/vendor/github.com/emicklei/go-restful/v3/compress.go
@@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
 	return c.writer.(http.CloseNotifier).CloseNotify()
 }
 
+// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it.
+func (c *CompressingResponseWriter) Flush() { + flusher, ok := c.writer.(http.Flusher) + if !ok { + // writer doesn't support http.Flusher interface + return + } + flusher.Flush() +} + // Close the underlying compressor func (c *CompressingResponseWriter) Close() error { if c.isCompressorClosed() { diff --git a/vendor/github.com/emicklei/go-restful/v3/curly.go b/vendor/github.com/emicklei/go-restful/v3/curly.go index ba1fc5d5..6fd2bcd5 100644 --- a/vendor/github.com/emicklei/go-restful/v3/curly.go +++ b/vendor/github.com/emicklei/go-restful/v3/curly.go @@ -46,10 +46,10 @@ func (c CurlyRouter) SelectRoute( // selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { candidates := make(sortableCurlyRoutes, 0, 8) - for _, each := range ws.routes { - matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb) + for _, eachRoute := range ws.routes { + matches, paramCount, staticCount := c.matchesRouteByPathTokens(eachRoute.pathParts, requestTokens, eachRoute.hasCustomVerb) if matches { - candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? + candidates.add(curlyRoute{eachRoute, paramCount, staticCount}) // TODO make sure Routes() return pointers? } } sort.Sort(candidates) @@ -72,7 +72,7 @@ func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []strin return false, 0, 0 } requestToken := requestTokens[i] - if routeHasCustomVerb && hasCustomVerb(routeToken){ + if routeHasCustomVerb && hasCustomVerb(routeToken) { if !isMatchCustomVerb(routeToken, requestToken) { return false, 0, 0 } @@ -129,44 +129,52 @@ func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpReques // detectWebService returns the best matching webService given the list of path tokens. // see also computeWebserviceScore func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService { - var best *WebService + var bestWs *WebService score := -1 - for _, each := range webServices { - matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens) + for _, eachWS := range webServices { + matches, eachScore := c.computeWebserviceScore(requestTokens, eachWS.pathExpr.tokens) if matches && (eachScore > score) { - best = each + bestWs = eachWS score = eachScore } } - return best + return bestWs } // computeWebserviceScore returns whether tokens match and // the weighted score of the longest matching consecutive tokens from the beginning. 
-func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) { - if len(tokens) > len(requestTokens) { +func (c CurlyRouter) computeWebserviceScore(requestTokens []string, routeTokens []string) (bool, int) { + if len(routeTokens) > len(requestTokens) { return false, 0 } score := 0 - for i := 0; i < len(tokens); i++ { - each := requestTokens[i] - other := tokens[i] - if len(each) == 0 && len(other) == 0 { + for i := 0; i < len(routeTokens); i++ { + eachRequestToken := requestTokens[i] + eachRouteToken := routeTokens[i] + if len(eachRequestToken) == 0 && len(eachRouteToken) == 0 { score++ continue } - if len(other) > 0 && strings.HasPrefix(other, "{") { + if len(eachRouteToken) > 0 && strings.HasPrefix(eachRouteToken, "{") { // no empty match - if len(each) == 0 { + if len(eachRequestToken) == 0 { return false, score } - score += 1 + score++ + + if colon := strings.Index(eachRouteToken, ":"); colon != -1 { + // match by regex + matchesToken, _ := c.regularMatchesPathToken(eachRouteToken, colon, eachRequestToken) + if matchesToken { + score++ // extra score for regex match + } + } } else { // not a parameter - if each != other { + if eachRequestToken != eachRouteToken { return false, score } - score += (len(tokens) - i) * 10 //fuzzy + score += (len(routeTokens) - i) * 10 //fuzzy } } return true, score diff --git a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go index 66dfc824..9808752a 100644 --- a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go +++ b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go @@ -5,11 +5,18 @@ package restful // that can be found in the LICENSE file. import ( + "encoding/json" "encoding/xml" "strings" "sync" ) +var ( + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) + // EntityReaderWriter can read and write values using an encoding such as JSON,XML. type EntityReaderWriter interface { // Read a serialized version of the value from the request. 
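The entity_accessors.go hunk above moves the `MarshalIndent`, `NewDecoder`, and `NewEncoder` hooks into the package proper, bound to `encoding/json`; the build-tag-selected `json.go`/`jsoniter.go` files they replace are deleted just below. A minimal sketch of how an application consuming this vendored go-restful could still customize JSON handling by reassigning those exported variables — the route, port, and the `UseNumber` tweak are illustrative assumptions, not part of the vendored change:

```go
// Sketch only: assumes an application using go-restful v3 that wants to tune
// JSON handling now that the jsoniter build tag is gone. The hook points
// (restful.MarshalIndent, restful.NewDecoder) are the package variables added
// in entity_accessors.go above; everything else is made up for illustration.
package main

import (
	"encoding/json"
	"io"
	"net/http"

	restful "github.com/emicklei/go-restful/v3"
)

func main() {
	// Override the JSON hooks once, before serving any requests.
	restful.MarshalIndent = func(v interface{}, prefix, indent string) ([]byte, error) {
		return json.Marshal(v) // always emit compact JSON, ignoring pretty-print settings
	}
	restful.NewDecoder = func(r io.Reader) *json.Decoder {
		d := json.NewDecoder(r)
		d.UseNumber() // decode numbers as json.Number instead of float64
		return d
	}

	// Trivial service to exercise the hooks.
	ws := new(restful.WebService)
	ws.Path("/ping").Produces(restful.MIME_JSON)
	ws.Route(ws.GET("/").To(func(req *restful.Request, resp *restful.Response) {
		resp.WriteAsJson(map[string]string{"status": "ok"})
	}))
	restful.Add(ws)

	http.ListenAndServe(":8080", restful.DefaultContainer)
}
```

Because the restored variables are typed against `encoding/json` (for example `NewDecoder` must return a `*json.Decoder`), replacing the JSON implementation wholesale is no longer a build-tag switch; customization happens through functions with the standard-library signatures, as sketched here.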
diff --git a/vendor/github.com/emicklei/go-restful/v3/json.go b/vendor/github.com/emicklei/go-restful/v3/json.go deleted file mode 100644 index 87116516..00000000 --- a/vendor/github.com/emicklei/go-restful/v3/json.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !jsoniter - -package restful - -import "encoding/json" - -var ( - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go b/vendor/github.com/emicklei/go-restful/v3/jsoniter.go deleted file mode 100644 index 11b8f8ae..00000000 --- a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build jsoniter - -package restful - -import "github.com/json-iterator/go" - -var ( - json = jsoniter.ConfigCompatibleWithStandardLibrary - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go index 07a0c91e..a9b3faaa 100644 --- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -155,7 +155,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") if (method == http.MethodPost || method == http.MethodPut || - method == http.MethodPatch) && length == "" { + method == http.MethodPatch) && (length == "" || length == "0") { return nil, NewError( http.StatusUnsupportedMediaType, fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go new file mode 100644 index 00000000..e9bb0efe --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go @@ -0,0 +1,1385 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "encoding" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. If v is nil or not a pointer, +// Unmarshal returns an InvalidUnmarshalError. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalJSON method, including +// when the input is a JSON null. +// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a JSON quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. 
+// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. By +// default, object keys which don't have a corresponding struct field are +// ignored (see Decoder.DisallowUnknownFields for an alternative). +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the JSON object into the map. The map's key type must +// either be any string type, an integer, implement json.Unmarshaler, or +// implement encoding.TextUnmarshaler. +// +// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. In any +// case, it's not guaranteed that all the remaining fields following +// the problematic one will be unmarshaled into the target object. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// “not present,” unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +func Unmarshal(data []byte, v any) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +var ds = sync.Pool{ + New: func() any { + return new(decodeState) + }, +} + +func UnmarshalWithKeys(data []byte, v any) ([]string, error) { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. 
+ + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + err := checkValid(data, &d.scan) + if err != nil { + return nil, err + } + + d.init(data) + err = d.unmarshal(v) + if err != nil { + return nil, err + } + + return d.lastKeys, nil +} + +func UnmarshalValid(data []byte, v any) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + + d.init(data) + return d.unmarshal(v) +} + +func UnmarshalValidWithKeys(data []byte, v any) ([]string, error) { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + + d.init(data) + err := d.unmarshal(v) + if err != nil { + return nil, err + } + + return d.lastKeys, nil +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of Unmarshal itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // the full path from root node to the field +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + } + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) 
+type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Pointer { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v any) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Pointer || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + d.scanWhile(scanSkipSpace) + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + err := d.value(rv) + if err != nil { + return d.addErrorContext(err) + } + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// An errorContext provides context for type errors during decoding. +type errorContext struct { + Struct reflect.Type + FieldStack []string +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext *errorContext + savedError error + useNumber bool + disallowUnknownFields bool + lastKeys []string +} + +// readIndex returns the position of the last byte read. +func (d *decodeState) readIndex() int { + return d.off - 1 +} + +// phasePanicMsg is used as a panic message when we end up with something that +// shouldn't happen. It can indicate a bug in the JSON decoder, or that +// something is editing the data slice while the decoder executes. +const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?" + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + if d.errorContext != nil { + d.errorContext.Struct = nil + // Reuse the allocated space for the FieldStack slice. + d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + } + return d +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = d.addErrorContext(err) + } +} + +// addErrorContext returns a new error enhanced with information from d.errorContext +func (d *decodeState) addErrorContext(err error) error { + if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) { + switch err := err.(type) { + case *UnmarshalTypeError: + err.Struct = d.errorContext.Struct.Name() + err.Field = strings.Join(d.errorContext.FieldStack, ".") + } + } + return err +} + +// skip scans to the end of what was started. +func (d *decodeState) skip() { + s, data, i := &d.scan, d.data, d.off + depth := len(s.parseState) + for { + op := s.step(s, data[i]) + i++ + if len(s.parseState) < depth { + d.off = i + d.opcode = op + return + } + } +} + +// scanNext processes the byte at d.data[d.off]. 
+func (d *decodeState) scanNext() { + if d.off < len(d.data) { + d.opcode = d.scan.step(&d.scan, d.data[d.off]) + d.off++ + } else { + d.opcode = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +func (d *decodeState) scanWhile(op int) { + s, data, i := &d.scan, d.data, d.off + for i < len(data) { + newOp := s.step(s, data[i]) + i++ + if newOp != op { + d.opcode = newOp + d.off = i + return + } + } + + d.off = len(data) + 1 // mark processed EOF with len+1 + d.opcode = d.scan.eof() +} + +// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the +// common case where we're decoding a literal. The decoder scans the input +// twice, once for syntax errors and to check the length of the value, and the +// second to perform the decoding. +// +// Only in the second step do we use decodeState to tokenize literals, so we +// know there aren't any syntax errors. We can take advantage of that knowledge, +// and scan a literal's bytes much more quickly. +func (d *decodeState) rescanLiteral() { + data, i := d.data, d.off +Switch: + switch data[i-1] { + case '"': // string + for ; i < len(data); i++ { + switch data[i] { + case '\\': + i++ // escaped char + case '"': + i++ // tokenize the closing quote too + break Switch + } + } + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number + for ; i < len(data); i++ { + switch data[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '.', 'e', 'E', '+', '-': + default: + break Switch + } + } + case 't': // true + i += len("rue") + case 'f': // false + i += len("alse") + case 'n': // null + i += len("ull") + } + if i < len(data) { + d.opcode = stateEndValue(&d.scan, data[i]) + } else { + d.opcode = scanEnd + } + d.off = i + 1 +} + +// value consumes a JSON value from d.data[d.off-1:], decoding into v, and +// reads the following byte ahead. If v is invalid, the value is discarded. +// The first byte of the value has been read already. +func (d *decodeState) value(v reflect.Value) error { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray: + if v.IsValid() { + if err := d.array(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginObject: + if v.IsValid() { + if err := d.object(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginLiteral: + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + if v.IsValid() { + if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil { + return err + } + } + } + return nil +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() any { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray, scanBeginObject: + d.skip() + d.scanNext() + + case scanBeginLiteral: + v := d.literalInterface() + switch v.(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// If it encounters an Unmarshaler, indirect stops and returns that. 
+// If decodingNull is true, indirect stops at the first settable pointer so it +// can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // Issue #24153 indicates that it is generally not a guaranteed property + // that you may round-trip a reflect.Value by calling Value.Addr().Elem() + // and expect the value to still be settable for values derived from + // unexported embedded struct fields. + // + // The logic below effectively does this when it first addresses the value + // (to satisfy possible pointer methods) and continues to dereference + // subsequent pointers as necessary. + // + // After the first round-trip, we set v back to the original value to + // preserve the original RW flags contained in reflect.Value. + v0 := v + haveAddr := false + + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Pointer && v.Type().Name() != "" && v.CanAddr() { + haveAddr = true + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Pointer && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Pointer) { + haveAddr = false + v = e + continue + } + } + + if v.Kind() != reflect.Pointer { + break + } + + if decodingNull && v.CanSet() { + break + } + + // Prevent infinite loop if v is an interface pointing to its own address: + // var v interface{} + // v = &v + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { + v = v.Elem() + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 && v.CanInterface() { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if !decodingNull { + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + } + + if haveAddr { + v = v0 // restore original value after round-trip Value.Addr().Elem() + haveAddr = false + } else { + v = v.Elem() + } + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into v. +// The first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + ai := d.arrayInterface() + v.Set(reflect.ValueOf(ai)) + return nil + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + case reflect.Array, reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + // Get element of array, growing if necessary. 
+ if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + if err := d.value(v.Index(i)); err != nil { + return err + } + } else { + // Ran out of fixed array: skip. + if err := d.value(reflect.Value{}); err != nil { + return err + } + } + i++ + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } + return nil +} + +var nullLiteral = []byte("null") +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + +// object consumes an object from d.data[d.off-1:], decoding into v. +// The first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + t := v.Type() + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + oi := d.objectInterface() + v.Set(reflect.ValueOf(oi)) + return nil + } + + var fields structFields + + // Check type of target: + // struct or + // map[T1]T2 where T1 is string, an integer type, + // or an encoding.TextUnmarshaler + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind, have an integer kind, + // or be an encoding.TextUnmarshaler. + switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !reflect.PointerTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + fields = cachedTypeFields(t) + // ok + default: + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + + var mapElem reflect.Value + var origErrorContext errorContext + if d.errorContext != nil { + origErrorContext = *d.errorContext + } + + var keys []string + + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquoteBytes(item) + if !ok { + panic(phasePanicMsg) + } + + keys = append(keys, string(key)) + + // Figure out field corresponding to key. 
+ var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := t.Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + if i, ok := fields.nameIndex[string(key)]; ok { + // Found an exact name match. + f = &fields.list[i] + } else { + // Fall back to the expensive case-insensitive + // linear search. + for i := range fields.list { + ff := &fields.list[i] + if ff.equalFold(ff.nameBytes, key) { + f = ff + break + } + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Pointer { + if subv.IsNil() { + // If a struct embeds a pointer to an unexported type, + // it is not possible to set a newly allocated value + // since the field is unexported. + // + // See https://golang.org/issue/21357 + if !subv.CanSet() { + d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem())) + // Invalidate subv to ensure d.value(subv) skips over + // the JSON value without assigning it to subv. + subv = reflect.Value{} + destring = false + break + } + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) + d.errorContext.Struct = t + } else if d.disallowUnknownFields { + d.saveError(fmt.Errorf("json: unknown field %q", key)) + } + } + + // Read : before value. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + if err := d.literalStore(nullLiteral, subv, false); err != nil { + return err + } + case string: + if err := d.literalStore([]byte(qv), subv, true); err != nil { + return err + } + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + if err := d.value(subv); err != nil { + return err + } + } + + // Write value back to map; + // if using struct, subv points into struct already. 
+ if v.Kind() == reflect.Map { + kt := t.Key() + var kv reflect.Value + switch { + case reflect.PointerTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(kt) + if err := d.literalStore(item, kv, true); err != nil { + return err + } + kv = kv.Elem() + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(key).Convert(kt) + default: + switch kt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := string(key) + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s := string(key) + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + default: + panic("json: Unexpected key type") // should never occur + } + } + if kv.IsValid() { + v.SetMapIndex(kv, subv) + } + } + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.errorContext != nil { + // Reset errorContext to its original state. + // Keep the same underlying array for FieldStack, to reuse the + // space and avoid unnecessary allocs. + d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] + d.errorContext.Struct = origErrorContext.Struct + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + + if v.Kind() == reflect.Map { + d.lastKeys = keys + } + return nil +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (any, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error { + // Check for unmarshaler. 
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + isNull := item[0] == 'n' // null + u, ut, pv := indirect(v, isNull) + if u != nil { + return u.UnmarshalJSON(item) + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + val := "number" + switch item[0] { + case 'n': + val = "null" + case 't', 'f': + val = "bool" + } + d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())}) + return nil + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + return ut.UnmarshalText(s) + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted && string(item) != "null" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := item[0] == 't' + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted && string(item) != "true" && string(item) != "false" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + if v.Type() == numberType && !isValidNumber(string(s)) { + return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item) + } + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + default: // number + if c != '-' && (c < '0' 
|| c > '9') { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + // s must be a valid number, because it's + // already been tokenized. + v.SetString(s) + break + } + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetFloat(n) + } + } + return nil +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() (val any) { + switch d.opcode { + default: + panic(phasePanicMsg) + case scanBeginArray: + val = d.arrayInterface() + d.scanNext() + case scanBeginObject: + val = d.objectInterface() + d.scanNext() + case scanBeginLiteral: + val = d.literalInterface() + } + return +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []any { + var v = make([]any, 0) + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]any { + m := make(map[string]any) + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read string key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + + // Read : before value. 
+ if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + return m +} + +// literalInterface consumes and returns a literal from d.data[d.off-1:] and +// it reads the following byte ahead. The first byte of the literal has been +// read already (that's how the caller knows it's a literal). +func (d *decodeState) literalInterface() any { + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + item := d.data[start:d.readIndex()] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + panic(phasePanicMsg) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var r rune + for _, c := range s[2:6] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = c - 'a' + 10 + case 'A' <= c && c <= 'F': + c = c - 'A' + 10 + default: + return -1 + } + r = r*16 + rune(c) + } + return r +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. 
+ rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go new file mode 100644 index 00000000..2e6eca44 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go @@ -0,0 +1,1486 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON as defined in +// RFC 7159. The mapping between JSON and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method and encodes the result as a JSON string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// So that the JSON will be safe to embed inside HTML