From b6da3e41c9594c7e43e2050fc58b6317b49add24 Mon Sep 17 00:00:00 2001 From: Sandeep Rajan Date: Tue, 20 Aug 2019 13:55:33 -0400 Subject: [PATCH] update dependencies --- Godeps/LICENSES | 209 +++ go.mod | 10 +- go.sum | 10 +- .../src/k8s.io/apiextensions-apiserver/go.mod | 2 +- .../src/k8s.io/apiextensions-apiserver/go.sum | 4 +- staging/src/k8s.io/cli-runtime/go.mod | 2 +- staging/src/k8s.io/cli-runtime/go.sum | 4 +- staging/src/k8s.io/kube-aggregator/go.mod | 2 +- staging/src/k8s.io/kube-aggregator/go.sum | 4 +- staging/src/k8s.io/kubectl/go.mod | 2 +- staging/src/k8s.io/kubectl/go.sum | 4 +- staging/src/k8s.io/sample-apiserver/go.mod | 2 +- staging/src/k8s.io/sample-apiserver/go.sum | 4 +- staging/src/k8s.io/sample-cli-plugin/go.mod | 2 +- staging/src/k8s.io/sample-cli-plugin/go.sum | 4 +- vendor/BUILD | 1 + .../coredns/corefile-migration/LICENSE | 201 +++ .../corefile-migration/migration/BUILD | 31 + .../migration/corefile/BUILD | 24 + .../migration/corefile/corefile.go | 179 ++ .../corefile-migration/migration/migrate.go | 445 +++++ .../corefile-migration/migration/notice.go | 48 + .../corefile-migration/migration/versions.go | 1504 +++++++++++++++++ vendor/github.com/spf13/cobra/.gitignore | 2 + vendor/github.com/spf13/cobra/BUILD | 2 + vendor/github.com/spf13/cobra/README.md | 9 +- .../spf13/cobra/bash_completions.go | 48 - vendor/github.com/spf13/cobra/command.go | 97 +- .../spf13/cobra/powershell_completions.go | 100 ++ .../spf13/cobra/powershell_completions.md | 14 + .../spf13/cobra/shell_completions.go | 85 + .../github.com/spf13/cobra/zsh_completions.go | 400 +++-- .../github.com/spf13/cobra/zsh_completions.md | 39 + vendor/modules.txt | 7 +- 34 files changed, 3320 insertions(+), 181 deletions(-) create mode 100644 vendor/github.com/coredns/corefile-migration/LICENSE create mode 100644 vendor/github.com/coredns/corefile-migration/migration/BUILD create mode 100644 vendor/github.com/coredns/corefile-migration/migration/corefile/BUILD create mode 100644 vendor/github.com/coredns/corefile-migration/migration/corefile/corefile.go create mode 100644 vendor/github.com/coredns/corefile-migration/migration/migrate.go create mode 100644 vendor/github.com/coredns/corefile-migration/migration/notice.go create mode 100644 vendor/github.com/coredns/corefile-migration/migration/versions.go create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.go create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.md create mode 100644 vendor/github.com/spf13/cobra/shell_completions.go create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.md diff --git a/Godeps/LICENSES b/Godeps/LICENSES index 1e9b0478e6f..e6c06bca938 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -3008,6 +3008,215 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================================ +================================================================================ += vendor/github.com/coredns/corefile-migration licensed under: = + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/github.com/coredns/corefile-migration/LICENSE 86d3f3a95c324c9479bd8986968f4327 +================================================================================ + + ================================================================================ = vendor/github.com/coreos/bbolt licensed under: = diff --git a/go.mod b/go.mod index 1bdbeff0917..5615abc1281 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/bazelbuild/buildtools v0.0.0-20180226164855-80c7f0d45d7e github.com/blang/semver v3.5.0+incompatible github.com/boltdb/bolt v1.3.1 // indirect - github.com/caddyserver/caddy v1.0.1 + github.com/caddyserver/caddy v1.0.3 github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b // indirect github.com/client9/misspell v0.3.4 @@ -35,6 +35,7 @@ require ( github.com/containerd/containerd v1.0.2 // indirect github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20 // indirect github.com/containernetworking/cni v0.7.1 + github.com/coredns/corefile-migration v1.0.2 github.com/coreos/etcd v3.3.13+incompatible github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea @@ -113,7 +114,7 @@ require ( github.com/seccomp/libseccomp-golang v0.9.1 // indirect github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a // indirect github.com/spf13/afero v1.2.2 - github.com/spf13/cobra v0.0.4 + github.com/spf13/cobra v0.0.5 github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.3 github.com/spf13/viper v1.3.2 @@ -201,7 +202,7 @@ replace ( github.com/bifurcation/mint => github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115 github.com/blang/semver => github.com/blang/semver v3.5.0+incompatible github.com/boltdb/bolt => github.com/boltdb/bolt v1.3.1 - github.com/caddyserver/caddy => github.com/caddyserver/caddy v1.0.1 + github.com/caddyserver/caddy => github.com/caddyserver/caddy v1.0.3 github.com/cenkalti/backoff => github.com/cenkalti/backoff v2.1.1+incompatible github.com/cespare/prettybench => github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c github.com/chai2010/gettext-go => github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 @@ -216,6 +217,7 @@ replace ( github.com/containerd/containerd => github.com/containerd/containerd v1.0.2 github.com/containerd/typeurl => github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20 github.com/containernetworking/cni => github.com/containernetworking/cni v0.7.1 + github.com/coredns/corefile-migration => github.com/coredns/corefile-migration v1.0.2 github.com/coreos/bbolt => github.com/coreos/bbolt v1.3.1-coreos.6 github.com/coreos/etcd => github.com/coreos/etcd v3.3.13+incompatible github.com/coreos/go-etcd => github.com/coreos/go-etcd v2.0.0+incompatible @@ -376,7 +378,7 @@ replace ( github.com/soheilhy/cmux => github.com/soheilhy/cmux v0.1.3 github.com/spf13/afero => github.com/spf13/afero v1.2.2 github.com/spf13/cast => github.com/spf13/cast v1.3.0 - github.com/spf13/cobra => github.com/spf13/cobra v0.0.4 + github.com/spf13/cobra => github.com/spf13/cobra v0.0.5 github.com/spf13/jwalterweatherman => github.com/spf13/jwalterweatherman v1.1.0 github.com/spf13/pflag => github.com/spf13/pflag v1.0.3 github.com/spf13/viper => github.com/spf13/viper v1.3.2 diff --git a/go.sum b/go.sum index 0eb37bd16a7..587e0a2b0f5 100644 --- a/go.sum +++ b/go.sum @@ -49,8 +49,8 @@ github.com/blang/semver v3.5.0+incompatible 
h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17 github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/caddyserver/caddy v1.0.1 h1:oor6ep+8NoJOabpFXhvjqjfeldtw1XSzfISVrbfqTKo= -github.com/caddyserver/caddy v1.0.1/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= +github.com/caddyserver/caddy v1.0.3 h1:i9gRhBgvc5ifchwWtSe7pDpsdS9+Q0Rw9oYQmYUTw1w= +github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c h1:p8i+qCbr/dNhS2FoQhRpSS7X5+IlxTa94nRNYXu4fyo= github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= @@ -77,6 +77,8 @@ github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20 h1:14r0i3IeJj6z github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/coredns/corefile-migration v1.0.2 h1:kQga1ATFIZdkBtU6c/oJdtASLcCRkDh3fW8vVyVdvUc= +github.com/coredns/corefile-migration v1.0.2/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E= github.com/coreos/bbolt v1.3.1-coreos.6 h1:uTXKg9gY70s9jMAKdfljFQcuh4e/BXOM+V+d00KFj3A= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= @@ -375,8 +377,8 @@ github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.4 h1:S0tLZ3VOKl2Te0hpq8+ke0eSJPfCnNTPiDlsfwi1/NE= -github.com/spf13/cobra v0.0.4/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.mod b/staging/src/k8s.io/apiextensions-apiserver/go.mod index aa8112565cf..a8d0f5cb17c 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/go.mod +++ b/staging/src/k8s.io/apiextensions-apiserver/go.mod @@ -17,7 +17,7 @@ require ( github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d github.com/pborman/uuid v1.2.0 github.com/prometheus/client_golang v0.9.2 - github.com/spf13/cobra v0.0.4 + github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.3 github.com/stretchr/testify v1.3.0 gopkg.in/yaml.v2 v2.2.2 diff --git a/staging/src/k8s.io/apiextensions-apiserver/go.sum b/staging/src/k8s.io/apiextensions-apiserver/go.sum index 22ecb277ebd..969d82cae06 100644 --- 
a/staging/src/k8s.io/apiextensions-apiserver/go.sum +++ b/staging/src/k8s.io/apiextensions-apiserver/go.sum @@ -224,8 +224,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.4 h1:S0tLZ3VOKl2Te0hpq8+ke0eSJPfCnNTPiDlsfwi1/NE= -github.com/spf13/cobra v0.0.4/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= diff --git a/staging/src/k8s.io/cli-runtime/go.mod b/staging/src/k8s.io/cli-runtime/go.mod index e715319f783..9848d0e4be5 100644 --- a/staging/src/k8s.io/cli-runtime/go.mod +++ b/staging/src/k8s.io/cli-runtime/go.mod @@ -11,7 +11,7 @@ require ( github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4 // indirect github.com/go-openapi/spec v0.19.2 // indirect github.com/pkg/errors v0.8.0 - github.com/spf13/cobra v0.0.4 + github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.3 github.com/stretchr/testify v1.3.0 golang.org/x/text v0.3.2 diff --git a/staging/src/k8s.io/cli-runtime/go.sum b/staging/src/k8s.io/cli-runtime/go.sum index ca262b5b221..09d97b9564d 100644 --- a/staging/src/k8s.io/cli-runtime/go.sum +++ b/staging/src/k8s.io/cli-runtime/go.sum @@ -117,8 +117,8 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.4 h1:S0tLZ3VOKl2Te0hpq8+ke0eSJPfCnNTPiDlsfwi1/NE= -github.com/spf13/cobra v0.0.4/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= diff --git a/staging/src/k8s.io/kube-aggregator/go.mod b/staging/src/k8s.io/kube-aggregator/go.mod index bf115e149d5..02db1f2e62d 100644 --- a/staging/src/k8s.io/kube-aggregator/go.mod +++ b/staging/src/k8s.io/kube-aggregator/go.mod @@ -10,7 +10,7 @@ require ( github.com/go-openapi/spec v0.19.2 github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d github.com/prometheus/client_golang v0.9.2 - github.com/spf13/cobra v0.0.4 + github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.3 github.com/stretchr/testify v1.3.0 golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc diff --git a/staging/src/k8s.io/kube-aggregator/go.sum b/staging/src/k8s.io/kube-aggregator/go.sum index 026dbb500fe..93028efc729 100644 --- 
a/staging/src/k8s.io/kube-aggregator/go.sum +++ b/staging/src/k8s.io/kube-aggregator/go.sum @@ -180,8 +180,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.4 h1:S0tLZ3VOKl2Te0hpq8+ke0eSJPfCnNTPiDlsfwi1/NE= -github.com/spf13/cobra v0.0.4/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= diff --git a/staging/src/k8s.io/kubectl/go.mod b/staging/src/k8s.io/kubectl/go.mod index d74f916764c..35998aa7c8b 100644 --- a/staging/src/k8s.io/kubectl/go.mod +++ b/staging/src/k8s.io/kubectl/go.mod @@ -29,7 +29,7 @@ require ( github.com/opencontainers/go-digest v1.0.0-rc1 // indirect github.com/russross/blackfriday v1.5.2 github.com/sirupsen/logrus v1.4.2 // indirect - github.com/spf13/cobra v0.0.4 + github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.3 github.com/stretchr/testify v1.3.0 golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f diff --git a/staging/src/k8s.io/kubectl/go.sum b/staging/src/k8s.io/kubectl/go.sum index 6d8eac43846..e5c211942c8 100644 --- a/staging/src/k8s.io/kubectl/go.sum +++ b/staging/src/k8s.io/kubectl/go.sum @@ -167,8 +167,8 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.4 h1:S0tLZ3VOKl2Te0hpq8+ke0eSJPfCnNTPiDlsfwi1/NE= -github.com/spf13/cobra v0.0.4/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= diff --git a/staging/src/k8s.io/sample-apiserver/go.mod b/staging/src/k8s.io/sample-apiserver/go.mod index 35268c5b804..7f284c83ce7 100644 --- a/staging/src/k8s.io/sample-apiserver/go.mod +++ b/staging/src/k8s.io/sample-apiserver/go.mod @@ -7,7 +7,7 @@ go 1.12 require ( github.com/go-openapi/spec v0.19.2 github.com/google/gofuzz v1.0.0 - github.com/spf13/cobra v0.0.4 + github.com/spf13/cobra v0.0.5 k8s.io/apimachinery v0.0.0 k8s.io/apiserver v0.0.0 k8s.io/client-go v0.0.0 diff --git a/staging/src/k8s.io/sample-apiserver/go.sum b/staging/src/k8s.io/sample-apiserver/go.sum index 0b8d625d8f7..02932c0da50 100644 --- a/staging/src/k8s.io/sample-apiserver/go.sum +++ b/staging/src/k8s.io/sample-apiserver/go.sum @@ -177,8 +177,8 @@ github.com/spf13/afero 
v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.4 h1:S0tLZ3VOKl2Te0hpq8+ke0eSJPfCnNTPiDlsfwi1/NE= -github.com/spf13/cobra v0.0.4/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= diff --git a/staging/src/k8s.io/sample-cli-plugin/go.mod b/staging/src/k8s.io/sample-cli-plugin/go.mod index a4a298a8b4c..c0473766070 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.mod +++ b/staging/src/k8s.io/sample-cli-plugin/go.mod @@ -5,7 +5,7 @@ module k8s.io/sample-cli-plugin go 1.12 require ( - github.com/spf13/cobra v0.0.4 + github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.3 k8s.io/cli-runtime v0.0.0 k8s.io/client-go v0.0.0 diff --git a/staging/src/k8s.io/sample-cli-plugin/go.sum b/staging/src/k8s.io/sample-cli-plugin/go.sum index ca262b5b221..09d97b9564d 100644 --- a/staging/src/k8s.io/sample-cli-plugin/go.sum +++ b/staging/src/k8s.io/sample-cli-plugin/go.sum @@ -117,8 +117,8 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.4 h1:S0tLZ3VOKl2Te0hpq8+ke0eSJPfCnNTPiDlsfwi1/NE= -github.com/spf13/cobra v0.0.4/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= diff --git a/vendor/BUILD b/vendor/BUILD index 48d5b12d093..01e1922375f 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -100,6 +100,7 @@ filegroup( "//vendor/github.com/containernetworking/cni/pkg/invoke:all-srcs", "//vendor/github.com/containernetworking/cni/pkg/types:all-srcs", "//vendor/github.com/containernetworking/cni/pkg/version:all-srcs", + "//vendor/github.com/coredns/corefile-migration/migration:all-srcs", "//vendor/github.com/coreos/bbolt:all-srcs", "//vendor/github.com/coreos/etcd/alarm:all-srcs", "//vendor/github.com/coreos/etcd/auth:all-srcs", diff --git a/vendor/github.com/coredns/corefile-migration/LICENSE b/vendor/github.com/coredns/corefile-migration/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/coredns/corefile-migration/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND 
DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/coredns/corefile-migration/migration/BUILD b/vendor/github.com/coredns/corefile-migration/migration/BUILD new file mode 100644 index 00000000000..5d8b33dc377 --- /dev/null +++ b/vendor/github.com/coredns/corefile-migration/migration/BUILD @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "migrate.go", + "notice.go", + "versions.go", + ], + importmap = "k8s.io/kubernetes/vendor/github.com/coredns/corefile-migration/migration", + importpath = "github.com/coredns/corefile-migration/migration", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/coredns/corefile-migration/migration/corefile:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//vendor/github.com/coredns/corefile-migration/migration/corefile:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coredns/corefile-migration/migration/corefile/BUILD b/vendor/github.com/coredns/corefile-migration/migration/corefile/BUILD new file mode 100644 index 00000000000..5f424b6f9ad --- /dev/null +++ b/vendor/github.com/coredns/corefile-migration/migration/corefile/BUILD @@ -0,0 +1,24 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["corefile.go"], + importmap = "k8s.io/kubernetes/vendor/github.com/coredns/corefile-migration/migration/corefile", + importpath = "github.com/coredns/corefile-migration/migration/corefile", + visibility = ["//visibility:public"], + deps = ["//vendor/github.com/caddyserver/caddy/caddyfile:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/coredns/corefile-migration/migration/corefile/corefile.go b/vendor/github.com/coredns/corefile-migration/migration/corefile/corefile.go new file mode 100644 index 00000000000..e2c2131c7b2 --- /dev/null +++ b/vendor/github.com/coredns/corefile-migration/migration/corefile/corefile.go @@ -0,0 +1,179 @@ +package corefile + +import ( + "strings" + + "github.com/caddyserver/caddy/caddyfile" +) + +type Corefile struct { + Servers []*Server +} + +type Server struct { + DomPorts []string + Plugins []*Plugin +} + +type Plugin struct { + Name string + Args []string + Options []*Option +} + +type Option struct { + Name string + Args []string +} + +func New(s string) (*Corefile, error) { + c := Corefile{} + cc := caddyfile.NewDispenser("migration", strings.NewReader(s)) + depth := 0 + var cSvr *Server + var cPlg *Plugin + for cc.Next() { + if cc.Val() == "{" { + depth += 1 + continue + } else if cc.Val() == "}" { + depth -= 1 + continue + } + val := cc.Val() + args := cc.RemainingArgs() + switch depth { + case 0: + c.Servers = append(c.Servers, + &Server{ + DomPorts: append([]string{val}, args...), + }) + cSvr = c.Servers[len(c.Servers)-1] + case 1: + cSvr.Plugins = append(cSvr.Plugins, + &Plugin{ + Name: val, + Args: args, + }) + cPlg = cSvr.Plugins[len(cSvr.Plugins)-1] + case 2: + cPlg.Options = append(cPlg.Options, + &Option{ + Name: val, + Args: args, + }) + } + } + return &c, nil +} + +func (c *Corefile) 
ToString() (out string) { + strs := []string{} + for _, s := range c.Servers { + strs = append(strs, s.ToString()) + } + return strings.Join(strs, "\n") +} + +func (s *Server) ToString() (out string) { + str := strings.Join(s.DomPorts, " ") + strs := []string{} + for _, p := range s.Plugins { + strs = append(strs, strings.Repeat(" ", indent)+p.ToString()) + } + if len(strs) > 0 { + str += " {\n" + strings.Join(strs, "\n") + "\n}\n" + } + return str +} + +func (p *Plugin) ToString() (out string) { + str := strings.Join(append([]string{p.Name}, p.Args...), " ") + strs := []string{} + for _, o := range p.Options { + strs = append(strs, strings.Repeat(" ", indent*2)+o.ToString()) + } + if len(strs) > 0 { + str += " {\n" + strings.Join(strs, "\n") + "\n" + strings.Repeat(" ", indent*1) + "}" + } + return str +} + +func (o *Option) ToString() (out string) { + str := strings.Join(append([]string{o.Name}, o.Args...), " ") + return str +} + +func (s *Server) FindMatch(def []*Server) (*Server, bool) { +NextServer: + for _, sDef := range def { + for i, dp := range sDef.DomPorts { + if dp == "*" { + continue + } + if dp == "***" { + return sDef, true + } + if i >= len(s.DomPorts) || dp != s.DomPorts[i] { + continue NextServer + } + } + if len(sDef.DomPorts) != len(s.DomPorts) { + continue + } + return sDef, true + } + return nil, false +} + +func (p *Plugin) FindMatch(def []*Plugin) (*Plugin, bool) { +NextPlugin: + for _, pDef := range def { + if pDef.Name != p.Name { + continue + } + for i, arg := range pDef.Args { + if arg == "*" { + continue + } + if arg == "***" { + return pDef, true + } + if i >= len(p.Args) || arg != p.Args[i] { + continue NextPlugin + } + } + if len(pDef.Args) != len(p.Args) { + continue + } + return pDef, true + } + return nil, false +} + +func (o *Option) FindMatch(def []*Option) (*Option, bool) { +NextPlugin: + for _, oDef := range def { + if oDef.Name != o.Name { + continue + } + for i, arg := range oDef.Args { + if arg == "*" { + continue + } + if arg == "***" { + return oDef, true + } + if i >= len(o.Args) || arg != o.Args[i] { + continue NextPlugin + } + } + if len(oDef.Args) != len(o.Args) { + continue + } + return oDef, true + } + return nil, false +} + +const indent = 4 diff --git a/vendor/github.com/coredns/corefile-migration/migration/migrate.go b/vendor/github.com/coredns/corefile-migration/migration/migrate.go new file mode 100644 index 00000000000..573f78da52e --- /dev/null +++ b/vendor/github.com/coredns/corefile-migration/migration/migrate.go @@ -0,0 +1,445 @@ +package migration + +// This package provides a set of functions to help handle migrations of CoreDNS Corefiles to be compatible with new +// versions of CoreDNS. The task of upgrading CoreDNS is the responsibility of a variety of Kubernetes management tools +// (e.g. kubeadm and others), and the precise behavior may be different for each one. This library abstracts some basic +// helper functions that make this easier to implement. + +import ( + "fmt" + "sort" + + "github.com/coredns/corefile-migration/migration/corefile" +) + +// Deprecated returns a list of deprecation notifications affecting the guven Corefile. Notifications are returned for +// any deprecated, removed, or ignored plugins/directives present in the Corefile. Notifications are also returned for +// any new default plugins that would be added in a migration. 
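+// A minimal usage sketch (illustrative only, not part of this change; the version strings
+// below are assumptions and must both be keys of the Versions map):
+//
+//	notices, err := Deprecated("1.3.1", "1.5.0", corefileStr)
+//	if err != nil {
+//		// unsupported start version or no upgrade path to the target version
+//	}
+//	for _, n := range notices {
+//		fmt.Println(n.ToString()) // one human-readable warning per affected plugin/option
+//	}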
+func Deprecated(fromCoreDNSVersion, toCoreDNSVersion, corefileStr string) ([]Notice, error) { + return getStatus(fromCoreDNSVersion, toCoreDNSVersion, corefileStr, all) +} + +// Unsupported returns a list notifications of plugins/options that are not handled supported by this migration tool, +// but may still be valid in CoreDNS. +func Unsupported(fromCoreDNSVersion, toCoreDNSVersion, corefileStr string) ([]Notice, error) { + return getStatus(fromCoreDNSVersion, toCoreDNSVersion, corefileStr, unsupported) +} + +func getStatus(fromCoreDNSVersion, toCoreDNSVersion, corefileStr, status string) ([]Notice, error) { + if fromCoreDNSVersion == toCoreDNSVersion { + return nil, nil + } + err := validUpMigration(fromCoreDNSVersion, toCoreDNSVersion) + if err != nil { + return nil, err + } + cf, err := corefile.New(corefileStr) + if err != nil { + return nil, err + } + notices := []Notice{} + v := fromCoreDNSVersion + for { + v = Versions[v].nextVersion + for _, s := range cf.Servers { + for _, p := range s.Plugins { + vp, present := Versions[v].plugins[p.Name] + if status == unsupported { + if present { + continue + } + notices = append(notices, Notice{Plugin: p.Name, Severity: status, Version: v}) + continue + } + if !present { + continue + } + if vp.status != "" && vp.status != newdefault { + notices = append(notices, Notice{ + Plugin: p.Name, + Severity: vp.status, + Version: v, + ReplacedBy: vp.replacedBy, + Additional: vp.additional, + }) + continue + } + for _, o := range p.Options { + vo, present := Versions[v].plugins[p.Name].options[o.Name] + if status == unsupported { + if present { + continue + } + notices = append(notices, Notice{ + Plugin: p.Name, + Option: o.Name, + Severity: status, + Version: v, + ReplacedBy: vo.replacedBy, + Additional: vo.additional, + }) + continue + } + if !present { + continue + } + if vo.status != "" && vo.status != newdefault { + notices = append(notices, Notice{Plugin: p.Name, Option: o.Name, Severity: vo.status, Version: v}) + continue + } + } + if status != unsupported { + CheckForNewOptions: + for name, vo := range Versions[v].plugins[p.Name].options { + if vo.status != newdefault { + continue + } + for _, o := range p.Options { + if name == o.Name { + continue CheckForNewOptions + } + } + notices = append(notices, Notice{Plugin: p.Name, Option: name, Severity: newdefault, Version: v}) + } + } + } + if status != unsupported { + CheckForNewPlugins: + for name, vp := range Versions[v].plugins { + if vp.status != newdefault { + continue + } + for _, p := range s.Plugins { + if name == p.Name { + continue CheckForNewPlugins + } + } + notices = append(notices, Notice{Plugin: name, Option: "", Severity: newdefault, Version: v}) + } + } + } + if v == toCoreDNSVersion { + break + } + } + return notices, nil +} + +// Migrate returns the Corefile converted to toCoreDNSVersion, or an error if it cannot. This function only accepts +// a forward migration, where the destination version is => the start version. +// If deprecations is true, deprecated plugins/options will be migrated as soon as they are deprecated. +// If deprecations is false, deprecated plugins/options will be migrated only once they become removed or ignored. 
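+// A minimal usage sketch (illustrative only, not part of this change; the version strings
+// are assumptions and must both be keys of the Versions map):
+//
+//	// deprecations=false: leave deprecated-but-still-working plugins untouched
+//	migrated, err := Migrate("1.3.1", "1.5.0", corefileStr, false)
+//	if err != nil {
+//		// unsupported start version or no upgrade path to the target version
+//	}
+//	// on success, migrated holds the Corefile rewritten for the target CoreDNS version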
+func Migrate(fromCoreDNSVersion, toCoreDNSVersion, corefileStr string, deprecations bool) (string, error) { + if fromCoreDNSVersion == toCoreDNSVersion { + return corefileStr, nil + } + err := validUpMigration(fromCoreDNSVersion, toCoreDNSVersion) + if err != nil { + return "", err + } + cf, err := corefile.New(corefileStr) + if err != nil { + return "", err + } + v := fromCoreDNSVersion + for { + v = Versions[v].nextVersion + newSrvs := []*corefile.Server{} + for _, s := range cf.Servers { + newPlugs := []*corefile.Plugin{} + for _, p := range s.Plugins { + vp, present := Versions[v].plugins[p.Name] + if !present { + newPlugs = append(newPlugs, p) + continue + } + if !deprecations && vp.status == deprecated { + newPlugs = append(newPlugs, p) + continue + } + if vp.action != nil { + p, err := vp.action(p) + if err != nil { + return "", err + } + if p == nil { + // remove plugin, skip options processing + continue + } + } + newOpts := []*corefile.Option{} + for _, o := range p.Options { + vo, present := Versions[v].plugins[p.Name].options[o.Name] + if !present { + newOpts = append(newOpts, o) + continue + } + if !deprecations && vo.status == deprecated { + newOpts = append(newOpts, o) + continue + } + if vo.action == nil { + newOpts = append(newOpts, o) + continue + } + o, err := vo.action(o) + if err != nil { + return "", err + } + if o == nil { + // remove option + continue + } + newOpts = append(newOpts, o) + } + newPlug := &corefile.Plugin{ + Name: p.Name, + Args: p.Args, + Options: newOpts, + } + CheckForNewOptions: + for name, vo := range Versions[v].plugins[p.Name].options { + if vo.status != newdefault { + continue + } + for _, o := range p.Options { + if name == o.Name { + continue CheckForNewOptions + } + } + newPlug, err = vo.add(newPlug) + if err != nil { + return "", err + } + } + + newPlugs = append(newPlugs, newPlug) + } + newSrv := &corefile.Server{ + DomPorts: s.DomPorts, + Plugins: newPlugs, + } + CheckForNewPlugins: + for name, vp := range Versions[v].plugins { + if vp.status != newdefault { + continue + } + for _, p := range s.Plugins { + if name == p.Name { + continue CheckForNewPlugins + } + } + newSrv, err = vp.add(newSrv) + if err != nil { + return "", err + } + } + + newSrvs = append(newSrvs, newSrv) + } + + cf = &corefile.Corefile{Servers: newSrvs} + + // apply any global corefile level post processing + if Versions[v].postProcess != nil { + cf, err = Versions[v].postProcess(cf) + if err != nil { + return "", err + } + } + + if v == toCoreDNSVersion { + break + } + } + return cf.ToString(), nil +} + +// MigrateDown returns the Corefile converted to toCoreDNSVersion, or an error if it cannot. This function only accepts +// a downward migration, where the destination version is <= the start version. 
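+// A rollback sketch (illustrative only, not part of this change; the version strings are
+// assumptions and must both be keys of the Versions map):
+//
+//	reverted, err := MigrateDown("1.5.0", "1.3.1", corefileStr)
+//	if err != nil {
+//		// unsupported start version or no downgrade path to the target version
+//	}
+//	// on success, reverted holds the Corefile rewritten for the older CoreDNS version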
+func MigrateDown(fromCoreDNSVersion, toCoreDNSVersion, corefileStr string) (string, error) { + if fromCoreDNSVersion == toCoreDNSVersion { + return corefileStr, nil + } + err := validDownMigration(fromCoreDNSVersion, toCoreDNSVersion) + if err != nil { + return "", err + } + cf, err := corefile.New(corefileStr) + if err != nil { + return "", err + } + v := fromCoreDNSVersion + for { + newSrvs := []*corefile.Server{} + for _, s := range cf.Servers { + newPlugs := []*corefile.Plugin{} + for _, p := range s.Plugins { + vp, present := Versions[v].plugins[p.Name] + if !present { + newPlugs = append(newPlugs, p) + continue + } + if vp.downAction == nil { + newPlugs = append(newPlugs, p) + continue + } + p, err := vp.downAction(p) + if err != nil { + return "", err + } + if p == nil { + // remove plugin, skip options processing + continue + } + + newOpts := []*corefile.Option{} + for _, o := range p.Options { + vo, present := Versions[v].plugins[p.Name].options[o.Name] + if !present { + newOpts = append(newOpts, o) + continue + } + if vo.downAction == nil { + newOpts = append(newOpts, o) + continue + } + o, err := vo.downAction(o) + if err != nil { + return "", err + } + if o == nil { + // remove option + continue + } + newOpts = append(newOpts, o) + } + newPlug := &corefile.Plugin{ + Name: p.Name, + Args: p.Args, + Options: newOpts, + } + newPlugs = append(newPlugs, newPlug) + } + newSrv := &corefile.Server{ + DomPorts: s.DomPorts, + Plugins: newPlugs, + } + newSrvs = append(newSrvs, newSrv) + } + + cf = &corefile.Corefile{Servers: newSrvs} + + if v == toCoreDNSVersion { + break + } + v = Versions[v].priorVersion + } + return cf.ToString(), nil +} + +// Default returns true if the Corefile is the default for a given version of Kubernetes. +// Or, if k8sVersion is empty, Default returns true if the Corefile is the default for any version of Kubernetes. +func Default(k8sVersion, corefileStr string) bool { + cf, err := corefile.New(corefileStr) + if err != nil { + return false + } +NextVersion: + for _, v := range Versions { + for _, release := range v.k8sReleases { + if k8sVersion != "" && k8sVersion != release { + continue + } + } + defCf, err := corefile.New(v.defaultConf) + if err != nil { + continue + } + // check corefile against k8s release default + if len(cf.Servers) != len(defCf.Servers) { + continue NextVersion + } + for _, s := range cf.Servers { + defS, found := s.FindMatch(defCf.Servers) + if !found { + continue NextVersion + } + if len(s.Plugins) != len(defS.Plugins) { + continue NextVersion + } + for _, p := range s.Plugins { + defP, found := p.FindMatch(defS.Plugins) + if !found { + continue NextVersion + } + if len(p.Options) != len(defP.Options) { + continue NextVersion + } + for _, o := range p.Options { + _, found := o.FindMatch(defP.Options) + if !found { + continue NextVersion + } + } + } + } + return true + } + return false +} + +// Released returns true if dockerImageSHA matches any released image of CoreDNS. 
+func Released(dockerImageSHA string) bool { + for _, v := range Versions { + if v.dockerImageSHA == dockerImageSHA { + return true + } + } + return false +} + +// ValidVersions returns a list of all versions defined +func ValidVersions() []string { + var vStrs []string + for vStr := range Versions { + vStrs = append(vStrs, vStr) + } + sort.Strings(vStrs) + return vStrs +} + +func validateVersion(fromCoreDNSVersion string) error { + if _, ok := Versions[fromCoreDNSVersion]; !ok { + return fmt.Errorf("start version '%v' not supported", fromCoreDNSVersion) + } + return nil +} + +func validUpMigration(fromCoreDNSVersion, toCoreDNSVersion string) error { + err := validateVersion(fromCoreDNSVersion) + if err != nil { + return err + } + for next := Versions[fromCoreDNSVersion].nextVersion; next != ""; next = Versions[next].nextVersion { + if next != toCoreDNSVersion { + continue + } + return nil + } + return fmt.Errorf("cannot migrate up to '%v' from '%v'", toCoreDNSVersion, fromCoreDNSVersion) +} + +func validDownMigration(fromCoreDNSVersion, toCoreDNSVersion string) error { + err := validateVersion(fromCoreDNSVersion) + if err != nil { + return err + } + for prior := Versions[fromCoreDNSVersion].priorVersion; prior != ""; prior = Versions[prior].priorVersion { + if prior != toCoreDNSVersion { + continue + } + return nil + } + return fmt.Errorf("cannot migrate down to '%v' from '%v'", toCoreDNSVersion, fromCoreDNSVersion) +} diff --git a/vendor/github.com/coredns/corefile-migration/migration/notice.go b/vendor/github.com/coredns/corefile-migration/migration/notice.go new file mode 100644 index 00000000000..c5778d3e921 --- /dev/null +++ b/vendor/github.com/coredns/corefile-migration/migration/notice.go @@ -0,0 +1,48 @@ +package migration + +import "fmt" + +// Notice is a migration warning +type Notice struct { + Plugin string + Option string + Severity string // 'deprecated', 'removed', or 'unsupported' + ReplacedBy string + Additional string + Version string +} + +func (n *Notice) ToString() string { + s := "" + if n.Option == "" { + s += fmt.Sprintf(`Plugin "%v" `, n.Plugin) + } else { + s += fmt.Sprintf(`Option "%v" in plugin "%v" `, n.Option, n.Plugin) + } + if n.Severity == unsupported { + s += "is unsupported by this migration tool in " + n.Version + "." + } else if n.Severity == newdefault { + s += "is added as a default in " + n.Version + "." + } else { + s += "is " + n.Severity + " in " + n.Version + "." + } + if n.ReplacedBy != "" { + s += fmt.Sprintf(` It is replaced by "%v".`, n.ReplacedBy) + } + if n.Additional != "" { + s += " " + n.Additional + } + return s +} + +const ( + // The following statuses are used to indicate the state of support/deprecation in a given release. + deprecated = "deprecated" // deprecated, but still completely functional + ignored = "ignored" // if included in the corefile, it will be ignored by CoreDNS + removed = "removed" // completely removed from CoreDNS, and would cause CoreDNS to exit if present in the Corefile + newdefault = "newdefault" // added to the default corefile. CoreDNS may not function properly if it is not present in the corefile. 
+ unsupported = "unsupported" // the plugin/option is not supported by the migration tool + + // The following statuses are used for selecting/filtering notifications + all = "all" // show all statuses +) diff --git a/vendor/github.com/coredns/corefile-migration/migration/versions.go b/vendor/github.com/coredns/corefile-migration/migration/versions.go new file mode 100644 index 00000000000..3598bd8ba64 --- /dev/null +++ b/vendor/github.com/coredns/corefile-migration/migration/versions.go @@ -0,0 +1,1504 @@ +package migration + +import ( + "errors" + "github.com/coredns/corefile-migration/migration/corefile" +) + +type plugin struct { + status string + replacedBy string + additional string + options map[string]option + action pluginActionFn // action affecting this plugin only + add serverActionFn // action to add a new plugin to the server block + downAction pluginActionFn // downgrade action affecting this plugin only +} + +type option struct { + status string + replacedBy string + additional string + action optionActionFn // action affecting this option only + add pluginActionFn // action to add the option to the plugin + downAction optionActionFn // downgrade action affecting this option only +} + +type release struct { + k8sReleases []string + nextVersion string + priorVersion string + dockerImageSHA string + plugins map[string]plugin // list of plugins with deprecation status and migration actions + + // postProcess is a post processing action to take on the corefile as a whole. Used for complex migration + // tasks that dont fit well into the modular plugin/option migration framework. For example, when the + // action on a plugin would need to extend beyond the scope of that plugin (affecting other plugins, or + // server blocks, etc). e.g. Splitting plugins out into separate server blocks. + postProcess corefileAction + + // defaultConf holds the default Corefile template packaged with the corresponding k8sReleases. + // Wildcards are used for fuzzy matching: + // "*" matches exactly one token + // "***" matches 0 all remaining tokens on the line + // Order of server blocks, plugins, and options does not matter. + // Order of arguments does matter. 
+ defaultConf string +} + +type corefileAction func(*corefile.Corefile) (*corefile.Corefile, error) +type serverActionFn func(*corefile.Server) (*corefile.Server, error) +type pluginActionFn func(*corefile.Plugin) (*corefile.Plugin, error) +type optionActionFn func(*corefile.Option) (*corefile.Option, error) + +func removePlugin(*corefile.Plugin) (*corefile.Plugin, error) { return nil, nil } +func removeOption(*corefile.Option) (*corefile.Option, error) { return nil, nil } + +func renamePlugin(p *corefile.Plugin, to string) (*corefile.Plugin, error) { + p.Name = to + return p, nil +} + +func addToServerBlockWithPlugins(sb *corefile.Server, newPlugin *corefile.Plugin, with []string) (*corefile.Server, error) { + if len(with) == 0 { + // add to all blocks + sb.Plugins = append(sb.Plugins, newPlugin) + return sb, nil + } + for _, p := range sb.Plugins { + for _, w := range with { + if w == p.Name { + // add to this block + sb.Plugins = append(sb.Plugins, newPlugin) + return sb, nil + } + } + } + return sb, nil +} + +func addToKubernetesServerBlocks(sb *corefile.Server, newPlugin *corefile.Plugin) (*corefile.Server, error) { + return addToServerBlockWithPlugins(sb, newPlugin, []string{"kubernetes"}) +} + +func addToForwardingServerBlocks(sb *corefile.Server, newPlugin *corefile.Plugin) (*corefile.Server, error) { + return addToServerBlockWithPlugins(sb, newPlugin, []string{"forward", "proxy"}) +} + +func addToAllServerBlocks(sb *corefile.Server, newPlugin *corefile.Plugin) (*corefile.Server, error) { + return addToServerBlockWithPlugins(sb, newPlugin, []string{}) +} + +var Versions = map[string]release{ + "1.6.2": { + priorVersion: "1.6.1", + dockerImageSHA: "12eb885b8685b1b13a04ecf5c23bc809c2e57917252fd7b0be9e9c00644e8ee5", + plugins: map[string]plugin{ + "errors": { + options: map[string]option{ + "consolidate": {}, + }, + }, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "ready": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "endpoint": { + status: ignored, + action: useFirstArgumentOnly, + }, + "tls": {}, + "kubeconfig": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": { + status: ignored, + action: removeOption, + }, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "k8s_external": { + options: map[string]option{ + "apex": {}, + "ttl": {}, + }, + }, + "prometheus": {}, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "loop": {}, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.6.1": { + nextVersion: "1.6.2", + priorVersion: "1.6.0", + dockerImageSHA: "9ae3b6fcac4ee821362277de6bd8fd2236fa7d3e19af2ef0406d80b595620a7a", + plugins: map[string]plugin{ + "errors": { + options: map[string]option{ + "consolidate": {}, + }, + }, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "ready": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "endpoint": { + status: ignored, + action: useFirstArgumentOnly, + }, + "tls": {}, + "kubeconfig": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": { + status: ignored, + action: removeOption, + }, + "ttl": {}, + "noendpoints": {}, 
+ "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "k8s_external": { + options: map[string]option{ + "apex": {}, + "ttl": {}, + }, + }, + "prometheus": {}, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "loop": {}, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.6.0": { + nextVersion: "1.6.1", + priorVersion: "1.5.2", + dockerImageSHA: "263d03f2b889a75a0b91e035c2a14d45d7c1559c53444c5f7abf3a76014b779d", + plugins: map[string]plugin{ + "errors": { + options: map[string]option{ + "consolidate": {}, + }, + }, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "ready": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": { + status: removed, + action: removeOption, + }, + "endpoint": { + status: ignored, + action: useFirstArgumentOnly, + }, + "tls": {}, + "kubeconfig": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": { + status: ignored, + action: removeOption, + }, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "k8s_external": { + options: map[string]option{ + "apex": {}, + "ttl": {}, + }, + }, + "prometheus": {}, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "loop": {}, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.5.2": { + nextVersion: "1.6.0", + priorVersion: "1.5.1", + dockerImageSHA: "586d15ec14911ee680ac9c5af20ff24b9d1412fbbf0e05862ee1f5c37baa65b2", + plugins: map[string]plugin{ + "errors": { + options: map[string]option{ + "consolidate": {}, + }, + }, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "ready": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": { + status: deprecated, + action: removeOption, + }, + "endpoint": { + status: ignored, + action: useFirstArgumentOnly, + }, + "tls": {}, + "kubeconfig": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": { + status: ignored, + action: removeOption, + }, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "k8s_external": { + options: map[string]option{ + "apex": {}, + "ttl": {}, + }, + }, + "prometheus": {}, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "loop": {}, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.5.1": { + nextVersion: "1.5.2", + priorVersion: "1.5.0", + dockerImageSHA: "451817637035535ae1fc8639753b453fa4b781d0dea557d5da5cb3c131e62ef5", + plugins: map[string]plugin{ + "errors": { + options: map[string]option{ + "consolidate": {}, + }, + }, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "ready": {}, 
+ "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": { + status: deprecated, + action: removeOption, + }, + "endpoint": { + status: ignored, + action: useFirstArgumentOnly, + }, + "tls": {}, + "kubeconfig": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": { + status: ignored, + action: removeOption, + }, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "k8s_external": { + options: map[string]option{ + "apex": {}, + "ttl": {}, + }, + }, + "prometheus": {}, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "loop": {}, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.5.0": { + nextVersion: "1.5.1", + priorVersion: "1.4.0", + dockerImageSHA: "e83beb5e43f8513fa735e77ffc5859640baea30a882a11cc75c4c3244a737d3c", + plugins: map[string]plugin{ + "errors": { + options: map[string]option{ + "consolidate": {}, + }, + }, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "ready": { + status: newdefault, + add: func(c *corefile.Server) (*corefile.Server, error) { + return addToKubernetesServerBlocks(c, &corefile.Plugin{Name: "ready"}) + }, + downAction: removePlugin, + }, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": { + status: deprecated, + action: removeOption, + }, + "endpoint": { + status: ignored, + action: useFirstArgumentOnly, + }, + "tls": {}, + "kubeconfig": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": { + status: ignored, + action: removeOption, + }, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "k8s_external": { + options: map[string]option{ + "apex": {}, + "ttl": {}, + }, + }, + "prometheus": {}, + "proxy": { + status: removed, + replacedBy: "forward", + action: proxyToForwardPluginAction, + options: proxyToForwardOptionsMigrations, + }, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "loop": {}, + "reload": {}, + "loadbalance": {}, + }, + postProcess: breakForwardStubDomainsIntoServerBlocks, + }, + "1.4.0": { + nextVersion: "1.5.0", + priorVersion: "1.3.1", + dockerImageSHA: "70a92e9f6fc604f9b629ca331b6135287244a86612f550941193ec7e12759417", + plugins: map[string]plugin{ + "errors": { + options: map[string]option{ + "consolidate": {}, + }, + }, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": {}, + "endpoint": { + status: ignored, + action: useFirstArgumentOnly, + }, + "tls": {}, + "kubeconfig": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": { + status: deprecated, + action: removeOption, + }, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "k8s_external": { + options: map[string]option{ + "apex": {}, + "ttl": {}, + }, + }, + 
"prometheus": {}, + "proxy": { + status: deprecated, + replacedBy: "forward", + action: proxyToForwardPluginAction, + options: proxyToForwardOptionsMigrations, + }, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "loop": {}, + "reload": {}, + "loadbalance": {}, + }, + postProcess: breakForwardStubDomainsIntoServerBlocks, + }, + "1.3.1": { + nextVersion: "1.4.0", + priorVersion: "1.3.0", + k8sReleases: []string{"1.15", "1.14"}, + dockerImageSHA: "02382353821b12c21b062c59184e227e001079bb13ebd01f9d3270ba0fcbf1e4", + defaultConf: `.:53 { + errors + health + kubernetes * *** { + pods insecure + upstream + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + forward . * + cache 30 + loop + reload + loadbalance +}`, + plugins: map[string]plugin{ + "errors": { + options: map[string]option{ + "consolidate": {}, + }, + }, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": {}, + "endpoint": { + status: deprecated, + action: useFirstArgumentOnly, + }, + "tls": {}, + "kubeconfig": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": {}, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "k8s_external": { + options: map[string]option{ + "apex": {}, + "ttl": {}, + }, + }, + "prometheus": {}, + "proxy": { + options: map[string]option{ + "policy": {}, + "fail_timeout": {}, + "max_fails": {}, + "health_check": {}, + "except": {}, + "spray": {}, + "protocol": {}, + }, + }, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "loop": {}, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.3.0": { + nextVersion: "1.3.1", + priorVersion: "1.2.6", + dockerImageSHA: "e030773c7fee285435ed7fc7623532ee54c4c1c4911fb24d95cd0170a8a768bc", + plugins: map[string]plugin{ + "errors": { + options: map[string]option{ + "consolidate": {}, + }, + }, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": {}, + "endpoint": {}, + "tls": {}, + "kubeconfig": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": {}, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "k8s_external": { + downAction: removePlugin, + options: map[string]option{ + "apex": {}, + "ttl": {}, + }, + }, + "prometheus": {}, + "proxy": { + options: map[string]option{ + "policy": {}, + "fail_timeout": {}, + "max_fails": {}, + "health_check": {}, + "except": {}, + "spray": {}, + "protocol": {}, + }, + }, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + 
"prefetch": {}, + }, + }, + "loop": {}, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.2.6": { + nextVersion: "1.3.0", + priorVersion: "1.2.5", + k8sReleases: []string{"1.13"}, + dockerImageSHA: "81936728011c0df9404cb70b95c17bbc8af922ec9a70d0561a5d01fefa6ffa51", + defaultConf: `.:53 { + errors + health + kubernetes * *** { + pods insecure + upstream + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + proxy . * + cache 30 + loop + reload + loadbalance +}`, + plugins: map[string]plugin{ + "errors": { + options: map[string]option{ + "consolidate": {}, + }, + }, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": {}, + "endpoint": {}, + "tls": {}, + "kubeconfig": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": {}, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "prometheus": {}, + "proxy": { + options: map[string]option{ + "policy": {}, + "fail_timeout": {}, + "max_fails": {}, + "health_check": {}, + "except": {}, + "spray": {}, + "protocol": {}, + }, + }, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "loop": {}, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.2.5": { + nextVersion: "1.2.6", + priorVersion: "1.2.4", + dockerImageSHA: "33c8da20b887ae12433ec5c40bfddefbbfa233d5ce11fb067122e68af30291d6", + plugins: map[string]plugin{ + "errors": {}, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": {}, + "endpoint": {}, + "tls": {}, + "kubeconfig": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": {}, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "prometheus": {}, + "proxy": { + options: map[string]option{ + "policy": {}, + "fail_timeout": {}, + "max_fails": {}, + "health_check": {}, + "except": {}, + "spray": {}, + "protocol": {}, + }, + }, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "loop": {}, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.2.4": { + nextVersion: "1.2.5", + priorVersion: "1.2.3", + dockerImageSHA: "a0d40ad961a714c699ee7b61b77441d165f6252f9fb84ac625d04a8d8554c0ec", + plugins: map[string]plugin{ + "errors": {}, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": {}, + "endpoint": {}, + "tls": {}, + "kubeconfig": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": {}, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "prometheus": {}, + "proxy": { + options: map[string]option{ + "policy": {}, + "fail_timeout": {}, + "max_fails": {}, + "health_check": {}, + "except": {}, + 
"spray": {}, + "protocol": {}, + }, + }, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "loop": {}, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.2.3": { + nextVersion: "1.2.4", + priorVersion: "1.2.2", + dockerImageSHA: "12f3cab301c826978fac736fd40aca21ac023102fd7f4aa6b4341ae9ba89e90e", + plugins: map[string]plugin{ + "errors": {}, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": {}, + "endpoint": {}, + "tls": {}, + "kubeconfig": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": {}, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "prometheus": {}, + "proxy": { + options: map[string]option{ + "policy": {}, + "fail_timeout": {}, + "max_fails": {}, + "health_check": {}, + "except": {}, + "spray": {}, + "protocol": {}, + }, + }, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "loop": {}, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.2.2": { + nextVersion: "1.2.3", + priorVersion: "1.2.1", + k8sReleases: []string{"1.12"}, + dockerImageSHA: "3e2be1cec87aca0b74b7668bbe8c02964a95a402e45ceb51b2252629d608d03a", + defaultConf: `.:53 { + errors + health + kubernetes * *** { + pods insecure + upstream + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + proxy . 
* + cache 30 + loop + reload + loadbalance +}`, + plugins: map[string]plugin{ + "errors": {}, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": {}, + "endpoint": {}, + "tls": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": {}, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "prometheus": {}, + "proxy": { + options: map[string]option{ + "policy": {}, + "fail_timeout": {}, + "max_fails": {}, + "health_check": {}, + "except": {}, + "spray": {}, + "protocol": {}, + }, + }, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "loop": {}, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.2.1": { + nextVersion: "1.2.2", + priorVersion: "1.2.0", + dockerImageSHA: "fb129c6a7c8912bc6d9cc4505e1f9007c5565ceb1aa6369750e60cc79771a244", + plugins: map[string]plugin{ + "errors": {}, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": {}, + "endpoint": {}, + "tls": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": {}, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "prometheus": {}, + "proxy": { + options: map[string]option{ + "policy": {}, + "fail_timeout": {}, + "max_fails": {}, + "health_check": {}, + "except": {}, + "spray": {}, + "protocol": {}, + }, + }, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "loop": { + status: newdefault, + add: func(s *corefile.Server) (*corefile.Server, error) { + return addToForwardingServerBlocks(s, &corefile.Plugin{Name: "loop"}) + }, + downAction: removePlugin, + }, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.2.0": { + nextVersion: "1.2.1", + priorVersion: "1.1.4", + dockerImageSHA: "ae69a32f8cc29a3e2af9628b6473f24d3e977950a2cb62ce8911478a61215471", + plugins: map[string]plugin{ + "errors": {}, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": {}, + "endpoint": {}, + "tls": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": {}, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "prometheus": {}, + "proxy": { + options: map[string]option{ + "policy": {}, + "fail_timeout": {}, + "max_fails": {}, + "health_check": {}, + "except": {}, + "spray": {}, + "protocol": { + status: removed, + action: proxyRemoveHttpsGoogleProtocol, + }, + }, + }, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "prefer_udp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: 
map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.1.4": { + nextVersion: "1.2.0", + priorVersion: "1.1.3", + dockerImageSHA: "463c7021141dd3bfd4a75812f4b735ef6aadc0253a128f15ffe16422abe56e50", + plugins: map[string]plugin{ + "errors": {}, + "log": { + options: map[string]option{ + "class": {}, + }, + }, + "health": {}, + "autopath": {}, + "kubernetes": { + options: map[string]option{ + "resyncperiod": {}, + "endpoint": {}, + "tls": {}, + "namespaces": {}, + "labels": {}, + "pods": {}, + "endpoint_pod_names": {}, + "upstream": {}, + "ttl": {}, + "noendpoints": {}, + "transfer": {}, + "fallthrough": {}, + "ignore": {}, + }, + }, + "prometheus": {}, + "proxy": { + options: map[string]option{ + "policy": {}, + "fail_timeout": {}, + "max_fails": {}, + "health_check": {}, + "except": {}, + "spray": {}, + "protocol": { + status: ignored, + action: proxyRemoveHttpsGoogleProtocol, + }, + }, + }, + "forward": { + options: map[string]option{ + "except": {}, + "force_tcp": {}, + "expire": {}, + "max_fails": {}, + "tls": {}, + "tls_servername": {}, + "policy": {}, + "health_check": {}, + }, + }, + "cache": { + options: map[string]option{ + "success": {}, + "denial": {}, + "prefetch": {}, + }, + }, + "reload": {}, + "loadbalance": {}, + }, + }, + "1.1.3": { + nextVersion: "1.1.4", + k8sReleases: []string{"1.11"}, + dockerImageSHA: "a5dd18e048983c7401e15648b55c3ef950601a86dd22370ef5dfc3e72a108aaa", + defaultConf: `.:53 { + errors + health + kubernetes * *** { + pods insecure + upstream + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + proxy . * + cache 30 + reload +}`}, +} + +var proxyToForwardOptionsMigrations = map[string]option{ + "policy": { + action: func(o *corefile.Option) (*corefile.Option, error) { + if len(o.Args) == 2 && o.Args[1] == "least_conn" { + o.Name = "force_tcp" + o.Args = nil + } + return o, nil + }, + }, + "except": {}, + "fail_timeout": {action: removeOption}, + "max_fails": {action: removeOption}, + "health_check": {action: removeOption}, + "spray": {action: removeOption}, + "protocol": { + action: func(o *corefile.Option) (*corefile.Option, error) { + if len(o.Args) >= 2 && o.Args[1] == "force_tcp" { + o.Name = "force_tcp" + o.Args = nil + return o, nil + } + return nil, nil + }, + }, +} + +var proxyToForwardPluginAction = func(p *corefile.Plugin) (*corefile.Plugin, error) { + return renamePlugin(p, "forward") +} + +var useFirstArgumentOnly = func(o *corefile.Option) (*corefile.Option, error) { + if len(o.Args) < 1 { + return o, nil + } + o.Args = o.Args[:1] + return o, nil +} + +var proxyRemoveHttpsGoogleProtocol = func(o *corefile.Option) (*corefile.Option, error) { + if len(o.Args) > 0 && o.Args[0] == "https_google" { + return nil, nil + } + return o, nil +} + +func breakForwardStubDomainsIntoServerBlocks(cf *corefile.Corefile) (*corefile.Corefile, error) { + for _, sb := range cf.Servers { + for j, fwd := range sb.Plugins { + if fwd.Name != "forward" { + continue + } + if len(fwd.Args) == 0 { + return nil, errors.New("found invalid forward plugin declaration") + } + if fwd.Args[0] == "." { + // dont move the default upstream + continue + } + if len(sb.DomPorts) != 1 { + return cf, errors.New("unhandled migration of multi-domain/port server block") + } + if sb.DomPorts[0] != "." 
&& sb.DomPorts[0] != ".:53" { + return cf, errors.New("unhandled migration of non-default domain/port server block") + } + + newSb := &corefile.Server{} // create a new server block + newSb.DomPorts = []string{fwd.Args[0]} // copy the forward zone to the server block domain + fwd.Args[0] = "." // the plugin's zone changes to "." for brevity + newSb.Plugins = append(newSb.Plugins, fwd) // add the plugin to its new server block + + // Add appropriate addtl plugins to new server block + newSb.Plugins = append(newSb.Plugins, &corefile.Plugin{Name: "loop"}) + newSb.Plugins = append(newSb.Plugins, &corefile.Plugin{Name: "errors"}) + newSb.Plugins = append(newSb.Plugins, &corefile.Plugin{Name: "cache", Args: []string{"30"}}) + + //add new server block to corefile + cf.Servers = append(cf.Servers, newSb) + + //remove the forward plugin from the original server block + sb.Plugins = append(sb.Plugins[:j], sb.Plugins[j+1:]...) + } + } + return cf, nil +} diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore index 1b8c7c26116..3b053c59ec2 100644 --- a/vendor/github.com/spf13/cobra/.gitignore +++ b/vendor/github.com/spf13/cobra/.gitignore @@ -34,3 +34,5 @@ tags *.exe cobra.test + +.idea/* diff --git a/vendor/github.com/spf13/cobra/BUILD b/vendor/github.com/spf13/cobra/BUILD index 0e0e7e0169f..b5a95c3637b 100644 --- a/vendor/github.com/spf13/cobra/BUILD +++ b/vendor/github.com/spf13/cobra/BUILD @@ -9,6 +9,8 @@ go_library( "command.go", "command_notwin.go", "command_win.go", + "powershell_completions.go", + "shell_completions.go", "zsh_completions.go", ], importmap = "k8s.io/kubernetes/vendor/github.com/spf13/cobra", diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index ff16e3f60df..60c5a425bc9 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -23,6 +23,7 @@ Many of the most widely used Go projects are built using Cobra, such as: [Istio](https://istio.io), [Prototool](https://github.com/uber/prototool), [mattermost-server](https://github.com/mattermost/mattermost-server), +[Gardener](https://github.com/gardener/gardenctl), etc. [![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) @@ -48,6 +49,7 @@ etc. * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens) * [Generating documentation for your command](#generating-documentation-for-your-command) * [Generating bash completions](#generating-bash-completions) + * [Generating zsh completions](#generating-zsh-completions) - [Contributing](#contributing) - [License](#license) @@ -336,7 +338,7 @@ rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose out A flag can also be assigned locally which will only apply to that specific command. ```go -rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") +localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") ``` ### Local Flag on Parent Commands @@ -719,6 +721,11 @@ Cobra can generate documentation based on subcommands, flags, etc. in the follow Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). +## Generating zsh completions + +Cobra can generate zsh-completion file. Read more about it in +[Zsh Completions](zsh_completions.md). 
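As a quick illustration of the completion generators this bump pulls in, here is a minimal sketch of driving them from application code. It is not part of the vendored sources: the command name `myapp` and the output filenames are placeholders, and only the exported helpers visible in this diff (`GenBashCompletionFile`, `GenZshCompletionFile`, `GenPowerShellCompletionFile`) are used.

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
)

func main() {
	// A minimal root command; "myapp" is a placeholder name for illustration.
	rootCmd := &cobra.Command{
		Use:   "myapp",
		Short: "example command used to demonstrate completion generation",
		Run:   func(cmd *cobra.Command, args []string) {},
	}

	// Each generator writes a self-contained completion script for one shell.
	if err := rootCmd.GenBashCompletionFile("myapp.bash"); err != nil {
		log.Fatal(err)
	}
	if err := rootCmd.GenZshCompletionFile("_myapp"); err != nil {
		log.Fatal(err)
	}
	if err := rootCmd.GenPowerShellCompletionFile("myapp.ps1"); err != nil {
		log.Fatal(err)
	}
}
```

Note that in this version the zsh generator renders a text/template against the root command (see `GenZshCompletion` below), so the script always describes the full command tree regardless of which command the method is called on.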
+ # Contributing 1. Fork it diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index c3c1e50188f..57bb8e1b3fd 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -545,51 +545,3 @@ func (c *Command) GenBashCompletionFile(filename string) error { return c.GenBashCompletion(outFile) } - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, -// and causes your command to report an error if invoked without the flag. -func (c *Command) MarkFlagRequired(name string) error { - return MarkFlagRequired(c.Flags(), name) -} - -// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists, -// and causes your command to report an error if invoked without the flag. -func (c *Command) MarkPersistentFlagRequired(name string) error { - return MarkFlagRequired(c.PersistentFlags(), name) -} - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, -// and causes your command to report an error if invoked without the flag. -func MarkFlagRequired(flags *pflag.FlagSet, name string) error { - return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) -} - -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (c *Command) MarkFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.Flags(), name, extensions...) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. -// Generated bash autocompletion will call the bash function f for the flag. -func (c *Command) MarkFlagCustom(name string, f string) error { - return MarkFlagCustom(c.Flags(), name, f) -} - -// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(c.PersistentFlags(), name, extensions...) -} - -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { - return flags.SetAnnotation(name, BashCompFilenameExt, extensions) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists. -// Generated bash autocompletion will call the bash function f for the flag. -func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { - return flags.SetAnnotation(name, BashCompCustom, []string{f}) -} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index b257f91b6f1..c7e89830343 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -177,8 +177,6 @@ type Command struct { // that we can use on every pflag set and children commands globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName - // output is an output writer defined by user. 
- output io.Writer // usageFunc is usage func defined by user. usageFunc func(*Command) error // usageTemplate is usage template defined by user. @@ -195,6 +193,13 @@ type Command struct { helpCommand *Command // versionTemplate is the version template defined by user. versionTemplate string + + // inReader is a reader defined by the user that replaces stdin + inReader io.Reader + // outWriter is a writer defined by the user that replaces stdout + outWriter io.Writer + // errWriter is a writer defined by the user that replaces stderr + errWriter io.Writer } // SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden @@ -205,8 +210,28 @@ func (c *Command) SetArgs(a []string) { // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. +// Deprecated: Use SetOut and/or SetErr instead func (c *Command) SetOutput(output io.Writer) { - c.output = output + c.outWriter = output + c.errWriter = output +} + +// SetOut sets the destination for usage messages. +// If newOut is nil, os.Stdout is used. +func (c *Command) SetOut(newOut io.Writer) { + c.outWriter = newOut +} + +// SetErr sets the destination for error messages. +// If newErr is nil, os.Stderr is used. +func (c *Command) SetErr(newErr io.Writer) { + c.errWriter = newErr +} + +// SetOut sets the source for input data +// If newIn is nil, os.Stdin is used. +func (c *Command) SetIn(newIn io.Reader) { + c.inReader = newIn } // SetUsageFunc sets usage function. Usage can be defined by application. @@ -267,9 +292,19 @@ func (c *Command) OutOrStderr() io.Writer { return c.getOut(os.Stderr) } +// ErrOrStderr returns output to stderr +func (c *Command) ErrOrStderr() io.Writer { + return c.getErr(os.Stderr) +} + +// ErrOrStderr returns output to stderr +func (c *Command) InOrStdin() io.Reader { + return c.getIn(os.Stdin) +} + func (c *Command) getOut(def io.Writer) io.Writer { - if c.output != nil { - return c.output + if c.outWriter != nil { + return c.outWriter } if c.HasParent() { return c.parent.getOut(def) @@ -277,6 +312,26 @@ func (c *Command) getOut(def io.Writer) io.Writer { return def } +func (c *Command) getErr(def io.Writer) io.Writer { + if c.errWriter != nil { + return c.errWriter + } + if c.HasParent() { + return c.parent.getErr(def) + } + return def +} + +func (c *Command) getIn(def io.Reader) io.Reader { + if c.inReader != nil { + return c.inReader + } + if c.HasParent() { + return c.parent.getIn(def) + } + return def +} + // UsageFunc returns either the function set by SetUsageFunc for this command // or a parent, or it returns a default usage function. func (c *Command) UsageFunc() (f func(*Command) error) { @@ -329,13 +384,22 @@ func (c *Command) Help() error { return nil } -// UsageString return usage string. +// UsageString returns usage string. func (c *Command) UsageString() string { - tmpOutput := c.output + // Storing normal writers + tmpOutput := c.outWriter + tmpErr := c.errWriter + bb := new(bytes.Buffer) - c.SetOutput(bb) + c.outWriter = bb + c.errWriter = bb + c.Usage() - c.output = tmpOutput + + // Setting things back to normal + c.outWriter = tmpOutput + c.errWriter = tmpErr + return bb.String() } @@ -1068,6 +1132,21 @@ func (c *Command) Printf(format string, i ...interface{}) { c.Print(fmt.Sprintf(format, i...)) } +// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set. +func (c *Command) PrintErr(i ...interface{}) { + fmt.Fprint(c.ErrOrStderr(), i...) 
+} + +// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set. +func (c *Command) PrintErrln(i ...interface{}) { + c.Print(fmt.Sprintln(i...)) +} + +// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set. +func (c *Command) PrintErrf(format string, i ...interface{}) { + c.Print(fmt.Sprintf(format, i...)) +} + // CommandPath returns the full path to this command. func (c *Command) CommandPath() string { if c.HasParent() { diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go new file mode 100644 index 00000000000..756c61b9dcb --- /dev/null +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -0,0 +1,100 @@ +// PowerShell completions are based on the amazing work from clap: +// https://github.com/clap-rs/clap/blob/3294d18efe5f264d12c9035f404c7d189d4824e1/src/completions/powershell.rs +// +// The generated scripts require PowerShell v5.0+ (which comes Windows 10, but +// can be downloaded separately for windows 7 or 8.1). + +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" + + "github.com/spf13/pflag" +) + +var powerShellCompletionTemplate = `using namespace System.Management.Automation +using namespace System.Management.Automation.Language +Register-ArgumentCompleter -Native -CommandName '%s' -ScriptBlock { + param($wordToComplete, $commandAst, $cursorPosition) + $commandElements = $commandAst.CommandElements + $command = @( + '%s' + for ($i = 1; $i -lt $commandElements.Count; $i++) { + $element = $commandElements[$i] + if ($element -isnot [StringConstantExpressionAst] -or + $element.StringConstantType -ne [StringConstantType]::BareWord -or + $element.Value.StartsWith('-')) { + break + } + $element.Value + } + ) -join ';' + $completions = @(switch ($command) {%s + }) + $completions.Where{ $_.CompletionText -like "$wordToComplete*" } | + Sort-Object -Property ListItemText +}` + +func generatePowerShellSubcommandCases(out io.Writer, cmd *Command, previousCommandName string) { + var cmdName string + if previousCommandName == "" { + cmdName = cmd.Name() + } else { + cmdName = fmt.Sprintf("%s;%s", previousCommandName, cmd.Name()) + } + + fmt.Fprintf(out, "\n '%s' {", cmdName) + + cmd.Flags().VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + usage := escapeStringForPowerShell(flag.Usage) + if len(flag.Shorthand) > 0 { + fmt.Fprintf(out, "\n [CompletionResult]::new('-%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Shorthand, flag.Shorthand, usage) + } + fmt.Fprintf(out, "\n [CompletionResult]::new('--%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Name, flag.Name, usage) + }) + + for _, subCmd := range cmd.Commands() { + usage := escapeStringForPowerShell(subCmd.Short) + fmt.Fprintf(out, "\n [CompletionResult]::new('%s', '%s', [CompletionResultType]::ParameterValue, '%s')", subCmd.Name(), subCmd.Name(), usage) + } + + fmt.Fprint(out, "\n break\n }") + + for _, subCmd := range cmd.Commands() { + generatePowerShellSubcommandCases(out, subCmd, cmdName) + } +} + +func escapeStringForPowerShell(s string) string { + return strings.Replace(s, "'", "''", -1) +} + +// GenPowerShellCompletion generates PowerShell completion file and writes to the passed writer. 
+func (c *Command) GenPowerShellCompletion(w io.Writer) error { + buf := new(bytes.Buffer) + + var subCommandCases bytes.Buffer + generatePowerShellSubcommandCases(&subCommandCases, c, "") + fmt.Fprintf(buf, powerShellCompletionTemplate, c.Name(), c.Name(), subCommandCases.String()) + + _, err := buf.WriteTo(w) + return err +} + +// GenPowerShellCompletionFile generates PowerShell completion file. +func (c *Command) GenPowerShellCompletionFile(filename string) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenPowerShellCompletion(outFile) +} diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md new file mode 100644 index 00000000000..afed8024087 --- /dev/null +++ b/vendor/github.com/spf13/cobra/powershell_completions.md @@ -0,0 +1,14 @@ +# Generating PowerShell Completions For Your Own cobra.Command + +Cobra can generate PowerShell completion scripts. Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles. + +# What's supported + +- Completion for subcommands using their `.Short` description +- Completion for non-hidden flags using their `.Name` and `.Shorthand` + +# What's not yet supported + +- Command aliases +- Required, filename or custom flags (they will work like normal flags) +- Custom completion scripts diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go new file mode 100644 index 00000000000..ba0af9cb553 --- /dev/null +++ b/vendor/github.com/spf13/cobra/shell_completions.go @@ -0,0 +1,85 @@ +package cobra + +import ( + "github.com/spf13/pflag" +) + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, +// and causes your command to report an error if invoked without the flag. +func (c *Command) MarkFlagRequired(name string) error { + return MarkFlagRequired(c.Flags(), name) +} + +// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists, +// and causes your command to report an error if invoked without the flag. +func (c *Command) MarkPersistentFlagRequired(name string) error { + return MarkFlagRequired(c.PersistentFlags(), name) +} + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, +// and causes your command to report an error if invoked without the flag. +func MarkFlagRequired(flags *pflag.FlagSet, name string) error { + return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func (c *Command) MarkFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(c.Flags(), name, extensions...) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. +// Generated bash autocompletion will call the bash function f for the flag. 
+func (c *Command) MarkFlagCustom(name string, f string) error { + return MarkFlagCustom(c.Flags(), name, f) +} + +// MarkPersistentFlagFilename instructs the various shell completion +// implementations to limit completions for this persistent flag to the +// specified extensions (patterns). +// +// Shell Completion compatibility matrix: bash, zsh +func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(c.PersistentFlags(), name, extensions...) +} + +// MarkFlagFilename instructs the various shell completion implementations to +// limit completions for this flag to the specified extensions (patterns). +// +// Shell Completion compatibility matrix: bash, zsh +func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { + return flags.SetAnnotation(name, BashCompFilenameExt, extensions) +} + +// MarkFlagCustom instructs the various shell completion implementations to +// limit completions for this flag to the specified extensions (patterns). +// +// Shell Completion compatibility matrix: bash, zsh +func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { + return flags.SetAnnotation(name, BashCompCustom, []string{f}) +} + +// MarkFlagDirname instructs the various shell completion implementations to +// complete only directories with this named flag. +// +// Shell Completion compatibility matrix: zsh +func (c *Command) MarkFlagDirname(name string) error { + return MarkFlagDirname(c.Flags(), name) +} + +// MarkPersistentFlagDirname instructs the various shell completion +// implementations to complete only directories with this persistent named flag. +// +// Shell Completion compatibility matrix: zsh +func (c *Command) MarkPersistentFlagDirname(name string) error { + return MarkFlagDirname(c.PersistentFlags(), name) +} + +// MarkFlagDirname instructs the various shell completion implementations to +// complete only directories with this specified flag. 
+// +// Shell Completion compatibility matrix: zsh +func MarkFlagDirname(flags *pflag.FlagSet, name string) error { + zshPattern := "-(/)" + return flags.SetAnnotation(name, zshCompDirname, []string{zshPattern}) +} diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go index 889c22e273c..12755482f0c 100644 --- a/vendor/github.com/spf13/cobra/zsh_completions.go +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -1,13 +1,102 @@ package cobra import ( - "bytes" + "encoding/json" "fmt" "io" "os" + "sort" "strings" + "text/template" + + "github.com/spf13/pflag" ) +const ( + zshCompArgumentAnnotation = "cobra_annotations_zsh_completion_argument_annotation" + zshCompArgumentFilenameComp = "cobra_annotations_zsh_completion_argument_file_completion" + zshCompArgumentWordComp = "cobra_annotations_zsh_completion_argument_word_completion" + zshCompDirname = "cobra_annotations_zsh_dirname" +) + +var ( + zshCompFuncMap = template.FuncMap{ + "genZshFuncName": zshCompGenFuncName, + "extractFlags": zshCompExtractFlag, + "genFlagEntryForZshArguments": zshCompGenFlagEntryForArguments, + "extractArgsCompletions": zshCompExtractArgumentCompletionHintsForRendering, + } + zshCompletionText = ` +{{/* should accept Command (that contains subcommands) as parameter */}} +{{define "argumentsC" -}} +{{ $cmdPath := genZshFuncName .}} +function {{$cmdPath}} { + local -a commands + + _arguments -C \{{- range extractFlags .}} + {{genFlagEntryForZshArguments .}} \{{- end}} + "1: :->cmnds" \ + "*::arg:->args" + + case $state in + cmnds) + commands=({{range .Commands}}{{if not .Hidden}} + "{{.Name}}:{{.Short}}"{{end}}{{end}} + ) + _describe "command" commands + ;; + esac + + case "$words[1]" in {{- range .Commands}}{{if not .Hidden}} + {{.Name}}) + {{$cmdPath}}_{{.Name}} + ;;{{end}}{{end}} + esac +} +{{range .Commands}}{{if not .Hidden}} +{{template "selectCmdTemplate" .}} +{{- end}}{{end}} +{{- end}} + +{{/* should accept Command without subcommands as parameter */}} +{{define "arguments" -}} +function {{genZshFuncName .}} { +{{" _arguments"}}{{range extractFlags .}} \ + {{genFlagEntryForZshArguments . -}} +{{end}}{{range extractArgsCompletions .}} \ + {{.}}{{end}} +} +{{end}} + +{{/* dispatcher for commands with or without subcommands */}} +{{define "selectCmdTemplate" -}} +{{if .Hidden}}{{/* ignore hidden*/}}{{else -}} +{{if .Commands}}{{template "argumentsC" .}}{{else}}{{template "arguments" .}}{{end}} +{{- end}} +{{- end}} + +{{/* template entry point */}} +{{define "Main" -}} +#compdef _{{.Name}} {{.Name}} + +{{template "selectCmdTemplate" .}} +{{end}} +` +) + +// zshCompArgsAnnotation is used to encode/decode zsh completion for +// arguments to/from Command.Annotations. +type zshCompArgsAnnotation map[int]zshCompArgHint + +type zshCompArgHint struct { + // Indicates the type of the completion to use. One of: + // zshCompArgumentFilenameComp or zshCompArgumentWordComp + Tipe string `json:"type"` + + // A value for the type above (globs for file completion or words) + Options []string `json:"options"` +} + // GenZshCompletionFile generates zsh completion file. func (c *Command) GenZshCompletionFile(filename string) error { outFile, err := os.Create(filename) @@ -19,108 +108,229 @@ func (c *Command) GenZshCompletionFile(filename string) error { return c.GenZshCompletion(outFile) } -// GenZshCompletion generates a zsh completion file and writes to the passed writer. +// GenZshCompletion generates a zsh completion file and writes to the passed +// writer. 
The completion always run on the root command regardless of the +// command it was called from. func (c *Command) GenZshCompletion(w io.Writer) error { - buf := new(bytes.Buffer) - - writeHeader(buf, c) - maxDepth := maxDepth(c) - writeLevelMapping(buf, maxDepth) - writeLevelCases(buf, maxDepth, c) - - _, err := buf.WriteTo(w) - return err -} - -func writeHeader(w io.Writer, cmd *Command) { - fmt.Fprintf(w, "#compdef %s\n\n", cmd.Name()) -} - -func maxDepth(c *Command) int { - if len(c.Commands()) == 0 { - return 0 + tmpl, err := template.New("Main").Funcs(zshCompFuncMap).Parse(zshCompletionText) + if err != nil { + return fmt.Errorf("error creating zsh completion template: %v", err) } - maxDepthSub := 0 - for _, s := range c.Commands() { - subDepth := maxDepth(s) - if subDepth > maxDepthSub { - maxDepthSub = subDepth + return tmpl.Execute(w, c.Root()) +} + +// MarkZshCompPositionalArgumentFile marks the specified argument (first +// argument is 1) as completed by file selection. patterns (e.g. "*.txt") are +// optional - if not provided the completion will search for all files. +func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error { + if argPosition < 1 { + return fmt.Errorf("Invalid argument position (%d)", argPosition) + } + annotation, err := c.zshCompGetArgsAnnotations() + if err != nil { + return err + } + if c.zshcompArgsAnnotationnIsDuplicatePosition(annotation, argPosition) { + return fmt.Errorf("Duplicate annotation for positional argument at index %d", argPosition) + } + annotation[argPosition] = zshCompArgHint{ + Tipe: zshCompArgumentFilenameComp, + Options: patterns, + } + return c.zshCompSetArgsAnnotations(annotation) +} + +// MarkZshCompPositionalArgumentWords marks the specified positional argument +// (first argument is 1) as completed by the provided words. At east one word +// must be provided, spaces within words will be offered completion with +// "word\ word". 
+func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error { + if argPosition < 1 { + return fmt.Errorf("Invalid argument position (%d)", argPosition) + } + if len(words) == 0 { + return fmt.Errorf("Trying to set empty word list for positional argument %d", argPosition) + } + annotation, err := c.zshCompGetArgsAnnotations() + if err != nil { + return err + } + if c.zshcompArgsAnnotationnIsDuplicatePosition(annotation, argPosition) { + return fmt.Errorf("Duplicate annotation for positional argument at index %d", argPosition) + } + annotation[argPosition] = zshCompArgHint{ + Tipe: zshCompArgumentWordComp, + Options: words, + } + return c.zshCompSetArgsAnnotations(annotation) +} + +func zshCompExtractArgumentCompletionHintsForRendering(c *Command) ([]string, error) { + var result []string + annotation, err := c.zshCompGetArgsAnnotations() + if err != nil { + return nil, err + } + for k, v := range annotation { + s, err := zshCompRenderZshCompArgHint(k, v) + if err != nil { + return nil, err + } + result = append(result, s) + } + if len(c.ValidArgs) > 0 { + if _, positionOneExists := annotation[1]; !positionOneExists { + s, err := zshCompRenderZshCompArgHint(1, zshCompArgHint{ + Tipe: zshCompArgumentWordComp, + Options: c.ValidArgs, + }) + if err != nil { + return nil, err + } + result = append(result, s) } } - return 1 + maxDepthSub + sort.Strings(result) + return result, nil } -func writeLevelMapping(w io.Writer, numLevels int) { - fmt.Fprintln(w, `_arguments \`) - for i := 1; i <= numLevels; i++ { - fmt.Fprintf(w, ` '%d: :->level%d' \`, i, i) - fmt.Fprintln(w) - } - fmt.Fprintf(w, ` '%d: :%s'`, numLevels+1, "_files") - fmt.Fprintln(w) -} - -func writeLevelCases(w io.Writer, maxDepth int, root *Command) { - fmt.Fprintln(w, "case $state in") - defer fmt.Fprintln(w, "esac") - - for i := 1; i <= maxDepth; i++ { - fmt.Fprintf(w, " level%d)\n", i) - writeLevel(w, root, i) - fmt.Fprintln(w, " ;;") - } - fmt.Fprintln(w, " *)") - fmt.Fprintln(w, " _arguments '*: :_files'") - fmt.Fprintln(w, " ;;") -} - -func writeLevel(w io.Writer, root *Command, i int) { - fmt.Fprintf(w, " case $words[%d] in\n", i) - defer fmt.Fprintln(w, " esac") - - commands := filterByLevel(root, i) - byParent := groupByParent(commands) - - for p, c := range byParent { - names := names(c) - fmt.Fprintf(w, " %s)\n", p) - fmt.Fprintf(w, " _arguments '%d: :(%s)'\n", i, strings.Join(names, " ")) - fmt.Fprintln(w, " ;;") - } - fmt.Fprintln(w, " *)") - fmt.Fprintln(w, " _arguments '*: :_files'") - fmt.Fprintln(w, " ;;") - -} - -func filterByLevel(c *Command, l int) []*Command { - cs := make([]*Command, 0) - if l == 0 { - cs = append(cs, c) - return cs - } - for _, s := range c.Commands() { - cs = append(cs, filterByLevel(s, l-1)...) 
-	}
-	return cs
-}
-
-func groupByParent(commands []*Command) map[string][]*Command {
-	m := make(map[string][]*Command)
-	for _, c := range commands {
-		parent := c.Parent()
-		if parent == nil {
-			continue
+func zshCompRenderZshCompArgHint(i int, z zshCompArgHint) (string, error) {
+	switch t := z.Tipe; t {
+	case zshCompArgumentFilenameComp:
+		var globs []string
+		for _, g := range z.Options {
+			globs = append(globs, fmt.Sprintf(`-g "%s"`, g))
 		}
-		m[parent.Name()] = append(m[parent.Name()], c)
+		return fmt.Sprintf(`'%d: :_files %s'`, i, strings.Join(globs, " ")), nil
+	case zshCompArgumentWordComp:
+		var words []string
+		for _, w := range z.Options {
+			words = append(words, fmt.Sprintf("%q", w))
+		}
+		return fmt.Sprintf(`'%d: :(%s)'`, i, strings.Join(words, " ")), nil
+	default:
+		return "", fmt.Errorf("Invalid zsh argument completion annotation: %s", t)
 	}
-	return m
 }
-func names(commands []*Command) []string {
-	ns := make([]string, len(commands))
-	for i, c := range commands {
-		ns[i] = c.Name()
-	}
-	return ns
+func (c *Command) zshcompArgsAnnotationnIsDuplicatePosition(annotation zshCompArgsAnnotation, position int) bool {
+	_, dup := annotation[position]
+	return dup
+}
+
+func (c *Command) zshCompGetArgsAnnotations() (zshCompArgsAnnotation, error) {
+	annotation := make(zshCompArgsAnnotation)
+	annotationString, ok := c.Annotations[zshCompArgumentAnnotation]
+	if !ok {
+		return annotation, nil
+	}
+	err := json.Unmarshal([]byte(annotationString), &annotation)
+	if err != nil {
+		return annotation, fmt.Errorf("Error unmarshaling zsh argument annotation: %v", err)
+	}
+	return annotation, nil
+}
+
+func (c *Command) zshCompSetArgsAnnotations(annotation zshCompArgsAnnotation) error {
+	jsn, err := json.Marshal(annotation)
+	if err != nil {
+		return fmt.Errorf("Error marshaling zsh argument annotation: %v", err)
+	}
+	if c.Annotations == nil {
+		c.Annotations = make(map[string]string)
+	}
+	c.Annotations[zshCompArgumentAnnotation] = string(jsn)
+	return nil
+}
+
+func zshCompGenFuncName(c *Command) string {
+	if c.HasParent() {
+		return zshCompGenFuncName(c.Parent()) + "_" + c.Name()
+	}
+	return "_" + c.Name()
+}
+
+func zshCompExtractFlag(c *Command) []*pflag.Flag {
+	var flags []*pflag.Flag
+	c.LocalFlags().VisitAll(func(f *pflag.Flag) {
+		if !f.Hidden {
+			flags = append(flags, f)
+		}
+	})
+	c.InheritedFlags().VisitAll(func(f *pflag.Flag) {
+		if !f.Hidden {
+			flags = append(flags, f)
+		}
+	})
+	return flags
+}
+
+// zshCompGenFlagEntryForArguments returns an entry that matches _arguments
+// zsh-completion parameters. It's too complicated to generate in a template.
+func zshCompGenFlagEntryForArguments(f *pflag.Flag) string {
+	if f.Name == "" || f.Shorthand == "" {
+		return zshCompGenFlagEntryForSingleOptionFlag(f)
+	}
+	return zshCompGenFlagEntryForMultiOptionFlag(f)
+}
+
+func zshCompGenFlagEntryForSingleOptionFlag(f *pflag.Flag) string {
+	var option, multiMark, extras string
+
+	if zshCompFlagCouldBeSpecifiedMoreThenOnce(f) {
+		multiMark = "*"
+	}
+
+	option = "--" + f.Name
+	if option == "--" {
+		option = "-" + f.Shorthand
+	}
+	extras = zshCompGenFlagEntryExtras(f)
+
+	return fmt.Sprintf(`'%s%s[%s]%s'`, multiMark, option, zshCompQuoteFlagDescription(f.Usage), extras)
+}
+
+func zshCompGenFlagEntryForMultiOptionFlag(f *pflag.Flag) string {
+	var options, parenMultiMark, curlyMultiMark, extras string
+
+	if zshCompFlagCouldBeSpecifiedMoreThenOnce(f) {
+		parenMultiMark = "*"
+		curlyMultiMark = "\\*"
+	}
+
+	options = fmt.Sprintf(`'(%s-%s %s--%s)'{%s-%s,%s--%s}`,
+		parenMultiMark, f.Shorthand, parenMultiMark, f.Name, curlyMultiMark, f.Shorthand, curlyMultiMark, f.Name)
+	extras = zshCompGenFlagEntryExtras(f)
+
+	return fmt.Sprintf(`%s'[%s]%s'`, options, zshCompQuoteFlagDescription(f.Usage), extras)
+}
+
+func zshCompGenFlagEntryExtras(f *pflag.Flag) string {
+	if f.NoOptDefVal != "" {
+		return ""
+	}
+
+	extras := ":" // allow options for flag (even without assistance)
+	for key, values := range f.Annotations {
+		switch key {
+		case zshCompDirname:
+			extras = fmt.Sprintf(":filename:_files -g %q", values[0])
+		case BashCompFilenameExt:
+			extras = ":filename:_files"
+			for _, pattern := range values {
+				extras = extras + fmt.Sprintf(` -g "%s"`, pattern)
+			}
+		}
+	}
+
+	return extras
+}
+
+func zshCompFlagCouldBeSpecifiedMoreThenOnce(f *pflag.Flag) bool {
+	return strings.Contains(f.Value.Type(), "Slice") ||
+		strings.Contains(f.Value.Type(), "Array")
+}
+
+func zshCompQuoteFlagDescription(s string) string {
+	return strings.Replace(s, "'", `'\''`, -1)
 }
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md
new file mode 100644
index 00000000000..df9c2eac93c
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.md
@@ -0,0 +1,39 @@
+## Generating Zsh Completion for your cobra.Command
+
+Cobra supports native Zsh completion generated from the root `cobra.Command`.
+The generated completion script should be put somewhere in your `$fpath` named
+`_<YOUR COMMAND>`.
+
+### What's Supported
+
+* Completion for all non-hidden subcommands using their `.Short` description.
+* Completion for all non-hidden flags using the following rules:
+  * Filename completion works by marking the flag with `cmd.MarkFlagFilename...`
+    family of commands.
+  * The requirement for argument to the flag is decided by the `.NoOptDefVal`
+    flag value - if it's empty then completion will expect an argument.
+  * Flags of one of the various `*Array` and `*Slice` types support multiple
+    specifications (with or without argument depending on the specific type).
+* Completion of positional arguments using the following rules:
+  * Argument position for all options below starts at `1`. If argument position
+    `0` is requested it will raise an error.
+  * Use `command.MarkZshCompPositionalArgumentFile` to complete filenames. Glob
+    patterns (e.g. `"*.log"`) are optional - if not specified it will offer to
+    complete all file types.
+  * Use `command.MarkZshCompPositionalArgumentWords` to offer specific words for
+    completion. At least one word is required.
+  * It's possible to specify completion for some arguments and leave some
+    unspecified (e.g. offer words for second argument but nothing for first
+    argument). This will cause no completion for first argument but words
+    completion for second argument.
+  * If no argument completion was specified for 1st argument (but optionally was
+    specified for 2nd) and the command has `ValidArgs` it will be used as
+    completion options for 1st argument.
+  * Argument completions are only offered for commands with no subcommands.
+
+### What's not yet Supported
+
+* Custom completion scripts are not supported yet (We should probably create a
+  zsh-specific one; it doesn't make sense to re-use the bash one as the functions
+  will be different).
+* Whatever other feature you're looking for and doesn't exist :)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index eb10b750583..e96e1d7787d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -131,7 +131,7 @@ github.com/bazelbuild/buildtools/wspace
 github.com/beorn7/perks/quantile
 # github.com/blang/semver v3.5.0+incompatible => github.com/blang/semver v3.5.0+incompatible
 github.com/blang/semver
-# github.com/caddyserver/caddy v1.0.1 => github.com/caddyserver/caddy v1.0.1
+# github.com/caddyserver/caddy v1.0.3 => github.com/caddyserver/caddy v1.0.3
 github.com/caddyserver/caddy/caddyfile
 # github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c => github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c
 github.com/cespare/prettybench
@@ -182,6 +182,9 @@ github.com/containernetworking/cni/pkg/types
 github.com/containernetworking/cni/pkg/types/020
 github.com/containernetworking/cni/pkg/types/current
 github.com/containernetworking/cni/pkg/version
+# github.com/coredns/corefile-migration v1.0.2 => github.com/coredns/corefile-migration v1.0.2
+github.com/coredns/corefile-migration/migration
+github.com/coredns/corefile-migration/migration/corefile
 # github.com/coreos/bbolt v1.3.1-coreos.6 => github.com/coreos/bbolt v1.3.1-coreos.6
 github.com/coreos/bbolt
 # github.com/coreos/etcd v3.3.13+incompatible => github.com/coreos/etcd v3.3.13+incompatible
@@ -737,7 +740,7 @@ github.com/spf13/afero
 github.com/spf13/afero/mem
 # github.com/spf13/cast v1.3.0 => github.com/spf13/cast v1.3.0
 github.com/spf13/cast
-# github.com/spf13/cobra v0.0.4 => github.com/spf13/cobra v0.0.4
+# github.com/spf13/cobra v0.0.5 => github.com/spf13/cobra v0.0.5
 github.com/spf13/cobra
 github.com/spf13/cobra/doc
 # github.com/spf13/jwalterweatherman v1.1.0 => github.com/spf13/jwalterweatherman v1.1.0
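
For reference, the zsh completion API vendored above from cobra v0.0.5 (GenZshCompletion, MarkZshCompPositionalArgumentFile, MarkZshCompPositionalArgumentWords) can be exercised as in the minimal sketch below. The `greet` command, its positional arguments, and the completion words are hypothetical and are not part of this change; only the cobra method calls come from the vendored code.

package main

import (
	"log"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical root command, used only to illustrate the vendored API.
	root := &cobra.Command{
		Use:   "greet [name] [file]",
		Short: "Print a greeting",
		Run:   func(cmd *cobra.Command, args []string) {},
	}

	// Positional argument positions start at 1 (see zsh_completions.md above).
	// Offer fixed words for the first argument ...
	if err := root.MarkZshCompPositionalArgumentWords(1, "alice", "bob"); err != nil {
		log.Fatal(err)
	}
	// ... and restrict file completion for the second argument to *.txt.
	if err := root.MarkZshCompPositionalArgumentFile(2, "*.txt"); err != nil {
		log.Fatal(err)
	}

	// Emit the zsh completion script; per zsh_completions.md it should be
	// installed somewhere on $fpath, e.g. as a file named _greet.
	if err := root.GenZshCompletion(os.Stdout); err != nil {
		log.Fatal(err)
	}
}

The generated script then completes the command's non-hidden flags and the positional arguments according to the annotations set above.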