Merge branch 'master' into proxy/server

This commit is contained in:
JieJhih authored on 2019-04-27 13:48:29 +08:00; committed by GitHub
commit 94731c5dcd
1050 changed files with 19359 additions and 27467 deletions

View File

@ -25,4 +25,5 @@ If the matter is security related, please disclose it privately via https://kube
- OS (e.g: `cat /etc/os-release`):
- Kernel (e.g. `uname -a`):
- Install tools:
- Network plugin and version (if this is a network-related bug):
- Others:

View File

@ -2,7 +2,7 @@
1. If this is your first time, please read our contributor guidelines: https://git.k8s.io/community/contributors/guide#your-first-contribution and developer guide https://git.k8s.io/community/contributors/devel/development.md#development-guide
2. Please label this pull request according to what type of issue you are addressing, especially if this is a release targeted pull request. For reference on required PR/issue labels, read here:
https://git.k8s.io/community/contributors/devel/release.md#issue-kind-label
https://git.k8s.io/community/contributors/devel/sig-release/release.md#issuepr-kind-label
3. Ensure you have added or ran the appropriate tests for your PR: https://git.k8s.io/community/contributors/devel/testing.md
4. If you want *faster* PR reviews, read how: https://git.k8s.io/community/contributors/guide/pull-requests.md#best-practices-for-faster-reviews
5. Follow the instructions for writing a release note: https://git.k8s.io/community/contributors/guide/release-notes.md

View File

@ -1,59 +1,66 @@
<!-- BEGIN MUNGE: GENERATED_TOC -->
- [v1.12.7](#v1127)
- [Downloads for v1.12.7](#downloads-for-v1127)
- [v1.12.8](#v1128)
- [Downloads for v1.12.8](#downloads-for-v1128)
- [Client Binaries](#client-binaries)
- [Server Binaries](#server-binaries)
- [Node Binaries](#node-binaries)
- [Changelog since v1.12.6](#changelog-since-v1126)
- [Changelog since v1.12.7](#changelog-since-v1127)
- [Other notable changes](#other-notable-changes)
- [v1.12.6](#v1126)
- [Downloads for v1.12.6](#downloads-for-v1126)
- [v1.12.7](#v1127)
- [Downloads for v1.12.7](#downloads-for-v1127)
- [Client Binaries](#client-binaries-1)
- [Server Binaries](#server-binaries-1)
- [Node Binaries](#node-binaries-1)
- [Changelog since v1.12.5](#changelog-since-v1125)
- [Changelog since v1.12.6](#changelog-since-v1126)
- [Other notable changes](#other-notable-changes-1)
- [v1.12.5](#v1125)
- [Downloads for v1.12.5](#downloads-for-v1125)
- [v1.12.6](#v1126)
- [Downloads for v1.12.6](#downloads-for-v1126)
- [Client Binaries](#client-binaries-2)
- [Server Binaries](#server-binaries-2)
- [Node Binaries](#node-binaries-2)
- [Changelog since v1.12.4](#changelog-since-v1124)
- [Changelog since v1.12.5](#changelog-since-v1125)
- [Other notable changes](#other-notable-changes-2)
- [v1.12.4](#v1124)
- [Downloads for v1.12.4](#downloads-for-v1124)
- [v1.12.5](#v1125)
- [Downloads for v1.12.5](#downloads-for-v1125)
- [Client Binaries](#client-binaries-3)
- [Server Binaries](#server-binaries-3)
- [Node Binaries](#node-binaries-3)
- [Changelog since v1.12.3](#changelog-since-v1123)
- [Action Required](#action-required)
- [Changelog since v1.12.4](#changelog-since-v1124)
- [Other notable changes](#other-notable-changes-3)
- [v1.12.3](#v1123)
- [Downloads for v1.12.3](#downloads-for-v1123)
- [v1.12.4](#v1124)
- [Downloads for v1.12.4](#downloads-for-v1124)
- [Client Binaries](#client-binaries-4)
- [Server Binaries](#server-binaries-4)
- [Node Binaries](#node-binaries-4)
- [Changelog since v1.12.2](#changelog-since-v1122)
- [Changelog since v1.12.3](#changelog-since-v1123)
- [Action Required](#action-required)
- [Other notable changes](#other-notable-changes-4)
- [v1.12.2](#v1122)
- [Downloads for v1.12.2](#downloads-for-v1122)
- [v1.12.3](#v1123)
- [Downloads for v1.12.3](#downloads-for-v1123)
- [Client Binaries](#client-binaries-5)
- [Server Binaries](#server-binaries-5)
- [Node Binaries](#node-binaries-5)
- [Changelog since v1.12.1](#changelog-since-v1121)
- [Changelog since v1.12.2](#changelog-since-v1122)
- [Other notable changes](#other-notable-changes-5)
- [v1.12.1](#v1121)
- [Downloads for v1.12.1](#downloads-for-v1121)
- [v1.12.2](#v1122)
- [Downloads for v1.12.2](#downloads-for-v1122)
- [Client Binaries](#client-binaries-6)
- [Server Binaries](#server-binaries-6)
- [Node Binaries](#node-binaries-6)
- [Changelog since v1.12.0](#changelog-since-v1120)
- [Changelog since v1.12.1](#changelog-since-v1121)
- [Other notable changes](#other-notable-changes-6)
- [v1.12.0](#v1120)
- [Downloads for v1.12.0](#downloads-for-v1120)
- [v1.12.1](#v1121)
- [Downloads for v1.12.1](#downloads-for-v1121)
- [Client Binaries](#client-binaries-7)
- [Server Binaries](#server-binaries-7)
- [Node Binaries](#node-binaries-7)
- [Changelog since v1.12.0](#changelog-since-v1120)
- [Other notable changes](#other-notable-changes-7)
- [v1.12.0](#v1120)
- [Downloads for v1.12.0](#downloads-for-v1120)
- [Client Binaries](#client-binaries-8)
- [Server Binaries](#server-binaries-8)
- [Node Binaries](#node-binaries-8)
- [Known Issues](#known-issues)
- [Major Themes](#major-themes)
- [SIG API Machinery](#sig-api-machinery)
@ -75,7 +82,7 @@
- [Deprecations and removals](#deprecations-and-removals)
- [New Features](#new-features)
- [API Changes](#api-changes)
- [Other Notable Changes](#other-notable-changes-7)
- [Other Notable Changes](#other-notable-changes-8)
- [SIG API Machinery](#sig-api-machinery-1)
- [SIG Apps](#sig-apps)
- [SIG Auth](#sig-auth)
@ -94,54 +101,127 @@
- [SIG Storage](#sig-storage-1)
- [SIG VMWare](#sig-vmware-1)
- [SIG Windows](#sig-windows-1)
- [Other Notable Changes](#other-notable-changes-8)
- [Other Notable Changes](#other-notable-changes-9)
- [Bug Fixes](#bug-fixes)
- [Not Very Notable (that is, non-user-facing)](#not-very-notable-that-is-non-user-facing)
- [External Dependencies](#external-dependencies)
- [v1.12.0-rc.2](#v1120-rc2)
- [Downloads for v1.12.0-rc.2](#downloads-for-v1120-rc2)
- [Client Binaries](#client-binaries-8)
- [Server Binaries](#server-binaries-8)
- [Node Binaries](#node-binaries-8)
- [Changelog since v1.12.0-rc.1](#changelog-since-v1120-rc1)
- [Other notable changes](#other-notable-changes-9)
- [v1.12.0-rc.1](#v1120-rc1)
- [Downloads for v1.12.0-rc.1](#downloads-for-v1120-rc1)
- [Client Binaries](#client-binaries-9)
- [Server Binaries](#server-binaries-9)
- [Node Binaries](#node-binaries-9)
- [Changelog since v1.12.0-beta.2](#changelog-since-v1120-beta2)
- [Action Required](#action-required-2)
- [Changelog since v1.12.0-rc.1](#changelog-since-v1120-rc1)
- [Other notable changes](#other-notable-changes-10)
- [v1.12.0-beta.2](#v1120-beta2)
- [Downloads for v1.12.0-beta.2](#downloads-for-v1120-beta2)
- [v1.12.0-rc.1](#v1120-rc1)
- [Downloads for v1.12.0-rc.1](#downloads-for-v1120-rc1)
- [Client Binaries](#client-binaries-10)
- [Server Binaries](#server-binaries-10)
- [Node Binaries](#node-binaries-10)
- [Changelog since v1.12.0-beta.1](#changelog-since-v1120-beta1)
- [Action Required](#action-required-3)
- [Changelog since v1.12.0-beta.2](#changelog-since-v1120-beta2)
- [Action Required](#action-required-2)
- [Other notable changes](#other-notable-changes-11)
- [v1.12.0-beta.1](#v1120-beta1)
- [Downloads for v1.12.0-beta.1](#downloads-for-v1120-beta1)
- [v1.12.0-beta.2](#v1120-beta2)
- [Downloads for v1.12.0-beta.2](#downloads-for-v1120-beta2)
- [Client Binaries](#client-binaries-11)
- [Server Binaries](#server-binaries-11)
- [Node Binaries](#node-binaries-11)
- [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1)
- [Action Required](#action-required-4)
- [Changelog since v1.12.0-beta.1](#changelog-since-v1120-beta1)
- [Action Required](#action-required-3)
- [Other notable changes](#other-notable-changes-12)
- [v1.12.0-alpha.1](#v1120-alpha1)
- [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1)
- [v1.12.0-beta.1](#v1120-beta1)
- [Downloads for v1.12.0-beta.1](#downloads-for-v1120-beta1)
- [Client Binaries](#client-binaries-12)
- [Server Binaries](#server-binaries-12)
- [Node Binaries](#node-binaries-12)
- [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1)
- [Action Required](#action-required-4)
- [Other notable changes](#other-notable-changes-13)
- [v1.12.0-alpha.1](#v1120-alpha1)
- [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1)
- [Client Binaries](#client-binaries-13)
- [Server Binaries](#server-binaries-13)
- [Node Binaries](#node-binaries-13)
- [Changelog since v1.11.0](#changelog-since-v1110)
- [Action Required](#action-required-5)
- [Other notable changes](#other-notable-changes-13)
- [Other notable changes](#other-notable-changes-14)
<!-- END MUNGE: GENERATED_TOC -->
<!-- NEW RELEASE NOTES ENTRY -->
# v1.12.8
[Documentation](https://docs.k8s.io)
## Downloads for v1.12.8
filename | sha512 hash
-------- | -----------
[kubernetes.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes.tar.gz) | `0f14f54bcd3ef8260e424ccb9be4d1d7ad8d03e15d00d081fdf564adc319ca4040d404f37466a2342650d08d5c41b1d411f172ff78e611b05fca8fd5404590d9`
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-src.tar.gz) | `10b6ce78a906effbb38600d8e496c49e9739fffaba8d44eff54d298b0f899481b9e4cc60eb918586f3d1055f4db44880fd2b42ad40a391aadfd8a53c584c8c1c`
### Client Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-darwin-386.tar.gz) | `bfb3680c47f674773c50c446577eb3f10468a6fd367a2ee7f851d299f4ff04071757962ddff10659b185ab80e4fc474f10354273560803101b66c9c939279e08`
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-darwin-amd64.tar.gz) | `b6543d97975add3a27f75ff6fcc7c3caeb8749ac88967cb79a6688ba4ba1837fda3582a0f5588073a855a2da43c9b353b565974b7a29f619709f862d8ce1e0b3`
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-linux-386.tar.gz) | `57358d71b4c19d826e4979b1ef3f33b5b1e05c50ba257d6bbfa8d76f15849ebcba389c55f1be50fdc77a311935a0e7ecc827a3f35ee5896a6ceda7580d8b4680`
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-linux-amd64.tar.gz) | `55e6c2ec67aa3283e3b6904418b35845fa14f5faaed0cf503a7adb4e52842f7c3aaa5fbbfdbcf508794c784d93bec48e27e598879e89302c48f54eebdef69d3d`
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-linux-arm.tar.gz) | `0a7b54f8846ddf9d6ef6df863a0211ab448dfbdeeaf78ec163b4e46fa4d7f92611f71ac757bb00d6dfee6314c78ac12cf50020d8d6c9b1dbac550425ccb53743`
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-linux-arm64.tar.gz) | `ad68df3f56c2622a01f54a8575c7cec3b9f508c1332bd16cf3f39b9e3f66dae3b495fc1dce3d69504f18b0feb281268fed306538db538d01e74210be45bafb97`
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-linux-ppc64le.tar.gz) | `1452011ed3f37984ff9493df0d490eefb8a5c0d84c2f87d9ff47ffe9924a14d918c5dfa755494c05975a10b191d75173d0d30be3449e36cffff4b0495f22efa8`
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-linux-s390x.tar.gz) | `edca658d8f91dd4939c6eba444b2b56a30304d3d0c42607e823acf64dace852cc66a8f14d4bf2fc2bdf0c99bbbe4a9625c86f38535d5b24e8c3b95e76193e530`
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-windows-386.tar.gz) | `14e5daaf4623d11380b552fc3fa5ad6bf98488dcf365c8cfa8d7f1d26fe73b317e5cfeb3e46f4e9d582e2a04cd70bc2ed3dfb915c88aacd997324ca8c2582d52`
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-client-windows-amd64.tar.gz) | `1a1d4457620daf2f54e11b2ef790f30890bca71502f86a3c0163a4e6a5afb701c3d60511b944eb4b80c9418e7ee6864d44aba26deacd44a717a5c8c4850794af`
### Server Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-server-linux-amd64.tar.gz) | `8d1a70cfa9012282f679d876ae070f7830aad11ef64f437b90320ccae5253a3f527df0abb56f34004ccb2113e195638b6ca69aad9aff85f9dcb588aacca81d55`
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-server-linux-arm.tar.gz) | `67022706b4bf98aba305fd3759940ce396e35474814ced4152152b4cc536d79e1b4e3a4027e45af3637ea006fbabadca34d8ecc6874138300230b2b0bcd2dcb5`
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-server-linux-arm64.tar.gz) | `00d7e79fa71f4265b8ba5cc2e62c2ab4b5d1076bddc8155a3b7a5e589c34446860c25571b972566e694709230d32de763ac3ba0a97a2cc2cbc6c7b431b30a1e0`
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-server-linux-ppc64le.tar.gz) | `8add81c5f767dbdd04ac39f07aa4855be86c91f848c2e331d40734e85d0d6c7ea5cd0c575ab49b101c1e6ba5224eb1762f8f73f39610c773b76f7d1ebffc86cc`
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-server-linux-s390x.tar.gz) | `63e58ea49072ac058e74b989f9a74887b27c52d56923f44a7d53cb384915f4a2425e65d6e9f6642d4fbac102bb9a45baad901a32a5414989c0b2d2cc57ffa59f`
### Node Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-node-linux-amd64.tar.gz) | `6677af5330149f39c6d84722e5418bf35caf4d431fb97fd0df102373a5faaf4a8344921bc2a51290abe521620f6543c482a54720692d245ff36142019fcc0c19`
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-node-linux-arm.tar.gz) | `aa0f2abaec8ac765acffe1c6ff00c01cd74befba98a5c7afb30f716bd37f9094e1c314df7f3b7c8361c86e6c78f9aa246623e5f1934d60148663851810aa4815`
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-node-linux-arm64.tar.gz) | `c4bb230afcf78414461b32cedd0564a58e02e82b0a679ea42f977e3bc501cc4857694774dad423b4a76542a4698929c8f5429b9737f1e324d21e39afbc5be48f`
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-node-linux-ppc64le.tar.gz) | `31d4bdc1528ee8d4ab4ee16e3da08c1e8c026eaafd88950836f19e10bf3e87d12876a25e2c90a81529a9d26f8465cf50e8997e8c14fb555f21d69323a885f2eb`
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-node-linux-s390x.tar.gz) | `d7b8a81b14a12578ca6273dc32703f906627e244ed00639436fb3cb38d4b4aa55d7a857f9a844844bc2d463619b890329043b78c9ec8ff0f5b38dc55b572cd71`
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.8/kubernetes-node-windows-amd64.tar.gz) | `9ca9ef41a42d8cb5c15533a10848170fa0f7c1e4eccbc8d6269ce085ab7670e446473e4e240b3bb1905dda3078725976b0506a27d67e6e3a2fd546aaa6678d84`
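
Each artifact above is published with its sha512 digest. As a minimal sketch (not part of the release notes), one way to verify a download against the table, using the linux-amd64 client tarball and the hash listed for it:

```bash
# Fetch the tarball, then have sha512sum compare it against the digest
# copied from the table above (input format: "<hash>  <filename>").
curl -fsSLO https://dl.k8s.io/v1.12.8/kubernetes-client-linux-amd64.tar.gz
echo "55e6c2ec67aa3283e3b6904418b35845fa14f5faaed0cf503a7adb4e52842f7c3aaa5fbbfdbcf508794c784d93bec48e27e598879e89302c48f54eebdef69d3d  kubernetes-client-linux-amd64.tar.gz" | sha512sum --check
```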
## Changelog since v1.12.7
### Other notable changes
* Connections from Pods to Services with 0 endpoints will now ICMP reject immediately, rather than blackhole and timeout. ([#72534](https://github.com/kubernetes/kubernetes/pull/72534), [@thockin](https://github.com/thockin))
* Services of type=LoadBalancer which have no endpoints will now immediately ICMP reject connections, rather than time out. ([#74394](https://github.com/kubernetes/kubernetes/pull/74394), [@thockin](https://github.com/thockin))
* Ensure the backend pools are set correctly for Azure SLB with multiple backend pools (e.g. outbound rules) ([#76691](https://github.com/kubernetes/kubernetes/pull/76691), [@feiskyer](https://github.com/feiskyer))
* fix race condition issue for smb mount on windows ([#75371](https://github.com/kubernetes/kubernetes/pull/75371), [@andyzhangx](https://github.com/andyzhangx))
* fix smb unmount issue on Windows ([#75087](https://github.com/kubernetes/kubernetes/pull/75087), [@andyzhangx](https://github.com/andyzhangx))
* Increase Azure default maximumLoadBalancerRuleCount to 250. ([#72621](https://github.com/kubernetes/kubernetes/pull/72621), [@feiskyer](https://github.com/feiskyer))
* Fixes bug in DaemonSetController causing it to stop processing some DaemonSets for 5 minutes after node removal. ([#76060](https://github.com/kubernetes/kubernetes/pull/76060), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski))
* Fixes an NPD bug on GCI, so that it disables glog writing to files for log-counter ([#76211](https://github.com/kubernetes/kubernetes/pull/76211), [@wangzhen127](https://github.com/wangzhen127))
* [stackdriver addon] Bump prometheus-to-sd to v0.5.0 to pick up security fixes. ([#75362](https://github.com/kubernetes/kubernetes/pull/75362), [@serathius](https://github.com/serathius))
* [fluentd-gcp addon] Bump fluentd-gcp-scaler to v0.5.1 to pick up security fixes.
* [fluentd-gcp addon] Bump event-exporter to v0.2.4 to pick up security fixes.
* [fluentd-gcp addon] Bump prometheus-to-sd to v0.5.0 to pick up security fixes.
* [metadata-proxy addon] Bump prometheus-to-sd to v0.5.0 to pick up security fixes.
* Fixed parsing of fsType in AWS StorageClass parameters ([#75944](https://github.com/kubernetes/kubernetes/pull/75944), [@jsafrane](https://github.com/jsafrane))
* Node-Problem-Detector configuration is now decoupled from the Kubernetes release on GKE/GCE. ([#73288](https://github.com/kubernetes/kubernetes/pull/73288), [@wangzhen127](https://github.com/wangzhen127))
* [IPVS] Allow for transparent kube-proxy restarts ([#75283](https://github.com/kubernetes/kubernetes/pull/75283), [@lbernail](https://github.com/lbernail))
# v1.12.7
[Documentation](https://docs.k8s.io)

Godeps/LICENSES (generated)
View File

@ -427,7 +427,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2014 Google Inc.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -441,7 +441,7 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
See the License for the specific language governing permissions and
limitations under the License.
= vendor/cloud.google.com/go/LICENSE a873c5645c184d51e0f9b34e1d7cf559
= vendor/cloud.google.com/go/LICENSE 3b83ef96387f14655fc854ddc3c6bd57
================================================================================
@ -11507,6 +11507,35 @@ under a dual license of LGPLv3+ or GPLv2.
================================================================================
================================================================================
= vendor/github.com/hpcloud/tail licensed under: =
# The MIT License (MIT)
# © Copyright 2015 Hewlett Packard Enterprise Development LP
Copyright (c) 2014 ActiveState
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
= vendor/github.com/hpcloud/tail/LICENSE.txt 0bdce43b16cd5c587124d6f274632c87
================================================================================
================================================================================
= vendor/github.com/imdario/mergo licensed under: =
@ -16617,191 +16646,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
================================================================================
= vendor/github.com/rancher/go-rancher licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
= vendor/github.com/rancher/go-rancher/LICENSE 2ee41112a44fe7014dce33e26468ba93
================================================================================
================================================================================
= vendor/github.com/robfig/cron licensed under: =
@ -19051,7 +18895,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
= vendor/golang.org/x/oauth2 licensed under: =
Copyright (c) 2009 The oauth2 Authors. All rights reserved.
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -19079,7 +18923,7 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/golang.org/x/oauth2/LICENSE 704b1e0c436dbf193e7dcbd4cf06ec81
= vendor/golang.org/x/oauth2/LICENSE 5d4950ecb7b26d2c5e4e7b4e0dd74707
================================================================================
@ -19918,6 +19762,42 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
================================================================================
= vendor/gopkg.in/fsnotify.v1 licensed under: =
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2012 fsnotify Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/gopkg.in/fsnotify.v1/LICENSE c38914c9a7ab03bb2b96d4baaee10769
================================================================================
================================================================================
= vendor/gopkg.in/gcfg.v1 licensed under: =
@ -20228,6 +20108,43 @@ SOFTWARE.
================================================================================
================================================================================
= vendor/gopkg.in/tomb.v1 licensed under: =
tomb - support for clean goroutine termination in Go.
Copyright (c) 2010-2011 - Gustavo Niemeyer <gustavo@niemeyer.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/gopkg.in/tomb.v1/LICENSE 95d4102f39f26da9b66fee5d05ac597b
================================================================================
================================================================================
= vendor/gopkg.in/warnings.v0 licensed under: =

View File

@ -345,9 +345,11 @@ aliases:
# api-reviewers targeted by sig area
# see https://git.k8s.io/community/sig-architecture/api-review-process.md#training-reviews
# sig-api-machinery-api-reviewers:
# -
# -
sig-api-machinery-api-reviewers:
- caesarxuchao
- deads2k
- jpbetz
- sttts
# sig-apps-api-reviewers:
# -
@ -357,10 +359,11 @@ aliases:
# -
# -
# sig-cli-api-reviewers:
# -
# -
sig-cli-api-reviewers:
- pwittrock
- soltysh
sig-cloud-provider-api-reviewers:
- andrewsykim
- cheftako
@ -381,13 +384,15 @@ aliases:
- bsalamat
- k82cn
# sig-storage-api-reviewers:
# -
# -
sig-storage-api-reviewers:
- saad-ali
- msau42
- jsafrane
# sig-windows-api-reviewers:
# -
# -
sig-windows-api-reviewers:
- patricklang
- michmike
dep-approvers:
- apelisse

View File

@ -9337,7 +9337,7 @@
"type": "string"
},
"runtimeClassName": {
"description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is an alpha feature and may change in the future.",
"description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.",
"type": "string"
},
"schedulerName": {
@ -11122,7 +11122,7 @@
"type": "string"
},
"subPathExpr": {
"description": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is alpha in 1.14.",
"description": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. This field is beta in 1.15.",
"type": "string"
}
},

View File

@ -15,7 +15,7 @@
# This file creates a standard build environment for building cross
# platform go binary for the architecture kubernetes cares about.
FROM golang:1.12.1
FROM golang:1.12.4
ENV GOARM 7
ENV KUBE_DYNAMIC_CROSSPLATFORMS \

View File

@ -1 +1 @@
v1.12.1-2
v1.12.4-1

View File

@ -89,8 +89,8 @@ readonly KUBE_CONTAINER_RSYNC_PORT=8730
# $1 - server architecture
kube::build::get_docker_wrapped_binaries() {
local arch=$1
local debian_base_version=0.4.1
local debian_iptables_version=v11.0.1
local debian_base_version=v1.0.0
local debian_iptables_version=v11.0.2
### If you change any of these lists, please also update DOCKERIZED_BINARIES
### in build/BUILD. And kube::golang::server_image_targets
local targets=(

View File

@ -39,3 +39,32 @@ build:cross:linux_arm --config=repo_infra_crosstool --platforms=@io_bazel_rules_
build:cross:linux_arm64 --config=repo_infra_crosstool --platforms=@io_bazel_rules_go//go/toolchain:linux_arm64 --cpu=arm64
build:cross:linux_ppc64le --config=repo_infra_crosstool --platforms=@io_bazel_rules_go//go/toolchain:linux_ppc64le --cpu=ppc64le
build:cross:linux_s390x --config=repo_infra_crosstool --platforms=@io_bazel_rules_go//go/toolchain:linux_s390x --cpu=s390x
# --config=remote-cache enables a remote bazel cache
# Note needs a --remote_instance_name=projects/PROJ/instances/default_instance flag
build:remote-cache --remote_cache=remotebuildexecution.googleapis.com
build:remote-cache --tls_enabled=true
build:remote-cache --remote_timeout=3600
build:remote-cache --auth_enabled=true
# --config=remote adds remote execution to the --config=remote-cache
# Note needs a --remote_instance_name=projects/PROJ/instances/default_instance flag
build:remote --config=remote-cache
build:remote --remote_executor=remotebuildexecution.googleapis.com
build:remote --jobs=500
build:remote --host_javabase=@rbe_default//java:jdk
build:remote --javabase=@rbe_default//java:jdk
build:remote --host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:remote --java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8
build:remote --crosstool_top=@rbe_default//cc:toolchain
build:remote --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1
build:remote --extra_toolchains=@rbe_default//config:cc-toolchain
build:remote --extra_execution_platforms=:rbe_with_network
build:remote --host_platform=:rbe_with_network
build:remote --platforms=:rbe_with_network
build:remote --spawn_strategy=remote
build:remote --strategy=Javac=remote
build:remote --strategy=Closure=remote
build:remote --strategy=Genrule=remote
build:remote --define=EXECUTOR=remote
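
The comments in this .bazelrc fragment note that both --config=remote-cache and --config=remote still need a --remote_instance_name flag. As a hedged sketch (the project path is the placeholder from the comment above, and //cmd/kubectl stands in for whatever target you actually want to build), an invocation could look like:

```bash
# Hypothetical invocation: reuse the remote-cache config defined above and
# point it at a Remote Build Execution instance.
bazel build \
  --config=remote-cache \
  --remote_instance_name=projects/PROJ/instances/default_instance \
  //cmd/kubectl
```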

View File

@ -106,3 +106,20 @@ genrule(
cmd = "grep ^STABLE_BUILD_SCM_REVISION bazel-out/stable-status.txt | awk '{print $$2}' >$@",
stamp = 1,
)
platform(
name = "rbe_with_network",
parents = ["@rbe_default//config:platform"],
# https://cloud.google.com/remote-build-execution/docs/remote-execution-environment#remote_execution_properties
remote_execution_properties = """
properties: {
name: "dockerNetwork"
value: "standard"
}
properties: {
name: "dockerPrivileged"
value: "true"
}
{PARENT_REMOTE_EXECUTION_PROPERTIES}
""",
)

View File

@ -1,6 +1,20 @@
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
load("//build:workspace_mirror.bzl", "mirror")
http_archive(
name = "bazel_toolchains",
sha256 = "8d43844d1d4447be2a108834771d617a1ad2a107f1680190bfe44925e7bf530e",
strip_prefix = "bazel-toolchains-4c003ad45e8a2d829ffc40e3aecfb6b8577a9406",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/4c003ad45e8a2d829ffc40e3aecfb6b8577a9406.tar.gz",
"https://github.com/bazelbuild/bazel-toolchains/archive/4c003ad45e8a2d829ffc40e3aecfb6b8577a9406.tar.gz",
],
)
load("@bazel_toolchains//rules:rbe_repo.bzl", "rbe_autoconfig")
rbe_autoconfig(name = "rbe_default")
http_archive(
name = "bazel_skylib",
sha256 = "eb5c57e4c12e68c0c20bc774bfbc60a568e800d025557bc4ea022c6479acc867",
@ -21,8 +35,8 @@ http_archive(
http_archive(
name = "io_bazel_rules_go",
sha256 = "6433336b4c5feb54e2f45df4c1c84ea4385b2dc0b6f274ec2cd5d745045eae1f",
urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.17.2/rules_go-0.17.2.tar.gz"),
sha256 = "91b79f4758fd16f2c6426279ce00c1d2d8577d61c519db39675ed84657e1a95e",
urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.17.4/rules_go-0.17.4.tar.gz"),
)
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
@ -30,7 +44,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.12.1",
go_version = "1.12.4",
)
http_archive(
@ -51,7 +65,7 @@ load("@io_bazel_rules_docker//container:container.bzl", "container_pull")
container_pull(
name = "official_busybox",
digest = "sha256:cb63aa0641a885f54de20f61d152187419e8f6b159ed11a251a09d115fdff9bd",
digest = "sha256:5e8e0509e829bb8f990249135a36e81a3ecbe94294e7a185cc14616e5fad96bd",
registry = "index.docker.io",
repository = "library/busybox",
tag = "latest", # ignored, but kept here for documentation

View File

@ -42,13 +42,6 @@ _ETCD_TARBALL_ARCH_SHA256 = {
"ppc64le": "148fe96f0ec1813c5db9916199e96a913174304546bc8447a2d2f9fee4b8f6c2",
}
# Note that these are digests for the manifest list. We resolve the manifest
# list to each of its platform-specific images in
# debian_image_dependencies().
_DEBIAN_BASE_DIGEST = "sha256:6966a0aedd7592c18ff2dd803c08bd85780ee19f5e3a2e7cf908a4cd837afcde" # 0.4.1
_DEBIAN_IPTABLES_DIGEST = "sha256:656e45c00083359107b1d6ae0411ff3894ba23011a8533e229937a71be84e063" # v11.0.1
_DEBIAN_HYPERKUBE_BASE_DIGEST = "sha256:8cabe02be6e86685d8860b7ace7c7addc9591a339728703027a4854677f1c772" # 0.12.1
# Dependencies needed for a Kubernetes "release", e.g. building docker images,
# debs, RPMs, or tarballs.
def release_dependencies():
@ -75,30 +68,67 @@ def cri_tarballs():
urls = mirror("https://github.com/kubernetes-incubator/cri-tools/releases/download/v%s/crictl-v%s-linux-%s.tar.gz" % (CRI_TOOLS_VERSION, CRI_TOOLS_VERSION, arch)),
)
# Use go get -u github.com/estesp/manifest-tool to find these values
_DEBIAN_BASE_DIGEST = {
"manifest": "sha256:6966a0aedd7592c18ff2dd803c08bd85780ee19f5e3a2e7cf908a4cd837afcde",
"amd64": "sha256:8ccb65cd2dd7e0c24193d0742a20e4a673dbd11af5a33f16fcd471a31486866c",
"arm": "sha256:3432b41de3f6dfffdc1386fce961cfd1f9f8e208b3a35070e10ef3e2a733cb17",
"arm64": "sha256:9189251e1d1eb4126d6e6add2e272338f9c8a6a3db38863044625bca4b667f31",
"ppc64le": "sha256:50aa659e1e75e4231ee8293c3b4115e5755bb0517142b9b4bddbc134bf4354db",
"s390x": "sha256:bbb8ee3a2aaca738c00809f450233d98029fea4e319d8faaa30aa94c8b17a806",
}
_DEBIAN_IPTABLES_DIGEST = {
"manifest": "sha256:b522b0035dba3ac2d5c0dbaaf8217bd66248e790332ccfdf653e0f943a280dcf",
"amd64": "sha256:adc40e9ec817c15d35b26d1d6aa4d0f8096fba4c99e26a026159bb0bc98c6a89",
"arm": "sha256:58e8a1d3b187eed2d8d3664cd1c9723e5029698714a24dfca4b6ef42ea27a9d4",
"arm64": "sha256:1a63fdd216fe7b84561d40ab1ebaa0daae1fc73e4232a6caffbd8353d9a14cea",
"ppc64le": "sha256:9f90adbc7513cc96d92fcec7633c4b29e766dd31cf876af03c0b54374e22fa9c",
"s390x": "sha256:4f147708deff2a0163ee49b6980cc95423514bec5f4091612d65773b898fbdae",
}
_DEBIAN_HYPERKUBE_BASE_DIGEST = {
"manifest": "sha256:8cabe02be6e86685d8860b7ace7c7addc9591a339728703027a4854677f1c772",
"amd64": "sha256:5d4ea2fb5fbe9a9a9da74f67cf2faefc881968bc39f2ac5d62d9167e575812a1",
"arm": "sha256:73260814af61522ff6aa48291df457d3bb0a91c4bf72e7cfa51fbaf03eb65fae",
"arm64": "sha256:78eeb1a31eef7c16f954444d64636d939d89307e752964ad6d9d06966c722da3",
"ppc64le": "sha256:92857d647abe8d9c7b4d7160cd5699112afc12fde369082a8ed00688b17928a9",
"s390x": "sha256:c11d74fa0538c67238576c247bfaddf95ebaa90cd03cb4d2f2ac3c6ebe0441e2",
}
def _digest(d, arch):
if arch not in d:
print("WARNING: %s not found in %r" % (arch, d))
return d["manifest"]
return d[arch]
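
The comment above these digest maps says to use `go get -u github.com/estesp/manifest-tool` to find the values. A hedged sketch of that workflow (registry and repository names appear in the rules below; the tag here is illustrative, and the output format is abbreviated):

```bash
# Assumed workflow: install manifest-tool, then inspect the manifest list for
# one of the base images to read off the per-architecture digests that feed
# _DEBIAN_BASE_DIGEST and the other maps.
go get -u github.com/estesp/manifest-tool
manifest-tool inspect k8s.gcr.io/debian-base:v1.0.0
# The report includes the manifest-list digest plus one digest per platform
# (amd64, arm, arm64, ppc64le, s390x), matching the keys used by _digest().
```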
def debian_image_dependencies():
for arch in SERVER_PLATFORMS["linux"]:
container_pull(
name = "debian-base-" + arch,
architecture = arch,
digest = _DEBIAN_BASE_DIGEST,
digest = _digest(_DEBIAN_BASE_DIGEST, arch),
registry = "k8s.gcr.io",
repository = "debian-base",
tag = "0.4.1", # ignored, but kept here for documentation
)
container_pull(
name = "debian-iptables-" + arch,
architecture = arch,
digest = _DEBIAN_IPTABLES_DIGEST,
digest = _digest(_DEBIAN_IPTABLES_DIGEST, arch),
registry = "k8s.gcr.io",
repository = "debian-iptables",
tag = "v11.0.2", # ignored, but kept here for documentation
)
container_pull(
name = "debian-hyperkube-base-" + arch,
architecture = arch,
digest = _DEBIAN_HYPERKUBE_BASE_DIGEST,
digest = _digest(_DEBIAN_HYPERKUBE_BASE_DIGEST, arch),
registry = "k8s.gcr.io",
repository = "debian-hyperkube-base",
tag = "0.12.1", # ignored, but kept here for documentation
)
def etcd_tarballs():

View File

@ -68,6 +68,7 @@ data:
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf

View File

@ -68,6 +68,7 @@ data:
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf

View File

@ -68,6 +68,7 @@ data:
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf

View File

@ -111,7 +111,7 @@ spec:
operator: "Exists"
containers:
- name: node-cache
image: k8s.gcr.io/k8s-dns-node-cache:1.15.0
image: k8s.gcr.io/k8s-dns-node-cache:1.15.1
resources:
limits:
memory: 30Mi

View File

@ -3,7 +3,7 @@
[Metrics Server](https://github.com/kubernetes-incubator/metrics-server) exposes
core Kubernetes metrics via metrics API.
More details can be found in [Core metrics pipeline documentation](https://kubernetes.io/docs/tasks/debug-application-cluster/core-metrics-pipeline/).
More details can be found in [Core metrics pipeline documentation](https://kubernetes.io/docs/tasks/debug-application-cluster/resource-metrics-pipeline/).
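
Since this README fragment describes metrics-server exposing core metrics via the metrics API, a brief usage sketch (assuming metrics-server is already deployed and the aggregation layer is enabled) of how those metrics are typically consumed:

```bash
# Summarized resource usage served by metrics-server:
kubectl top nodes
# The same data, fetched directly from the aggregated metrics API:
kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes
```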
## Troubleshooting

View File

@ -57,6 +57,7 @@ spec:
# Remove these lines for non-GKE clusters, and when GKE supports token-based auth.
- --kubelet-port=10255
- --deprecated-kubelet-completely-insecure=true
- --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
ports:
- containerPort: 443
name: https

View File

@ -1,14 +0,0 @@
binaries
ca-cert
etcd-cert
master/bin/etcd
master/bin/etcdctl
master/bin/kube*
node/bin/docker
node/bin/etcd
node/bin/etcdctl
node/bin/flanneld
node/bin/kube*
local-test.sh

View File

@ -1,4 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- zouyee

View File

@ -1,137 +0,0 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Download the flannel, etcd, docker, bridge-utils and K8s binaries automatically
# and store into binaries directory.
# Run as sudoers only
# author @kevin-wangzefeng
set -o errexit
set -o nounset
set -o pipefail
readonly ROOT=$(dirname "${BASH_SOURCE[0]}")
source "${ROOT}/config-build.sh"
# ensure $RELEASES_DIR is an absolute file path
mkdir -p "${RELEASES_DIR}"
RELEASES_DIR=$(cd "${RELEASES_DIR}"; pwd)
# get absolute file path of binaries
BINARY_DIR=$(cd "${ROOT}"; pwd)/binaries
function clean-up() {
rm -rf "${RELEASES_DIR}"
rm -rf "${BINARY_DIR}"
}
function download-releases() {
rm -rf "${RELEASES_DIR}"
mkdir -p "${RELEASES_DIR}"
echo "Download flannel release v${FLANNEL_VERSION} ..."
curl -L "${FLANNEL_DOWNLOAD_URL}" -o "${RELEASES_DIR}/flannel.tar.gz"
echo "Download etcd release v${ETCD_VERSION} ..."
curl -L "${ETCD_DOWNLOAD_URL}" -o "${RELEASES_DIR}/etcd.tar.gz"
echo "Download kubernetes release v${K8S_VERSION} ..."
curl -L "${K8S_CLIENT_DOWNLOAD_URL}" -o "${RELEASES_DIR}/kubernetes-client-linux-amd64.tar.gz"
curl -L "${K8S_SERVER_DOWNLOAD_URL}" -o "${RELEASES_DIR}/kubernetes-server-linux-amd64.tar.gz"
echo "Download docker release v${DOCKER_VERSION} ..."
curl -L "${DOCKER_DOWNLOAD_URL}" -o "${RELEASES_DIR}/docker.tar.gz"
}
function unpack-releases() {
rm -rf "${BINARY_DIR}"
mkdir -p "${BINARY_DIR}/master/bin"
mkdir -p "${BINARY_DIR}/node/bin"
# flannel
if [[ -f "${RELEASES_DIR}/flannel.tar.gz" ]] ; then
tar xzf "${RELEASES_DIR}/flannel.tar.gz" -C "${RELEASES_DIR}"
cp "${RELEASES_DIR}/flanneld" "${BINARY_DIR}/master/bin"
cp "${RELEASES_DIR}/flanneld" "${BINARY_DIR}/node/bin"
fi
# etcd
if [[ -f "${RELEASES_DIR}/etcd.tar.gz" ]] ; then
tar xzf "${RELEASES_DIR}/etcd.tar.gz" -C "${RELEASES_DIR}"
ETCD="etcd-v${ETCD_VERSION}-linux-amd64"
cp "${RELEASES_DIR}/${ETCD}/etcd" \
"${RELEASES_DIR}/${ETCD}/etcdctl" "${BINARY_DIR}/master/bin"
cp "${RELEASES_DIR}/${ETCD}/etcd" \
"${RELEASES_DIR}/${ETCD}/etcdctl" "${BINARY_DIR}/node/bin"
fi
# k8s
if [[ -f "${RELEASES_DIR}/kubernetes-client-linux-amd64.tar.gz" ]] ; then
tar xzf "${RELEASES_DIR}/kubernetes-client-linux-amd64.tar.gz" -C "${RELEASES_DIR}"
cp "${RELEASES_DIR}/kubernetes/client/bin/kubectl" "${BINARY_DIR}"
fi
if [[ -f "${RELEASES_DIR}/kubernetes-server-linux-amd64.tar.gz" ]] ; then
tar xzf "${RELEASES_DIR}/kubernetes-server-linux-amd64.tar.gz" -C "${RELEASES_DIR}"
cp "${RELEASES_DIR}/kubernetes/server/bin/kube-apiserver" \
"${RELEASES_DIR}/kubernetes/server/bin/kube-controller-manager" \
"${RELEASES_DIR}/kubernetes/server/bin/kube-scheduler" "${BINARY_DIR}/master/bin"
cp "${RELEASES_DIR}/kubernetes/server/bin/kubelet" \
"${RELEASES_DIR}/kubernetes/server/bin/kube-proxy" "${BINARY_DIR}/node/bin"
fi
# docker
if [[ -f "${RELEASES_DIR}/docker.tar.gz" ]]; then
tar xzf "${RELEASES_DIR}/docker.tar.gz" -C "${RELEASES_DIR}"
cp "${RELEASES_DIR}/docker/docker*" "${BINARY_DIR}/node/bin"
fi
chmod -R +x "${BINARY_DIR}"
echo "Done! All binaries are stored in ${BINARY_DIR}"
}
function parse-opt() {
local opt=${1-}
case $opt in
download)
download-releases
;;
unpack)
unpack-releases
;;
clean)
clean-up
;;
all)
download-releases
unpack-releases
;;
*)
echo "Usage: "
echo " build.sh <command>"
echo "Commands:"
echo " clean Clean up downloaded releases and unpacked binaries."
echo " download Download releases to \"${RELEASES_DIR}\"."
echo " unpack Unpack releases downloaded in \"${RELEASES_DIR}\", and copy binaries to \"${BINARY_DIR}\"."
echo " all Download releases and unpack them."
;;
esac
}
parse-opt "${@}"

View File

@ -1,52 +0,0 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Contains configuration values for the Binaries downloading and unpacking.
# Directory to store release packages that will be downloaded.
RELEASES_DIR=${RELEASES_DIR:-/tmp/downloads}
# Define docker version to use.
DOCKER_VERSION=${DOCKER_VERSION:-"1.12.1"}
# Define flannel version to use.
FLANNEL_VERSION=${FLANNEL_VERSION:-"0.6.1"}
# Define etcd version to use.
ETCD_VERSION=${ETCD_VERSION:-"3.0.9"}
# Define k8s version to use.
K8S_VERSION=${K8S_VERSION:-"1.3.7"}
# shellcheck disable=2034 # Variables sourced in other scripts executed from the same shell
DOCKER_DOWNLOAD_URL=\
"https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz"
# shellcheck disable=2034 # Variables sourced in other scripts executed from the same shell
FLANNEL_DOWNLOAD_URL=\
"https://github.com/coreos/flannel/releases/download/v${FLANNEL_VERSION}/flannel-v${FLANNEL_VERSION}-linux-amd64.tar.gz"
# shellcheck disable=2034 # Variables sourced in other scripts executed from the same shell
ETCD_DOWNLOAD_URL=\
"https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz"
# shellcheck disable=2034 # Variables sourced in other scripts executed from the same shell
K8S_CLIENT_DOWNLOAD_URL=\
"https://dl.k8s.io/v${K8S_VERSION}/kubernetes-client-linux-amd64.tar.gz"
# shellcheck disable=2034 # Variables sourced in other scripts executed from the same shell
K8S_SERVER_DOWNLOAD_URL=\
"https://dl.k8s.io/v${K8S_VERSION}/kubernetes-server-linux-amd64.tar.gz"

View File

@ -1,143 +0,0 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
readonly root=$(dirname "${BASH_SOURCE[0]}")
## Contains configuration values for the CentOS cluster
# The user should have sudo privilege
export MASTER=${MASTER:-"centos@172.10.0.11"}
export MASTER_IP=${MASTER#*@}
# Define all your master nodes,
# And separated with blank space like <user_1@ip_1> <user_2@ip_2> <user_3@ip_3>.
# The user should have sudo privilege
export MASTERS="${MASTERS:-$MASTER}"
# length-of <arg0>
# Get the length of specific arg0, could be a space-separate string or array.
function length-of() {
local len=0
# shellcheck disable=SC2034 # Unused variables left for readability
for part in $1; do
((++len))
done
echo $len
}
# Number of nodes in your cluster.
export NUM_MASTERS="${NUM_MASTERS:-$(length-of "$MASTERS")}"
# Get default master advertise address: first master node.
function default-advertise-address() {
# get the first master node
local masters_array=("${MASTERS}")
local master=${masters_array[0]}
echo "${master#*@}"
}
# Define advertise address of masters, could be a load balancer address.
# If not provided, the default is ip of first master node.
export MASTER_ADVERTISE_ADDRESS="${MASTER_ADVERTISE_ADDRESS:-$(default-advertise-address)}"
export MASTER_ADVERTISE_IP="${MASTER_ADVERTISE_IP:-$(getent hosts "${MASTER_ADVERTISE_ADDRESS}" | awk '{print $1; exit}')}"
# Define all your minion nodes,
# And separated with blank space like <user_1@ip_1> <user_2@ip_2> <user_3@ip_3>.
# The user should have sudo privilege
export NODES="${NODES:-"centos@172.10.0.12 centos@172.10.0.13"}"
# Number of nodes in your cluster.
export NUM_NODES="${NUM_NODES:-$(length-of "$NODES")}"
# Should be removed when NUM_NODES is deprecated in validate-cluster.sh
export NUM_NODES="${NUM_NODES}"
# By default, the cluster will use the etcd installed on master.
function concat-etcd-servers() {
local etcd_servers=""
for master in ${MASTERS}; do
local master_ip=${master#*@}
local prefix=""
if [ -n "$etcd_servers" ]; then
prefix="${etcd_servers},"
fi
etcd_servers="${prefix}https://${master_ip}:2379"
done
echo "$etcd_servers"
}
ETCD_SERVERS="$(concat-etcd-servers)"
export ETCD_SERVERS
# By default, etcd cluster will use runtime configuration
# https://coreos.com/etcd/docs/latest/v2/runtime-configuration.html
# Get the etcd initial cluster string and store it in ETCD_INITIAL_CLUSTER
function concat-etcd-initial-cluster() {
local etcd_initial_cluster=""
local num_infra=0
for master in ${MASTERS}; do
local master_ip="${master#*@}"
if [ -n "$etcd_initial_cluster" ]; then
etcd_initial_cluster+=","
fi
etcd_initial_cluster+="infra${num_infra}=https://${master_ip}:2380"
((++num_infra))
done
echo "$etcd_initial_cluster"
}
ETCD_INITIAL_CLUSTER="$(concat-etcd-initial-cluster)"
export ETCD_INITIAL_CLUSTER
CERT_DIR="${CERT_DIR:-${root}/ca-cert}"
mkdir -p "${CERT_DIR}"
# CERT_DIR path must be absolute.
CERT_DIR="$(cd "${CERT_DIR}" && pwd)"
export CERT_DIR
# Define the IP range used for service cluster IPs.
# Choose a private IP range here, per RFC 1918 (https://tools.ietf.org/html/rfc1918).
export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-"192.168.3.0/24"}
# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
export ENABLE_CLUSTER_DNS
# DNS_SERVER_IP must be an IP within SERVICE_CLUSTER_IP_RANGE
DNS_SERVER_IP=${DNS_SERVER_IP:-"192.168.3.100"}
DNS_DOMAIN=${DNS_DOMAIN:-"cluster.local"}
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
export ENABLE_CLUSTER_UI
# Define the IP range used for the flannel overlay network; it must not overlap with SERVICE_CLUSTER_IP_RANGE above.
export FLANNEL_NET=${FLANNEL_NET:-"172.16.0.0/16"}
# Admission Controllers to invoke prior to persisting objects in cluster.
# MutatingAdmissionWebhook should be the last controller that modifies the
# request object, otherwise users will be confused if the mutating webhooks'
# modification is overwritten.
# If ResourceQuota is included, keep it at the end of the list to
# prevent incrementing quota usage prematurely.
export ADMISSION_CONTROL=${ADMISSION_CONTROL:-"NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeClaimResize,DefaultTolerationSeconds,Priority,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"}
# Extra options to set on the Docker command line.
# This is useful for setting --insecure-registry for local registries.
export DOCKER_OPTS=${DOCKER_OPTS:-""}
# Timeouts for process checking on master and minion
export PROCESS_CHECK_TIMEOUT=${PROCESS_CHECK_TIMEOUT:-180} # seconds.
unset -f default-advertise-address concat-etcd-servers length-of concat-etcd-initial-cluster
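# Usage sketch (not part of the original file): these defaults are intended to be
# overridden via environment variables before bringing the cluster up, e.g.:
#   export MASTER="centos@10.0.0.10"                      # illustrative addresses
#   export NODES="centos@10.0.0.11 centos@10.0.0.12"
#   export KUBERNETES_PROVIDER=centos
#   cluster/kube-up.sh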

View File

@ -1,19 +0,0 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## for CentOS/Fedora/RHEL cluster in test mode
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/cluster/centos/config-default.sh"

View File

@ -1,66 +0,0 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# deploy the add-on services after the cluster is available
set -e
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/cluster/centos/config-default.sh"
KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
export KUBECTL_PATH="${KUBE_ROOT}/cluster/centos/binaries/kubectl"
export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/centos/config-default.sh}
function deploy_dns {
echo "Deploying DNS on Kubernetes"
cp "${KUBE_ROOT}/cluster/addons/dns/kube-dns/kube-dns.yaml.sed" kube-dns.yaml
sed -i -e "s/\\\$DNS_DOMAIN/${DNS_DOMAIN}/g" kube-dns.yaml
sed -i -e "s/\\\$DNS_SERVER_IP/${DNS_SERVER_IP}/g" kube-dns.yaml
KUBEDNS=$("${KUBECTL}" get services --namespace=kube-system | grep kube-dns | cat)
if [ ! "$KUBEDNS" ]; then
# use kubectl to create kube-dns addon
${KUBECTL} --namespace=kube-system create -f kube-dns.yaml
echo "Kube-dns addon is successfully deployed."
else
echo "Kube-dns addon is already deployed. Skipping."
fi
echo
}
function deploy_dashboard {
echo "Deploying Kubernetes Dashboard"
${KUBECTL} apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-secret.yaml"
${KUBECTL} apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-configmap.yaml"
${KUBECTL} apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-rbac.yaml"
${KUBECTL} apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-controller.yaml"
${KUBECTL} apply -f "${KUBE_ROOT}/cluster/addons/dashboard/dashboard-service.yaml"
echo
}
if [ "${ENABLE_CLUSTER_DNS}" == true ]; then
deploy_dns
fi
if [ "${ENABLE_CLUSTER_UI}" == true ]; then
deploy_dashboard
fi

View File

@ -1,89 +0,0 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
DEBUG="${DEBUG:-false}"
if [ "${DEBUG}" == "true" ]; then
set -x
fi
cert_ip=$1
extra_sans=${2:-}
cert_dir=${CERT_DIR:-/srv/kubernetes}
cert_group=${CERT_GROUP:-kube-cert}
mkdir -p "$cert_dir"
use_cn=false
sans="IP:${cert_ip}"
if [[ -n "${extra_sans}" ]]; then
sans="${sans},${extra_sans}"
fi
tmpdir=$(mktemp -d -t kubernetes_cacert.XXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
cd "${tmpdir}"
# TODO: For now, this is a patched tool that makes subject-alt-name work. Once
# the fix lands upstream, move back to the upstream easyrsa. This is cached in GCS
# but is originally taken from:
# https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
#
# To update, do the following:
# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
#
# Due to GCS caching of public objects, it may take time for this to be widely
# distributed.
#
# Use ~/kube/easy-rsa.tar.gz if it exists, so that it can be
# pre-pushed in cases where an outgoing connection is not allowed.
if [ -f ~/kube/easy-rsa.tar.gz ]; then
ln -s ~/kube/easy-rsa.tar.gz .
else
curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1
fi
tar xzf easy-rsa.tar.gz > /dev/null 2>&1
cd easy-rsa-master/easyrsa3
./easyrsa init-pki > /dev/null 2>&1
./easyrsa --batch "--req-cn=${cert_ip}@$(date +%s)" build-ca nopass > /dev/null 2>&1
if [ $use_cn = "true" ]; then
./easyrsa build-server-full "${cert_ip}" nopass > /dev/null 2>&1
cp -p "pki/issued/${cert_ip}.crt" "${cert_dir}/server.cert" > /dev/null 2>&1
cp -p "pki/private/${cert_ip}.key" "${cert_dir}/server.key" > /dev/null 2>&1
else
./easyrsa --subject-alt-name="${sans}" build-server-full kubernetes-master nopass > /dev/null 2>&1
cp -p pki/issued/kubernetes-master.crt "${cert_dir}/server.cert" > /dev/null 2>&1
cp -p pki/private/kubernetes-master.key "${cert_dir}/server.key" > /dev/null 2>&1
fi
# Make a superuser client cert with subject "O=system:masters, CN=kubecfg"
./easyrsa --dn-mode=org \
--req-cn=kubecfg --req-org=system:masters \
--req-c= --req-st= --req-city= --req-email= --req-ou= \
build-client-full kubecfg nopass > /dev/null 2>&1
cp -p pki/ca.crt "${cert_dir}/ca.crt"
cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"
# Make server certs accessible to apiserver.
chgrp "${cert_group}" "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt"
chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert" "${cert_dir}/ca.crt"
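# Example invocation (a sketch; the IP and SAN list are illustrative). util.sh
# calls this script with the master advertise IP and a comma-separated SAN list:
#   sudo CERT_DIR=/srv/kubernetes bash make-ca-cert.sh 172.10.0.11 \
#     "IP:192.168.3.1,DNS:kubernetes,DNS:kubernetes.default"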

View File

@ -1,122 +0,0 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
MASTER_ADDRESS=${1:-"8.8.8.18"}
ETCD_SERVERS=${2:-"https://8.8.8.18:2379"}
SERVICE_CLUSTER_IP_RANGE=${3:-"10.10.10.0/24"}
ADMISSION_CONTROL=${4:-""}
cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
# --logtostderr=true: log to standard error instead of files
KUBE_LOGTOSTDERR="--logtostderr=true"
# --v=0: log level for V logs
KUBE_LOG_LEVEL="--v=4"
# --etcd-servers=[]: List of etcd servers to watch (http://ip:port),
# comma separated. Mutually exclusive with -etcd-config
KUBE_ETCD_SERVERS="--etcd-servers=${ETCD_SERVERS}"
# --etcd-cafile="": SSL Certificate Authority file used to secure etcd communication.
KUBE_ETCD_CAFILE="--etcd-cafile=/srv/kubernetes/etcd/ca.pem"
# --etcd-certfile="": SSL certification file used to secure etcd communication.
KUBE_ETCD_CERTFILE="--etcd-certfile=/srv/kubernetes/etcd/client.pem"
# --etcd-keyfile="": key file used to secure etcd communication.
KUBE_ETCD_KEYFILE="--etcd-keyfile=/srv/kubernetes/etcd/client-key.pem"
# --insecure-bind-address=127.0.0.1: The IP address on which to serve the --insecure-port.
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
# --insecure-port=8080: The port on which to serve unsecured, unauthenticated access.
KUBE_API_PORT="--insecure-port=8080"
# --kubelet-port=10250: Kubelet port
NODE_PORT="--kubelet-port=10250"
# --advertise-address=<nil>: The IP address on which to advertise
# the apiserver to members of the cluster.
KUBE_ADVERTISE_ADDR="--advertise-address=${MASTER_ADDRESS}"
# --allow-privileged=false: If true, allow privileged containers.
KUBE_ALLOW_PRIV="--allow-privileged=false"
# --service-cluster-ip-range=<nil>: A CIDR notation IP range from which to assign service cluster IPs.
# This must not overlap with any IP ranges assigned to nodes for pods.
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
# --admission-control="AlwaysAdmit": Ordered list of plug-ins
# to do admission control of resources into cluster.
# Comma-delimited list of:
# LimitRanger, AlwaysDeny, SecurityContextDeny, NamespaceExists,
# NamespaceLifecycle, NamespaceAutoProvision, AlwaysAdmit,
# ServiceAccount, DefaultStorageClass, DefaultTolerationSeconds, ResourceQuota
# Mark Deprecated. Use --enable-admission-plugins or --disable-admission-plugins instead since v1.10.
# It will be removed in a future version.
KUBE_ADMISSION_CONTROL="--admission-control=${ADMISSION_CONTROL}"
# --client-ca-file="": If set, any request presenting a client certificate signed
# by one of the authorities in the client-ca-file is authenticated with an identity
# corresponding to the CommonName of the client certificate.
KUBE_API_CLIENT_CA_FILE="--client-ca-file=/srv/kubernetes/ca.crt"
# --tls-cert-file="": File containing x509 Certificate for HTTPS. (CA cert, if any,
# concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file
# and --tls-private-key-file are not provided, a self-signed certificate and key are
# generated for the public address and saved to /var/run/kubernetes.
KUBE_API_TLS_CERT_FILE="--tls-cert-file=/srv/kubernetes/server.cert"
# --tls-private-key-file="": File containing x509 private key matching --tls-cert-file.
KUBE_API_TLS_PRIVATE_KEY_FILE="--tls-private-key-file=/srv/kubernetes/server.key"
EOF
KUBE_APISERVER_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${KUBE_LOG_LEVEL} \\
\${KUBE_ETCD_SERVERS} \\
\${KUBE_ETCD_CAFILE} \\
\${KUBE_ETCD_CERTFILE} \\
\${KUBE_ETCD_KEYFILE} \\
\${KUBE_API_ADDRESS} \\
\${KUBE_API_PORT} \\
\${NODE_PORT} \\
\${KUBE_ADVERTISE_ADDR} \\
\${KUBE_ALLOW_PRIV} \\
\${KUBE_SERVICE_ADDRESSES} \\
\${KUBE_ADMISSION_CONTROL} \\
\${KUBE_API_CLIENT_CA_FILE} \\
\${KUBE_API_TLS_CERT_FILE} \\
\${KUBE_API_TLS_PRIVATE_KEY_FILE}"
cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver ${KUBE_APISERVER_OPTS}
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
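# Quick sanity check (a sketch, relying on the insecure port 8080 configured above):
#   systemctl status kube-apiserver --no-pager
#   curl http://127.0.0.1:8080/healthz   # should print "ok"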

View File

@ -1,61 +0,0 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
MASTER_ADDRESS=${1:-"8.8.8.18"}
cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=4"
KUBE_MASTER="--master=${MASTER_ADDRESS}:8080"
# --root-ca-file="": If set, this root certificate authority will be included in
# service account's token secret. This must be a valid PEM-encoded CA bundle.
KUBE_CONTROLLER_MANAGER_ROOT_CA_FILE="--root-ca-file=/srv/kubernetes/ca.crt"
# --service-account-private-key-file="": Filename containing a PEM-encoded private
# RSA key used to sign service account tokens.
KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE="--service-account-private-key-file=/srv/kubernetes/server.key"
# --leader-elect: Start a leader election client and gain leadership before
# executing the main loop. Enable this when running replicated components for high availability.
KUBE_LEADER_ELECT="--leader-elect"
EOF
KUBE_CONTROLLER_MANAGER_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${KUBE_LOG_LEVEL} \\
\${KUBE_MASTER} \\
\${KUBE_CONTROLLER_MANAGER_ROOT_CA_FILE} \\
\${KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE} \\
\${KUBE_LEADER_ELECT}"
cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager ${KUBE_CONTROLLER_MANAGER_OPTS}
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager

View File

@ -1,86 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Create etcd.conf, etcd.service, and start etcd service.
etcd_data_dir=/var/lib/etcd
mkdir -p ${etcd_data_dir}
ETCD_NAME=${1:-"default"}
ETCD_LISTEN_IP=${2:-"0.0.0.0"}
ETCD_INITIAL_CLUSTER=${3:-}
cat <<EOF >/opt/kubernetes/cfg/etcd.conf
# [member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="${etcd_data_dir}/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://${ETCD_LISTEN_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_LISTEN_IP}:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_LISTEN_IP}:2380"
# If you use a different ETCD_NAME (e.g. test),
# set the ETCD_INITIAL_CLUSTER value for that name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="${ETCD_INITIAL_CLUSTER}"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_LISTEN_IP}:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#
#[proxy]
#ETCD_PROXY="off"
#
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/srv/kubernetes/etcd/ca.pem"
ETCD_CERT_FILE="/srv/kubernetes/etcd/server-${ETCD_NAME}.pem"
ETCD_KEY_FILE="/srv/kubernetes/etcd/server-${ETCD_NAME}-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/srv/kubernetes/etcd/ca.pem"
ETCD_PEER_CERT_FILE="/srv/kubernetes/etcd/peer-${ETCD_NAME}.pem"
ETCD_PEER_KEY_FILE="/srv/kubernetes/etcd/peer-${ETCD_NAME}-key.pem"
EOF
cat <<EOF >/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
[Service]
WorkingDirectory=${etcd_data_dir}
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=\$(nproc) /opt/kubernetes/bin/etcd"
Type=notify
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd

View File

@ -1,72 +0,0 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ETCD_SERVERS=${1:-"http://8.8.8.18:4001"}
FLANNEL_NET=${2:-"172.16.0.0/16"}
CA_FILE="/srv/kubernetes/etcd/ca.pem"
CERT_FILE="/srv/kubernetes/etcd/client.pem"
KEY_FILE="/srv/kubernetes/etcd/client-key.pem"
cat <<EOF >/opt/kubernetes/cfg/flannel
FLANNEL_ETCD="-etcd-endpoints=${ETCD_SERVERS}"
FLANNEL_ETCD_KEY="-etcd-prefix=/coreos.com/network"
FLANNEL_ETCD_CAFILE="--etcd-cafile=${CA_FILE}"
FLANNEL_ETCD_CERTFILE="--etcd-certfile=${CERT_FILE}"
FLANNEL_ETCD_KEYFILE="--etcd-keyfile=${KEY_FILE}"
EOF
cat <<EOF >/usr/lib/systemd/system/flannel.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/flannel
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \${FLANNEL_ETCD} \${FLANNEL_ETCD_KEY} \${FLANNEL_ETCD_CAFILE} \${FLANNEL_ETCD_CERTFILE} \${FLANNEL_ETCD_KEYFILE}
Type=notify
[Install]
WantedBy=multi-user.target
EOF
# Store FLANNEL_NET to etcd.
attempt=0
while true; do
if /opt/kubernetes/bin/etcdctl --ca-file ${CA_FILE} --cert-file ${CERT_FILE} --key-file ${KEY_FILE} \
--no-sync -C "${ETCD_SERVERS}" \
get /coreos.com/network/config >/dev/null 2>&1; then
break
else
if (( attempt > 600 )); then
echo "timeout for waiting network config" > ~/kube/err.log
exit 2
fi
/opt/kubernetes/bin/etcdctl --ca-file ${CA_FILE} --cert-file ${CERT_FILE} --key-file ${KEY_FILE} \
--no-sync -C "${ETCD_SERVERS}" \
mk /coreos.com/network/config "{\"Network\":\"${FLANNEL_NET}\"}" >/dev/null 2>&1
attempt=$((attempt+1))
sleep 3
fi
done
wait
systemctl enable flannel
systemctl daemon-reload
systemctl restart flannel

View File

@ -1,23 +0,0 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Set initial-cluster-state to existing, and restart etcd service.
sed -i 's/ETCD_INITIAL_CLUSTER_STATE="new"/ETCD_INITIAL_CLUSTER_STATE="existing"/' /opt/kubernetes/cfg/etcd.conf
systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd

View File

@ -1,64 +0,0 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
MASTER_ADDRESS=${1:-"8.8.8.18"}
cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
###
# kubernetes scheduler config
# --logtostderr=true: log to standard error instead of files
KUBE_LOGTOSTDERR="--logtostderr=true"
# --v=0: log level for V logs
KUBE_LOG_LEVEL="--v=4"
# --master: The address of the Kubernetes API server (overrides any value in kubeconfig).
KUBE_MASTER="--master=${MASTER_ADDRESS}:8080"
# --leader-elect: Start a leader election client and gain leadership before
# executing the main loop. Enable this when running replicated components for high availability.
KUBE_LEADER_ELECT="--leader-elect"
# Add your own!
KUBE_SCHEDULER_ARGS=""
EOF
KUBE_SCHEDULER_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${KUBE_LOG_LEVEL} \\
\${KUBE_MASTER} \\
\${KUBE_LEADER_ELECT} \\
\$KUBE_SCHEDULER_ARGS"
cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler ${KUBE_SCHEDULER_OPTS}
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler

View File

@ -1,113 +0,0 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generate Docker daemon options based on flannel env file.
# exit on any error
set -e
usage() {
echo "$0 [-f FLANNEL-ENV-FILE] [-d DOCKER-ENV-FILE] [-i] [-c] [-m] [-k COMBINED-KEY]
Generate Docker daemon options based on flannel env file
OPTIONS:
-f Path to flannel env file. Defaults to /run/flannel/subnet.env
-d Path to Docker env file to write to. Defaults to /run/docker_opts.env
-i Output each Docker option as individual var. e.g. DOCKER_OPT_MTU=1500
-c Output combined Docker options into DOCKER_OPTS var
-k Set the combined options key to this value (default DOCKER_OPTS=)
-m Do not output --ip-masq (useful for older Docker versions)
" >/dev/stderr
exit 1
}
flannel_env="/run/flannel/subnet.env"
docker_env="/run/docker_opts.env"
combined_opts_key="DOCKER_OPTS"
indiv_opts=false
combined_opts=false
ipmasq=true
val=""
while getopts "f:d:icmk:" opt; do
case $opt in
f)
flannel_env=$OPTARG
;;
d)
docker_env=$OPTARG
;;
i)
indiv_opts=true
;;
c)
combined_opts=true
;;
m)
ipmasq=false
;;
k)
combined_opts_key=$OPTARG
;;
\?)
usage
;;
esac
done
if [[ $indiv_opts = false ]] && [[ $combined_opts = false ]]; then
indiv_opts=true
combined_opts=true
fi
if [[ -f "${flannel_env}" ]]; then
source "${flannel_env}"
fi
if [[ -n "$FLANNEL_SUBNET" ]]; then
# shellcheck disable=SC2034 # Variable name referenced in OPT_LOOP below
DOCKER_OPT_BIP="--bip=$FLANNEL_SUBNET"
fi
if [[ -n "$FLANNEL_MTU" ]]; then
# shellcheck disable=SC2034 # Variable name referenced in OPT_LOOP below
DOCKER_OPT_MTU="--mtu=$FLANNEL_MTU"
fi
if [[ "$FLANNEL_IPMASQ" = true ]] && [[ $ipmasq = true ]]; then
# shellcheck disable=SC2034 # Variable name referenced in OPT_LOOP below
DOCKER_OPT_IPMASQ="--ip-masq=false"
fi
eval docker_opts="\$${combined_opts_key}"
docker_opts+=" "
echo -n "" >"${docker_env}"
# OPT_LOOP
for opt in $(compgen -v DOCKER_OPT_); do
eval val=\$"${opt}"
if [[ "$indiv_opts" = true ]]; then
echo "$opt=\"$val\"" >>"${docker_env}"
fi
docker_opts+="$val "
done
if [[ "$combined_opts" = true ]]; then
echo "${combined_opts_key}=\"${docker_opts}\"" >>"${docker_env}"
fi
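# Example invocation (a sketch; mirrors how the node's flannel unit calls this
# script). With /run/flannel/subnet.env present it writes Docker options such as:
#   ./mk-docker-opts.sh -d /run/flannel/docker
#   # -> DOCKER_OPT_BIP="--bip=172.16.42.1/24"            (values are illustrative)
#   # -> DOCKER_OPTS="--bip=172.16.42.1/24 --mtu=1450 --ip-masq=false"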

View File

@ -1,27 +0,0 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Delete default docker bridge, so that docker can start with flannel network.
# exit on any error
set -e
rc=0
ip link show docker0 >/dev/null 2>&1 || rc="$?"
if [[ "$rc" -eq "0" ]]; then
ip link set dev docker0 down
ip link delete docker0
fi

View File

@ -1,48 +0,0 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCKER_OPTS=${1:-""}
DOCKER_CONFIG=/opt/kubernetes/cfg/docker
cat <<EOF >$DOCKER_CONFIG
DOCKER_OPTS="-H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -s overlay --selinux-enabled=false ${DOCKER_OPTS}"
EOF
cat <<EOF >/usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target flannel.service
Requires=flannel.service
[Service]
Type=notify
EnvironmentFile=-/run/flannel/docker
EnvironmentFile=-/opt/kubernetes/cfg/docker
WorkingDirectory=/opt/kubernetes/bin
ExecStart=/opt/kubernetes/bin/dockerd \$DOCKER_OPT_BIP \$DOCKER_OPT_MTU \$DOCKER_OPTS
LimitNOFILE=1048576
LimitNPROC=1048576
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable docker
systemctl restart docker

View File

@ -1,74 +0,0 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ETCD_SERVERS=${1:-"https://8.8.8.18:2379"}
FLANNEL_NET=${2:-"172.16.0.0/16"}
CA_FILE="/srv/kubernetes/etcd/ca.pem"
CERT_FILE="/srv/kubernetes/etcd/client.pem"
KEY_FILE="/srv/kubernetes/etcd/client-key.pem"
cat <<EOF >/opt/kubernetes/cfg/flannel
FLANNEL_ETCD="-etcd-endpoints=${ETCD_SERVERS}"
FLANNEL_ETCD_KEY="-etcd-prefix=/coreos.com/network"
FLANNEL_ETCD_CAFILE="--etcd-cafile=${CA_FILE}"
FLANNEL_ETCD_CERTFILE="--etcd-certfile=${CERT_FILE}"
FLANNEL_ETCD_KEYFILE="--etcd-keyfile=${KEY_FILE}"
EOF
cat <<EOF >/usr/lib/systemd/system/flannel.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
Before=docker.service
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/flannel
ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \${FLANNEL_ETCD} \${FLANNEL_ETCD_KEY} \${FLANNEL_ETCD_CAFILE} \${FLANNEL_ETCD_CERTFILE} \${FLANNEL_ETCD_KEYFILE}
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker
Type=notify
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF
# Store FLANNEL_NET to etcd.
attempt=0
while true; do
if /opt/kubernetes/bin/etcdctl --ca-file ${CA_FILE} --cert-file ${CERT_FILE} --key-file ${KEY_FILE} \
--no-sync -C "${ETCD_SERVERS}" \
get /coreos.com/network/config >/dev/null 2>&1; then
break
else
if (( attempt > 600 )); then
echo "timeout for waiting network config" > ~/kube/err.log
exit 2
fi
/opt/kubernetes/bin/etcdctl --ca-file ${CA_FILE} --cert-file ${CERT_FILE} --key-file ${KEY_FILE} \
--no-sync -C "${ETCD_SERVERS}" \
mk /coreos.com/network/config "{\"Network\":\"${FLANNEL_NET}\"}" >/dev/null 2>&1
attempt=$((attempt+1))
sleep 3
fi
done
wait
systemctl daemon-reload

View File

@ -1,98 +0,0 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
MASTER_ADDRESS=${1:-"8.8.8.18"}
NODE_ADDRESS=${2:-"8.8.8.20"}
DNS_SERVER_IP=${3:-"192.168.3.100"}
DNS_DOMAIN=${4:-"cluster.local"}
KUBECONFIG_DIR=${KUBECONFIG_DIR:-/opt/kubernetes/cfg}
# Generate a kubeconfig file
cat <<EOF > "${KUBECONFIG_DIR}/kubelet.kubeconfig"
apiVersion: v1
kind: Config
clusters:
- cluster:
server: http://${MASTER_ADDRESS}:8080/
name: local
contexts:
- context:
cluster: local
name: local
current-context: local
EOF
cat <<EOF >/opt/kubernetes/cfg/kubelet
# --logtostderr=true: log to standard error instead of files
KUBE_LOGTOSTDERR="--logtostderr=true"
# --v=0: log level for V logs
KUBE_LOG_LEVEL="--v=4"
# --address=0.0.0.0: The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces)
NODE_ADDRESS="--address=${NODE_ADDRESS}"
# --port=10250: The port for the Kubelet to serve on. Note that "kubectl logs" will not work if you set this flag.
NODE_PORT="--port=10250"
# --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
# Path to a kubeconfig file, specifying how to connect to the API server.
KUBELET_KUBECONFIG="--kubeconfig=${KUBECONFIG_DIR}/kubelet.kubeconfig"
# --allow-privileged=false: If true, allow containers to request privileged mode. [default=false]
KUBE_ALLOW_PRIV="--allow-privileged=false"
# DNS info
KUBELET__DNS_IP="--cluster-dns=${DNS_SERVER_IP}"
KUBELET_DNS_DOMAIN="--cluster-domain=${DNS_DOMAIN}"
# Add your own!
KUBELET_ARGS=""
EOF
KUBELET_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${KUBE_LOG_LEVEL} \\
\${NODE_ADDRESS} \\
\${NODE_PORT} \\
\${NODE_HOSTNAME} \\
\${KUBELET_KUBECONFIG} \\
\${KUBE_ALLOW_PRIV} \\
\${KUBELET__DNS_IP} \\
\${KUBELET_DNS_DOMAIN} \\
\$KUBELET_ARGS"
cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet ${KUBELET_OPTS}
Restart=on-failure
KillMode=process
RestartSec=15s
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet

View File

@ -1,56 +0,0 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
MASTER_ADDRESS=${1:-"8.8.8.18"}
NODE_ADDRESS=${2:-"8.8.8.20"}
cat <<EOF >/opt/kubernetes/cfg/kube-proxy
# --logtostderr=true: log to standard error instead of files
KUBE_LOGTOSTDERR="--logtostderr=true"
# --v=0: log level for V logs
KUBE_LOG_LEVEL="--v=4"
# --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}"
# --master="": The address of the Kubernetes API server (overrides any value in kubeconfig)
KUBE_MASTER="--master=http://${MASTER_ADDRESS}:8080"
EOF
KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\
\${KUBE_LOG_LEVEL} \\
\${NODE_HOSTNAME} \\
\${KUBE_MASTER}"
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy ${KUBE_PROXY_OPTS}
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy

View File

@ -1,388 +0,0 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.
# exit on any error
set -e
SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR -C"
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
readonly ROOT=$(dirname "${BASH_SOURCE[0]}")
source "${ROOT}/${KUBE_CONFIG_FILE:-"config-default.sh"}"
source "$KUBE_ROOT/cluster/common.sh"
# shellcheck disable=SC2034 # Can't tell if this is still needed or not
KUBECTL_PATH=${KUBE_ROOT}/cluster/centos/binaries/kubectl
# Directory to be used for master and node provisioning.
KUBE_TEMP="${HOME}/kube_temp"
# Get master IP addresses and store in KUBE_MASTER_IP_ADDRESSES[]
# Must ensure that the following ENV vars are set:
# MASTERS
function detect-masters() {
KUBE_MASTER_IP_ADDRESSES=()
for master in ${MASTERS}; do
KUBE_MASTER_IP_ADDRESSES+=("${master#*@}")
done
echo "KUBE_MASTERS: ${MASTERS}" 1>&2
echo "KUBE_MASTER_IP_ADDRESSES: [${KUBE_MASTER_IP_ADDRESSES[*]}]" 1>&2
}
# Get node IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
function detect-nodes() {
KUBE_NODE_IP_ADDRESSES=()
for node in ${NODES}; do
KUBE_NODE_IP_ADDRESSES+=("${node#*@}")
done
echo "KUBE_NODE_IP_ADDRESSES: [${KUBE_NODE_IP_ADDRESSES[*]}]" 1>&2
}
# Verify prereqs on host machine
function verify-prereqs() {
local rc
rc=0
ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
# "Could not open a connection to your authentication agent."
if [[ "${rc}" -eq 2 ]]; then
eval "$(ssh-agent)" > /dev/null
trap-add "kill ${SSH_AGENT_PID}" EXIT
fi
rc=0
ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
# "The agent has no identities."
if [[ "${rc}" -eq 1 ]]; then
# Try adding one of the default identities, with or without passphrase.
ssh-add || true
fi
rc=0
# Expect at least one identity to be available.
if ! ssh-add -L 1> /dev/null 2> /dev/null; then
echo "Could not find or add an SSH identity."
echo "Please start ssh-agent, add your identity, and retry."
exit 1
fi
}
# Install handler for signal trap
function trap-add {
local handler="$1"
local signal="${2-EXIT}"
local cur
cur="$(eval "sh -c 'echo \$3' -- $(trap -p "${signal}")")"
if [[ -n "${cur}" ]]; then
handler="${cur}; ${handler}"
fi
# shellcheck disable=SC2064 # Early expansion is intentional here.
trap "${handler}" "${signal}"
}
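# Illustrative usage (a sketch): append a cleanup handler without clobbering any
# handler installed earlier, as verify-prereqs does for the ssh-agent it spawns:
#   trap-add 'rm -rf "${KUBE_TEMP}"' EXIT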
# Validate a kubernetes cluster
function validate-cluster() {
# by default call the generic validate-cluster.sh script, customizable by
# any cluster provider if this does not fit.
set +e
if ! "${KUBE_ROOT}/cluster/validate-cluster.sh"; then
for master in ${MASTERS}; do
troubleshoot-master "${master}"
done
for node in ${NODES}; do
troubleshoot-node "${node}"
done
exit 1
fi
set -e
}
# Instantiate a kubernetes cluster
function kube-up() {
make-ca-cert
local num_infra=0
for master in ${MASTERS}; do
provision-master "${master}" "infra${num_infra}"
((++num_infra))
done
for master in ${MASTERS}; do
post-provision-master "${master}"
done
for node in ${NODES}; do
provision-node "${node}"
done
detect-masters
# set CONTEXT and KUBE_SERVER values for create-kubeconfig() and get-password()
export CONTEXT="centos"
export KUBE_SERVER="http://${MASTER_ADVERTISE_ADDRESS}:8080"
source "${KUBE_ROOT}/cluster/common.sh"
# set kubernetes user and password
get-password
create-kubeconfig
}
# Delete a kubernetes cluster
function kube-down() {
for master in ${MASTERS}; do
tear-down-master "${master}"
done
for node in ${NODES}; do
tear-down-node "${node}"
done
}
function troubleshoot-master() {
# Troubleshooting on master if all required daemons are active.
echo "[INFO] Troubleshooting on master $1"
local -a required_daemon=("kube-apiserver" "kube-controller-manager" "kube-scheduler")
local daemon
local daemon_status
printf "%-24s %-10s \n" "PROCESS" "STATUS"
for daemon in "${required_daemon[@]}"; do
local rc=0
kube-ssh "${1}" "sudo systemctl is-active ${daemon}" >/dev/null 2>&1 || rc="$?"
if [[ "${rc}" -ne "0" ]]; then
daemon_status="inactive"
else
daemon_status="active"
fi
printf "%-24s %s\n" "${daemon}" ${daemon_status}
done
printf "\n"
}
function troubleshoot-node() {
# Troubleshooting on node if all required daemons are active.
echo "[INFO] Troubleshooting on node ${1}"
local -a required_daemon=("kube-proxy" "kubelet" "docker" "flannel")
local daemon
local daemon_status
printf "%-24s %-10s \n" "PROCESS" "STATUS"
for daemon in "${required_daemon[@]}"; do
local rc=0
kube-ssh "${1}" "sudo systemctl is-active ${daemon}" >/dev/null 2>&1 || rc="$?"
if [[ "${rc}" -ne "0" ]]; then
daemon_status="inactive"
else
daemon_status="active"
fi
printf "%-24s %s\n" "${daemon}" ${daemon_status}
done
printf "\n"
}
# Clean up on master
function tear-down-master() {
echo "[INFO] tear-down-master on $1"
for service_name in etcd kube-apiserver kube-controller-manager kube-scheduler ; do
service_file="/usr/lib/systemd/system/${service_name}.service"
kube-ssh "$1" " \
if [[ -f $service_file ]]; then \
sudo systemctl stop $service_name; \
sudo systemctl disable $service_name; \
sudo rm -f $service_file; \
fi"
done
kube-ssh "${1}" "sudo rm -rf /opt/kubernetes"
kube-ssh "${1}" "sudo rm -rf /srv/kubernetes"
kube-ssh "${1}" "sudo rm -rf ${KUBE_TEMP}"
kube-ssh "${1}" "sudo rm -rf /var/lib/etcd"
}
# Clean up on node
function tear-down-node() {
echo "[INFO] tear-down-node on $1"
for service_name in kube-proxy kubelet docker flannel ; do
service_file="/usr/lib/systemd/system/${service_name}.service"
kube-ssh "$1" " \
if [[ -f $service_file ]]; then \
sudo systemctl stop $service_name; \
sudo systemctl disable $service_name; \
sudo rm -f $service_file; \
fi"
done
kube-ssh "$1" "sudo rm -rf /run/flannel"
kube-ssh "$1" "sudo rm -rf /opt/kubernetes"
kube-ssh "$1" "sudo rm -rf /srv/kubernetes"
kube-ssh "$1" "sudo rm -rf ${KUBE_TEMP}"
}
# Generate the CA certificates for k8s components
function make-ca-cert() {
echo "[INFO] make-ca-cert"
bash "${ROOT}/make-ca-cert.sh" "${MASTER_ADVERTISE_IP}" "IP:${MASTER_ADVERTISE_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local"
}
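# For example (illustrative), with SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24 the
# expression ${SERVICE_CLUSTER_IP_RANGE%.*}.1 expands to 192.168.3.1, the cluster
# IP of the "kubernetes" service, which must be present in the API server
# certificate's SANs.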
# Provision master
#
# Assumed vars:
# $1 (master)
# $2 (etcd_name)
# KUBE_TEMP
# ETCD_SERVERS
# ETCD_INITIAL_CLUSTER
# SERVICE_CLUSTER_IP_RANGE
# MASTER_ADVERTISE_ADDRESS
function provision-master() {
echo "[INFO] Provision master on $1"
local master="$1"
local master_ip="${master#*@}"
local etcd_name="$2"
ensure-setup-dir "${master}"
ensure-etcd-cert "${etcd_name}" "${master_ip}"
kube-scp "${master}" "${ROOT}/ca-cert ${ROOT}/binaries/master ${ROOT}/master ${ROOT}/config-default.sh ${ROOT}/util.sh" "${KUBE_TEMP}"
kube-scp "${master}" "${ROOT}/etcd-cert/ca.pem \
${ROOT}/etcd-cert/client.pem \
${ROOT}/etcd-cert/client-key.pem \
${ROOT}/etcd-cert/server-${etcd_name}.pem \
${ROOT}/etcd-cert/server-${etcd_name}-key.pem \
${ROOT}/etcd-cert/peer-${etcd_name}.pem \
${ROOT}/etcd-cert/peer-${etcd_name}-key.pem" "${KUBE_TEMP}/etcd-cert"
kube-ssh "${master}" " \
sudo rm -rf /opt/kubernetes/bin; \
sudo cp -r ${KUBE_TEMP}/master/bin /opt/kubernetes; \
sudo mkdir -p /srv/kubernetes/; sudo cp -f ${KUBE_TEMP}/ca-cert/* /srv/kubernetes/; \
sudo mkdir -p /srv/kubernetes/etcd; sudo cp -f ${KUBE_TEMP}/etcd-cert/* /srv/kubernetes/etcd/; \
sudo chmod -R +x /opt/kubernetes/bin; \
sudo ln -sf /opt/kubernetes/bin/* /usr/local/bin/; \
sudo bash ${KUBE_TEMP}/master/scripts/etcd.sh ${etcd_name} ${master_ip} ${ETCD_INITIAL_CLUSTER}; \
sudo bash ${KUBE_TEMP}/master/scripts/apiserver.sh ${master_ip} ${ETCD_SERVERS} ${SERVICE_CLUSTER_IP_RANGE} ${ADMISSION_CONTROL}; \
sudo bash ${KUBE_TEMP}/master/scripts/controller-manager.sh ${MASTER_ADVERTISE_ADDRESS}; \
sudo bash ${KUBE_TEMP}/master/scripts/scheduler.sh ${MASTER_ADVERTISE_ADDRESS}"
}
# Post-provision master, run after all masters were provisioned
#
# Assumed vars:
# $1 (master)
# KUBE_TEMP
# ETCD_SERVERS
# FLANNEL_NET
function post-provision-master() {
echo "[INFO] Post provision master on $1"
local master=$1
kube-ssh "${master}" " \
sudo bash ${KUBE_TEMP}/master/scripts/flannel.sh ${ETCD_SERVERS} ${FLANNEL_NET}; \
sudo bash ${KUBE_TEMP}/master/scripts/post-etcd.sh"
}
# Provision node
#
# Assumed vars:
# $1 (node)
# KUBE_TEMP
# ETCD_SERVERS
# FLANNEL_NET
# MASTER_ADVERTISE_ADDRESS
# DOCKER_OPTS
# DNS_SERVER_IP
# DNS_DOMAIN
function provision-node() {
echo "[INFO] Provision node on $1"
local node=$1
local node_ip=${node#*@}
local dns_ip=${DNS_SERVER_IP#*@}
# shellcheck disable=SC2153 # DNS_DOMAIN sourced from external file
local dns_domain=${DNS_DOMAIN#*@}
ensure-setup-dir "${node}"
kube-scp "${node}" "${ROOT}/binaries/node ${ROOT}/node ${ROOT}/config-default.sh ${ROOT}/util.sh" "${KUBE_TEMP}"
kube-scp "${node}" "${ROOT}/etcd-cert/ca.pem \
${ROOT}/etcd-cert/client.pem \
${ROOT}/etcd-cert/client-key.pem" "${KUBE_TEMP}/etcd-cert"
kube-ssh "${node}" " \
sudo rm -rf /opt/kubernetes/bin; \
sudo cp -r ${KUBE_TEMP}/node/bin /opt/kubernetes; \
sudo chmod -R +x /opt/kubernetes/bin; \
sudo mkdir -p /srv/kubernetes/etcd; sudo cp -f ${KUBE_TEMP}/etcd-cert/* /srv/kubernetes/etcd/; \
sudo ln -s /opt/kubernetes/bin/* /usr/local/bin/; \
sudo bash ${KUBE_TEMP}/node/scripts/flannel.sh ${ETCD_SERVERS} ${FLANNEL_NET}; \
sudo bash ${KUBE_TEMP}/node/scripts/docker.sh \"${DOCKER_OPTS}\"; \
sudo bash ${KUBE_TEMP}/node/scripts/kubelet.sh ${MASTER_ADVERTISE_ADDRESS} ${node_ip} ${dns_ip} ${dns_domain}; \
sudo bash ${KUBE_TEMP}/node/scripts/proxy.sh ${MASTER_ADVERTISE_ADDRESS}"
}
# Create dirs that'll be used during setup on target machine.
#
# Assumed vars:
# KUBE_TEMP
function ensure-setup-dir() {
kube-ssh "${1}" "mkdir -p ${KUBE_TEMP}; \
mkdir -p ${KUBE_TEMP}/etcd-cert; \
sudo mkdir -p /opt/kubernetes/bin; \
sudo mkdir -p /opt/kubernetes/cfg"
}
# Generate certificates for etcd cluster
#
# Assumed vars:
# $1 (etcd member name)
# $2 (master ip)
function ensure-etcd-cert() {
local etcd_name="$1"
local master_ip="$2"
local cert_dir="${ROOT}/etcd-cert"
if [[ ! -r "${cert_dir}/client.pem" || ! -r "${cert_dir}/client-key.pem" ]]; then
generate-etcd-cert "${cert_dir}" "${master_ip}" "client" "client"
fi
generate-etcd-cert "${cert_dir}" "${master_ip}" "server" "server-${etcd_name}"
generate-etcd-cert "${cert_dir}" "${master_ip}" "peer" "peer-${etcd_name}"
}
# Run command over ssh
function kube-ssh() {
local host="$1"
shift
ssh "${SSH_OPTS}" -t "${host}" "$@" >/dev/null 2>&1
}
# Copy files recursively over ssh; $2 may be a space-separated list of source paths.
function kube-scp() {
local host="$1"
local dst="$3"
local -a src
read -r -a src <<< "$2"
# SSH_OPTS is intentionally unquoted so each option is passed as a separate word.
# shellcheck disable=SC2086
scp -r ${SSH_OPTS} "${src[@]}" "${host}:${dst}"
}
# Ensure that we have a password created for authenticating to the master. Will
# read it from the kubeconfig file if available.
#
# Vars set:
# KUBE_USER
# KUBE_PASSWORD
function get-password {
load-or-gen-kube-basicauth
if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
KUBE_USER="admin"
KUBE_PASSWORD=$(python -c 'import string,random; '\
'print("".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)))')
fi
}

View File

@ -18,7 +18,7 @@ set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=${KUBE_ROOT:-$(dirname "${BASH_SOURCE}")/..}
KUBE_ROOT=${KUBE_ROOT:-$(dirname "${BASH_SOURCE[0]}")/..}
# Detect the OS name/arch so that we can find our binary
case "$(uname -s)" in

View File

@ -20,7 +20,7 @@ set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(cd $(dirname "${BASH_SOURCE}")/.. && pwd)
KUBE_ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)
DEFAULT_KUBECONFIG="${HOME:-.}/.kube/config"

View File

@ -2,6 +2,7 @@
reviewers:
- bowei
- cjcullen
- gmarek
- jszczepkowski
- vishh
@ -9,8 +10,10 @@ reviewers:
- MaciekPytel
- jingax10
- yujuhong
- zmerlynn
approvers:
- bowei
- cjcullen
- gmarek
- jszczepkowski
- vishh
@ -18,3 +21,4 @@ approvers:
- MaciekPytel
- jingax10
- yujuhong
- zmerlynn

View File

@ -1593,6 +1593,10 @@ function start-kube-apiserver {
params+=" --etcd-servers-overrides=${ETCD_SERVERS_OVERRIDES:-}"
fi
params+=" --secure-port=443"
if [[ "${ENABLE_APISERVER_INSECURE_PORT:-true}" != "true" ]]; then
# Default is :8080
params+=" --insecure-port=0"
fi
params+=" --tls-cert-file=${APISERVER_SERVER_CERT_PATH}"
params+=" --tls-private-key-file=${APISERVER_SERVER_KEY_PATH}"
params+=" --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"
@ -1878,7 +1882,6 @@ function start-kube-apiserver {
sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
sed -i -e "s@{{liveness_probe_initial_delay}}@${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${src_file}"
sed -i -e "s@{{secure_port}}@443@g" "${src_file}"
sed -i -e "s@{{secure_port}}@8080@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
sed -i -e "s@{{webhook_authn_config_mount}}@${webhook_authn_config_mount}@g" "${src_file}"

View File

@ -39,29 +39,30 @@ ACI_DIR=${STAGING_DIR}/gci-mounter
CWD=${PWD}
# Cleanup the temporary directories
function cleanup {
rm -rf ${DOWNLOAD_DIR}
rm -rf ${STAGING_DIR}
cd ${CWD}
cleanup() {
rm -rf "${DOWNLOAD_DIR}"
rm -rf "${STAGING_DIR}"
cd "${CWD}"
}
# Delete temporary directories on exit
trap cleanup EXIT
mkdir ${ACI_DIR}
mkdir "${ACI_DIR}"
# Convert docker image to aci and stage it
echo "Downloading docker2aci ${DOCKER2ACI_VERSION}"
wget "https://github.com/appc/docker2aci/releases/download/${DOCKER2ACI_VERSION}/docker2aci-${DOCKER2ACI_VERSION}.tar.gz" &> /dev/null
wget "https://github.com/appc/docker2aci/releases/download/${DOCKER2ACI_VERSION}/docker2aci-${DOCKER2ACI_VERSION}.tar.gz" >/dev/null 2>&1
echo "Extracting docker2aci ${DOCKER2ACI_VERSION}"
tar xzf docker2aci-${DOCKER2ACI_VERSION}.tar.gz
ACI_IMAGE=$(${DOWNLOAD_DIR}/docker2aci-${DOCKER2ACI_VERSION}/docker2aci ${DOCKER_IMAGE} 2>/dev/null | tail -n 1)
cp ${ACI_IMAGE} ${ACI_DIR}/${MOUNTER_ACI_IMAGE}
ACI_IMAGE=$("${DOWNLOAD_DIR}/docker2aci-${DOCKER2ACI_VERSION}/docker2aci" "${DOCKER_IMAGE}" 2>/dev/null | tail -n 1)
cp "${ACI_IMAGE}" "${ACI_DIR}/${MOUNTER_ACI_IMAGE}"
# Upload the contents to gcs
echo "Uploading gci mounter ACI in ${ACI_DIR} to ${MOUNTER_GCS_DIR}"
gsutil cp ${ACI_DIR}/${MOUNTER_ACI_IMAGE} ${MOUNTER_GCS_DIR}
gsutil cp "${ACI_DIR}/${MOUNTER_ACI_IMAGE}" "${MOUNTER_GCS_DIR}"
echo "Upload completed"
echo "Updated gci-mounter ACI version and SHA1 in cluster/gce/gci/configure.sh"
echo "${MOUNTER_ACI_IMAGE} hash: $(sha1sum ${ACI_DIR}/${MOUNTER_ACI_IMAGE})"
ACI_HASH=$(sha1sum "${ACI_DIR}/${MOUNTER_ACI_IMAGE}")
echo "${MOUNTER_ACI_IMAGE} hash: ${ACI_HASH}"

View File

@ -16,7 +16,7 @@
# A script that lets GCI preemptible nodes gracefully terminate in the event of a VM shutdown.
preemptible=$(curl "http://metadata.google.internal/computeMetadata/v1/instance/scheduling/preemptible" -H "Metadata-Flavor: Google")
if [ ${preemptible} == "TRUE" ]; then
if [ "${preemptible}" == "TRUE" ]; then
echo "Shutting down! Sleeping for a minute to let the node gracefully terminate"
# https://cloud.google.com/compute/docs/instances/stopping-or-deleting-an-instance#delete_timeout
sleep 30

View File

@ -49,20 +49,20 @@ function gcloud-list() {
local attempt=1
local result=""
while true; do
if result=$(gcloud ${group} ${resource} list --project=${PROJECT} ${filter:+--filter="$filter"} ${@:4}); then
if [[ ! -z "${GREP_REGEX}" ]]; then
if result=$(gcloud "${group}" "${resource}" list --project="${PROJECT}" ${filter:+--filter="$filter"} "${@:4}"); then
if [[ -n "${GREP_REGEX:-}" ]]; then
result=$(echo "${result}" | grep "${GREP_REGEX}" || true)
fi
echo "${result}"
return
fi
echo -e "Attempt ${attempt} failed to list ${resource}. Retrying." >&2
attempt=$(($attempt+1))
attempt=$((attempt + 1))
if [[ ${attempt} -gt 5 ]]; then
echo -e "List ${resource} failed!" >&2
exit 2
fi
sleep $((5*${attempt}))
sleep $((5 * attempt))
done
}

View File

@ -32,8 +32,9 @@
{{container_env}}
"livenessProbe": {
"httpGet": {
"scheme": "HTTPS",
"host": "127.0.0.1",
"port": 8080,
"port": {{secure_port}},
"path": "/healthz?exclude=etcd"
},
"initialDelaySeconds": {{liveness_probe_initial_delay}},
@ -41,8 +42,9 @@
},
"readinessProbe": {
"httpGet": {
"scheme": "HTTPS",
"host": "127.0.0.1",
"port": 8080,
"port": {{secure_port}},
"path": "/healthz"
},
"periodSeconds": 1,

View File

@ -2031,7 +2031,9 @@ function create-node-template() {
if [[ "${os}" == 'linux' ]]; then
node_image_flags="--image-project ${NODE_IMAGE_PROJECT} --image ${NODE_IMAGE}"
elif [[ "${os}" == 'windows' ]]; then
node_image_flags="--image-project ${WINDOWS_NODE_IMAGE_PROJECT} --image-family ${WINDOWS_NODE_IMAGE_FAMILY}"
# TODO(pjh): revert back to using WINDOWS_NODE_IMAGE_FAMILY instead of
# pinning to the v20190312 image once #76666 is resolved.
node_image_flags="--image-project ${WINDOWS_NODE_IMAGE_PROJECT} --image=windows-server-1809-dc-core-for-containers-v20190312"
else
echo "Unknown OS ${os}" >&2
exit 1

View File

@ -112,10 +112,10 @@ try {
Set-EnvironmentVars
Create-Directories
Download-HelperScripts
# Disable Stackdrver logging until issue is fixed.
# InstallAndStart-LoggingAgent
InstallAndStart-LoggingAgent
Create-DockerRegistryKey
Configure-Dockerd
DownloadAndInstall-KubernetesBinaries
Create-NodePki
Create-KubeletKubeconfig

View File

@ -270,11 +270,13 @@ function Disable-WindowsDefender {
# Creates directories where other functions in this module will read and write
# data.
# Note: C:\tmp is required for running certain kubernetes tests.
# C:\var\log is used by the kubelet to store container logs and is also
# hard-coded in the fluentd/stackdriver config for log collection.
function Create-Directories {
Log-Output "Creating ${env:K8S_DIR} and its subdirectories."
ForEach ($dir in ("${env:K8S_DIR}", "${env:NODE_DIR}", "${env:LOGS_DIR}",
"${env:CNI_DIR}", "${env:CNI_CONFIG_DIR}", "${env:MANIFESTS_DIR}",
"${env:PKI_DIR}"), "C:\tmp") {
"${env:PKI_DIR}"), "C:\tmp", "C:\var\log") {
mkdir -Force $dir
}
}
@ -1056,10 +1058,25 @@ function Create-DockerRegistryKey {
Remove-Item -Force -Recurse ${tmp_dir}
}
# Configure Docker daemon and restart the service.
function Configure-Dockerd {
Set-Content "C:\ProgramData\docker\config\daemon.json" @'
{
"log-driver": "json-file",
"log-opts": {
"max-size": "1m",
"max-file": "5"
}
}
'@
Restart-Service Docker
}
# TODO(pjh): move the Stackdriver logging agent code below into a separate
# module; it was put here temporarily to avoid disrupting the file layout in
# the K8s release machinery.
$STACKDRIVER_VERSION = 'v1-8'
$STACKDRIVER_VERSION = 'v1-9'
$STACKDRIVER_ROOT = 'C:\Program Files (x86)\Stackdriver'
# Install and start the Stackdriver logging agent according to
@ -1123,9 +1140,6 @@ function InstallAndStart-LoggingAgent {
Remove-Item -Force -Recurse $tmp_dir
}
# TODO(yujuhong):
# - Collect kubelet/kube-proxy logs.
# - Add tag for kubernetes node name.
$FLUENTD_CONFIG = @'
# This configuration file for Fluentd is used to watch changes to kubernetes
# container logs in the directory /var/lib/docker/containers/ and submit the
@ -1184,6 +1198,34 @@ $FLUENTD_CONFIG = @'
read_from_head true
</source>
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /etc/kubernetes/logs/kubelet.log
pos_file /etc/kubernetes/logs/gcp-kubelet.log.pos
tag kubelet
</source>
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
@type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /etc/kubernetes/logs/kube-proxy.log
pos_file /etc/kubernetes/logs/gcp-kube-proxy.log.pos
tag kube-proxy
</source>
<match reform.**>
@type record_reformer
enable_ruby true
@ -1203,7 +1245,17 @@ $FLUENTD_CONFIG = @'
tag ${if record['stream'] == 'stderr' then 'raw.stderr' else 'raw.stdout' end}
remove_keys stream,log
</match>
'@
# Attach local_resource_id for 'k8s_node' monitored resource.
<filter **>
@type record_transformer
enable_ruby true
<record>
"logging.googleapis.com/local_resource_id" ${"k8s_node.NODE_NAME"}
</record>
</filter>
'@.replace('NODE_NAME', (hostname))
# Export all public functions:
Export-ModuleMember -Function *-*

View File

@ -19,6 +19,7 @@
function get-windows-node-instance-metadata-from-file {
local metadata=""
metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt,"
metadata+="cluster-location=${KUBE_TEMP}/cluster-location.txt,"
metadata+="kube-env=${KUBE_TEMP}/windows-node-kube-env.yaml,"
metadata+="kubelet-config=${KUBE_TEMP}/windows-node-kubelet-config.yaml,"
# To get startup script output run "gcloud compute instances

View File

@ -32,27 +32,6 @@ fi
source "${KUBE_ROOT}/cluster/kube-util.sh"
DEPRECATED_PROVIDERS=(
"centos"
"local"
)
for provider in "${DEPRECATED_PROVIDERS[@]}"; do
if [[ "${KUBERNETES_PROVIDER}" == "${provider}" ]]; then
cat <<EOF 1>&2
!!! DEPRECATION NOTICE !!!
The '${provider}' kube-up provider is deprecated and will be removed in a future
release of kubernetes. Deprecated providers will be removed within 2 releases.
See https://github.com/kubernetes/kubernetes/issues/49213 for more info.
EOF
break
fi
done
if [ -z "${ZONE-}" ]; then
echo "... Starting cluster using provider: ${KUBERNETES_PROVIDER}" >&2
else

View File

@ -1,33 +0,0 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Perform preparations required to run e2e tests
function prepare-e2e() {
echo "Local doesn't need special preparations for e2e tests" 1>&2
}
# Detect the IP for the master
#
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
# Vars exported:
# KUBE_MASTER_URL
function detect-master {
KUBE_MASTER=localhost
KUBE_MASTER_IP=127.0.0.1
export KUBE_MASTER_URL="http://${KUBE_MASTER_IP}:8080"
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}

View File

@ -63,7 +63,7 @@ readonly max_dump_processes=25
# TODO: Get rid of all the sourcing of bash dependencies eventually.
function setup() {
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
if [[ -z "${use_custom_instance_list}" ]]; then
: ${KUBE_CONFIG_FILE:="config-test.sh"}
echo "Sourcing kube-util.sh"

View File

@ -18,7 +18,7 @@
# pre-existing Kubernetes master. See test/kubemark/pre-existing/README.md
# for more details on using a pre-existing provider.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/cluster/common.sh"
source "${KUBE_ROOT}/hack/lib/util.sh"

View File

@ -21,7 +21,7 @@ set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/cluster/kube-util.sh"
echo "Testing cluster with provider: ${KUBERNETES_PROVIDER}" 1>&2

View File

@ -24,7 +24,6 @@ set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
KUBE_CONFIG_FILE="config-default.sh"
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
${KUBE_ROOT}/hack/ginkgo-e2e.sh --ginkgo.focus=Networking
"${KUBE_ROOT}/hack/ginkgo-e2e.sh" --ginkgo.focus=Networking

View File

@ -23,10 +23,8 @@ set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
TEST_ARGS="$@"
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
SMOKE_TEST_FOCUS_REGEX="Guestbook.application"
exec "${KUBE_ROOT}/cluster/test-e2e.sh" -ginkgo.focus="${SMOKE_TEST_FOCUS_REGEX}" ${TEST_ARGS}
exec "${KUBE_ROOT}/cluster/test-e2e.sh" -ginkgo.focus="${SMOKE_TEST_FOCUS_REGEX}" "$@"

View File

@ -24,7 +24,7 @@ set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
if [ -f "${KUBE_ROOT}/cluster/env.sh" ]; then
source "${KUBE_ROOT}/cluster/env.sh"

View File

@ -24,10 +24,9 @@ import (
// GroupName is the group name used in this package
const GroupName = "cloudcontrollermanager.config.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
var (
// SchemeGroupVersion is group version used to register these objects
SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group & version to a scheme

View File

@ -24,10 +24,9 @@ import (
// GroupName is the group name used in this package
const GroupName = "cloudcontrollermanager.config.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
var (
// SchemeGroupVersion is group version used to register these objects
SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// localSchemeBuilder is a pointer to SchemeBuilder instance. Using localSchemeBuilder

View File

@ -31,6 +31,7 @@ go_library(
"//pkg/proxy/iptables:go_default_library",
"//pkg/proxy/ipvs:go_default_library",
"//pkg/proxy/userspace:go_default_library",
"//pkg/proxy/util:go_default_library",
"//pkg/util/configz:go_default_library",
"//pkg/util/filesystem:go_default_library",
"//pkg/util/flag:go_default_library",

View File

@ -61,6 +61,7 @@ import (
"k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/proxy/ipvs"
"k8s.io/kubernetes/pkg/proxy/userspace"
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
"k8s.io/kubernetes/pkg/util/configz"
"k8s.io/kubernetes/pkg/util/filesystem"
utilflag "k8s.io/kubernetes/pkg/util/flag"
@ -214,8 +215,8 @@ func NewOptions() *Options {
func (o *Options) Complete() error {
if len(o.ConfigFile) == 0 && len(o.WriteConfigTo) == 0 {
klog.Warning("WARNING: all flags other than --config, --write-config-to, and --cleanup are deprecated. Please begin using a config file ASAP.")
o.applyDeprecatedHealthzPortToConfig()
o.applyDeprecatedMetricsPortToConfig()
o.config.HealthzBindAddress = addressFromDeprecatedFlags(o.config.HealthzBindAddress, o.healthzPort)
o.config.MetricsBindAddress = addressFromDeprecatedFlags(o.config.MetricsBindAddress, o.metricsPort)
}
// Load the config file here in Complete, so that Validate validates the fully-resolved config.
@ -365,38 +366,15 @@ func (o *Options) writeConfigFile() error {
return nil
}
// applyDeprecatedHealthzPortToConfig sets o.config.HealthzBindAddress from
// flags passed on the command line based on the following rules:
//
// 1. If --healthz-port is 0, disable the healthz server.
// 2. Otherwise, use the value of --healthz-port for the port portion of
// o.config.HealthzBindAddress
func (o *Options) applyDeprecatedHealthzPortToConfig() {
if o.healthzPort == 0 {
o.config.HealthzBindAddress = ""
return
// addressFromDeprecatedFlags returns server address from flags
// passed on the command line based on the following rules:
// 1. If port is 0, disable the server (e.g. set address to empty).
// 2. Otherwise, set the port portion of the config accordingly.
func addressFromDeprecatedFlags(addr string, port int32) string {
if port == 0 {
return ""
}
index := strings.Index(o.config.HealthzBindAddress, ":")
if index != -1 {
o.config.HealthzBindAddress = o.config.HealthzBindAddress[0:index]
}
o.config.HealthzBindAddress = fmt.Sprintf("%s:%d", o.config.HealthzBindAddress, o.healthzPort)
}
func (o *Options) applyDeprecatedMetricsPortToConfig() {
if o.metricsPort == 0 {
o.config.MetricsBindAddress = ""
return
}
index := strings.Index(o.config.MetricsBindAddress, ":")
if index != -1 {
o.config.MetricsBindAddress = o.config.MetricsBindAddress[0:index]
}
o.config.MetricsBindAddress = fmt.Sprintf("%s:%d", o.config.MetricsBindAddress, o.metricsPort)
return proxyutil.AppendPortIfNeeded(addr, port)
}
// loadConfigFromFile loads the contents of file and decodes it as a

View File

@ -559,3 +559,79 @@ func (s *fakeProxyServerError) Run() error {
func (s *fakeProxyServerError) CleanupAndExit() error {
return errors.New("mocking error from ProxyServer.CleanupAndExit()")
}
func TestAddressFromDeprecatedFlags(t *testing.T) {
testCases := []struct {
name string
healthzPort int32
healthzBindAddress string
metricsPort int32
metricsBindAddress string
expHealthz string
expMetrics string
}{
{
name: "IPv4 bind address",
healthzBindAddress: "1.2.3.4",
healthzPort: 12345,
metricsBindAddress: "2.3.4.5",
metricsPort: 23456,
expHealthz: "1.2.3.4:12345",
expMetrics: "2.3.4.5:23456",
},
{
name: "IPv4 bind address has port",
healthzBindAddress: "1.2.3.4:12345",
healthzPort: 23456,
metricsBindAddress: "2.3.4.5:12345",
metricsPort: 23456,
expHealthz: "1.2.3.4:12345",
expMetrics: "2.3.4.5:12345",
},
{
name: "IPv6 bind address",
healthzBindAddress: "fd00:1::5",
healthzPort: 12345,
metricsBindAddress: "fd00:1::6",
metricsPort: 23456,
expHealthz: "[fd00:1::5]:12345",
expMetrics: "[fd00:1::6]:23456",
},
{
name: "IPv6 bind address has port",
healthzBindAddress: "[fd00:1::5]:12345",
healthzPort: 56789,
metricsBindAddress: "[fd00:1::6]:56789",
metricsPort: 12345,
expHealthz: "[fd00:1::5]:12345",
expMetrics: "[fd00:1::6]:56789",
},
{
name: "Invalid IPv6 Config",
healthzBindAddress: "[fd00:1::5]",
healthzPort: 12345,
metricsBindAddress: "[fd00:1::6]",
metricsPort: 56789,
expHealthz: "[fd00:1::5]",
expMetrics: "[fd00:1::6]",
},
}
for i := range testCases {
gotHealthz := addressFromDeprecatedFlags(testCases[i].healthzBindAddress, testCases[i].healthzPort)
gotMetrics := addressFromDeprecatedFlags(testCases[i].metricsBindAddress, testCases[i].metricsPort)
errFn := func(name, except, got string) {
t.Errorf("case %s: expected %v, got %v", name, except, got)
}
if gotHealthz != testCases[i].expHealthz {
errFn(testCases[i].name, testCases[i].expHealthz, gotHealthz)
}
if gotMetrics != testCases[i].expMetrics {
errFn(testCases[i].name, testCases[i].expMetrics, gotMetrics)
}
}
}

View File

@ -44,6 +44,7 @@ filegroup(
"//cmd/kubeadm/app/apis/kubeadm/fuzzer:all-srcs",
"//cmd/kubeadm/app/apis/kubeadm/scheme:all-srcs",
"//cmd/kubeadm/app/apis/kubeadm/v1beta1:all-srcs",
"//cmd/kubeadm/app/apis/kubeadm/v1beta2:all-srcs",
"//cmd/kubeadm/app/apis/kubeadm/validation:all-srcs",
],
tags = ["automanaged"],

View File

@ -12,7 +12,7 @@ go_library(
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/fuzzer",
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",

View File

@ -21,7 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
@ -56,18 +56,18 @@ func fuzzInitConfiguration(obj *kubeadm.InitConfiguration, c fuzz.Continue) {
DNS: kubeadm.DNS{
Type: kubeadm.CoreDNS,
},
CertificatesDir: v1beta1.DefaultCertificatesDir,
ClusterName: v1beta1.DefaultClusterName,
CertificatesDir: v1beta2.DefaultCertificatesDir,
ClusterName: v1beta2.DefaultClusterName,
Etcd: kubeadm.Etcd{
Local: &kubeadm.LocalEtcd{
DataDir: v1beta1.DefaultEtcdDataDir,
DataDir: v1beta2.DefaultEtcdDataDir,
},
},
ImageRepository: v1beta1.DefaultImageRepository,
KubernetesVersion: v1beta1.DefaultKubernetesVersion,
ImageRepository: v1beta2.DefaultImageRepository,
KubernetesVersion: v1beta2.DefaultKubernetesVersion,
Networking: kubeadm.Networking{
ServiceSubnet: v1beta1.DefaultServicesSubnet,
DNSDomain: v1beta1.DefaultServiceDNSDomain,
ServiceSubnet: v1beta2.DefaultServicesSubnet,
DNSDomain: v1beta2.DefaultServiceDNSDomain,
},
}
// Adds the default bootstrap token to get the round working

View File

@ -8,6 +8,7 @@ go_library(
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",

View File

@ -24,6 +24,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
)
// Scheme is the runtime.Scheme to which all kubeadm api types are registered.
@ -41,5 +42,6 @@ func init() {
func AddToScheme(scheme *runtime.Scheme) {
utilruntime.Must(kubeadm.AddToScheme(scheme))
utilruntime.Must(v1beta1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1beta1.SchemeGroupVersion))
utilruntime.Must(v1beta2.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(v1beta2.SchemeGroupVersion))
}
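A minimal sketch (illustrative only) of decoding a v1beta2 document once v1beta2.AddToScheme is registered; the standalone scheme construction here is an assumption for the example, while kubeadm itself uses the package-level Scheme set up above.
package main
import (
	"fmt"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
)
func main() {
	// Build a scheme containing only the v1beta2 kubeadm types.
	scheme := runtime.NewScheme()
	if err := kubeadmapiv1beta2.AddToScheme(scheme); err != nil {
		panic(err)
	}
	codecs := serializer.NewCodecFactory(scheme)
	doc := []byte(`apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
`)
	// The universal deserializer recognizes both JSON and YAML documents.
	obj, gvk, err := codecs.UniversalDeserializer().Decode(doc, nil, nil)
	if err != nil {
		panic(err)
	}
	cfg := obj.(*kubeadmapiv1beta2.ClusterConfiguration)
	fmt.Println(gvk.Kind, cfg.KubernetesVersion) // ClusterConfiguration v1.14.0
}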

View File

@ -0,0 +1,52 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"bootstraptokenstring.go",
"defaults.go",
"defaults_unix.go",
"defaults_windows.go",
"doc.go",
"register.go",
"types.go",
"zz_generated.conversion.go",
"zz_generated.deepcopy.go",
"zz_generated.defaults.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2",
visibility = ["//visibility:public"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library",
"//staging/src/k8s.io/cluster-bootstrap/token/util:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["bootstraptokenstring_test.go"],
embed = [":go_default_library"],
deps = ["//vendor/github.com/pkg/errors:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,88 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"fmt"
"strings"
"github.com/pkg/errors"
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
bootstraputil "k8s.io/cluster-bootstrap/token/util"
)
// BootstrapTokenString is a token of the format abcdef.abcdef0123456789 that is used
// both for validating the practical availability of the API server from a joining node's point
// of view and as an authentication method for the node in the bootstrap phase of
// "kubeadm join". This token is, and should be, short-lived.
type BootstrapTokenString struct {
ID string
Secret string
}
// MarshalJSON implements the json.Marshaler interface.
func (bts BootstrapTokenString) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, bts.String())), nil
}
// UnmarshalJSON implements the json.Unmarshaller interface.
func (bts *BootstrapTokenString) UnmarshalJSON(b []byte) error {
// If the token is represented as "", just return quickly without an error
if len(b) == 0 {
return nil
}
// Remove unnecessary " characters coming from the JSON parser
token := strings.Replace(string(b), `"`, ``, -1)
// Convert the string Token to a BootstrapTokenString object
newbts, err := NewBootstrapTokenString(token)
if err != nil {
return err
}
bts.ID = newbts.ID
bts.Secret = newbts.Secret
return nil
}
// String returns the string representation of the BootstrapTokenString
func (bts BootstrapTokenString) String() string {
if len(bts.ID) > 0 && len(bts.Secret) > 0 {
return bootstraputil.TokenFromIDAndSecret(bts.ID, bts.Secret)
}
return ""
}
// NewBootstrapTokenString converts the given Bootstrap Token as a string
// to the BootstrapTokenString object used for serialization/deserialization
// and internal usage. It also automatically validates that the given token
// is of the right format
func NewBootstrapTokenString(token string) (*BootstrapTokenString, error) {
substrs := bootstraputil.BootstrapTokenRegexp.FindStringSubmatch(token)
// TODO: Add a constant for the 3 value here, and explain better why it's needed (other than because of how the regexp parsing works)
if len(substrs) != 3 {
return nil, errors.Errorf("the bootstrap token %q was not of the form %q", token, bootstrapapi.BootstrapTokenPattern)
}
return &BootstrapTokenString{ID: substrs[1], Secret: substrs[2]}, nil
}
// NewBootstrapTokenStringFromIDAndSecret is a wrapper around NewBootstrapTokenString
// that allows the caller to specify the ID and Secret separately
func NewBootstrapTokenStringFromIDAndSecret(id, secret string) (*BootstrapTokenString, error) {
return NewBootstrapTokenString(bootstraputil.TokenFromIDAndSecret(id, secret))
}
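A minimal usage sketch (illustrative only, not part of the change) of the round trip described above; the test file that follows exercises the same paths exhaustively.
package main
import (
	"encoding/json"
	"fmt"
	kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
)
func main() {
	// Parse the "<id>.<secret>" form; the format is validated against the bootstrap token regexp.
	bts, err := kubeadmapiv1beta2.NewBootstrapTokenString("abcdef.abcdef0123456789")
	if err != nil {
		panic(err)
	}
	fmt.Println(bts.ID, bts.Secret) // abcdef abcdef0123456789
	// MarshalJSON emits the token back as a single quoted string.
	out, _ := json.Marshal(bts)
	fmt.Println(string(out)) // "abcdef.abcdef0123456789"
}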

View File

@ -0,0 +1,249 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"encoding/json"
"reflect"
"testing"
"github.com/pkg/errors"
)
func TestMarshalJSON(t *testing.T) {
var tests = []struct {
bts BootstrapTokenString
expected string
}{
{BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, `"abcdef.abcdef0123456789"`},
{BootstrapTokenString{ID: "foo", Secret: "bar"}, `"foo.bar"`},
{BootstrapTokenString{ID: "h", Secret: "b"}, `"h.b"`},
}
for _, rt := range tests {
t.Run(rt.bts.ID, func(t *testing.T) {
b, err := json.Marshal(rt.bts)
if err != nil {
t.Fatalf("json.Marshal returned an unexpected error: %v", err)
}
if string(b) != rt.expected {
t.Errorf(
"failed BootstrapTokenString.MarshalJSON:\n\texpected: %s\n\t actual: %s",
rt.expected,
string(b),
)
}
})
}
}
func TestUnmarshalJSON(t *testing.T) {
var tests = []struct {
input string
bts *BootstrapTokenString
expectedError bool
}{
{`"f.s"`, &BootstrapTokenString{}, true},
{`"abcdef."`, &BootstrapTokenString{}, true},
{`"abcdef:abcdef0123456789"`, &BootstrapTokenString{}, true},
{`abcdef.abcdef0123456789`, &BootstrapTokenString{}, true},
{`"abcdef.abcdef0123456789`, &BootstrapTokenString{}, true},
{`"abcdef.ABCDEF0123456789"`, &BootstrapTokenString{}, true},
{`"abcdef.abcdef0123456789"`, &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, false},
{`"123456.aabbccddeeffgghh"`, &BootstrapTokenString{ID: "123456", Secret: "aabbccddeeffgghh"}, false},
}
for _, rt := range tests {
t.Run(rt.input, func(t *testing.T) {
newbts := &BootstrapTokenString{}
err := json.Unmarshal([]byte(rt.input), newbts)
if (err != nil) != rt.expectedError {
t.Errorf("failed BootstrapTokenString.UnmarshalJSON:\n\texpected error: %t\n\t actual error: %v", rt.expectedError, err)
} else if !reflect.DeepEqual(rt.bts, newbts) {
t.Errorf(
"failed BootstrapTokenString.UnmarshalJSON:\n\texpected: %v\n\t actual: %v",
rt.bts,
newbts,
)
}
})
}
}
func TestJSONRoundtrip(t *testing.T) {
var tests = []struct {
input string
bts *BootstrapTokenString
}{
{`"abcdef.abcdef0123456789"`, nil},
{"", &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}},
}
for _, rt := range tests {
t.Run(rt.input, func(t *testing.T) {
if err := roundtrip(rt.input, rt.bts); err != nil {
t.Errorf("failed BootstrapTokenString JSON roundtrip with error: %v", err)
}
})
}
}
func roundtrip(input string, bts *BootstrapTokenString) error {
var b []byte
var err error
newbts := &BootstrapTokenString{}
// If string input was specified, roundtrip like this: string -> (unmarshal) -> object -> (marshal) -> string
if len(input) > 0 {
if err := json.Unmarshal([]byte(input), newbts); err != nil {
return errors.Wrap(err, "expected no unmarshal error, got error")
}
if b, err = json.Marshal(newbts); err != nil {
return errors.Wrap(err, "expected no marshal error, got error")
}
if input != string(b) {
return errors.Errorf(
"expected token: %s\n\t actual: %s",
input,
string(b),
)
}
} else { // Otherwise, roundtrip like this: object -> (marshal) -> string -> (unmarshal) -> object
if b, err = json.Marshal(bts); err != nil {
return errors.Wrap(err, "expected no marshal error, got error")
}
if err := json.Unmarshal(b, newbts); err != nil {
return errors.Wrap(err, "expected no unmarshal error, got error")
}
if !reflect.DeepEqual(bts, newbts) {
return errors.Errorf(
"expected object: %v\n\t actual: %v",
bts,
newbts,
)
}
}
return nil
}
func TestTokenFromIDAndSecret(t *testing.T) {
var tests = []struct {
bts BootstrapTokenString
expected string
}{
{BootstrapTokenString{ID: "foo", Secret: "bar"}, "foo.bar"},
{BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, "abcdef.abcdef0123456789"},
{BootstrapTokenString{ID: "h", Secret: "b"}, "h.b"},
}
for _, rt := range tests {
t.Run(rt.bts.ID, func(t *testing.T) {
actual := rt.bts.String()
if actual != rt.expected {
t.Errorf(
"failed BootstrapTokenString.String():\n\texpected: %s\n\t actual: %s",
rt.expected,
actual,
)
}
})
}
}
func TestNewBootstrapTokenString(t *testing.T) {
var tests = []struct {
token string
expectedError bool
bts *BootstrapTokenString
}{
{token: "", expectedError: true, bts: nil},
{token: ".", expectedError: true, bts: nil},
{token: "1234567890123456789012", expectedError: true, bts: nil}, // invalid parcel size
{token: "12345.1234567890123456", expectedError: true, bts: nil}, // invalid parcel size
{token: ".1234567890123456", expectedError: true, bts: nil}, // invalid parcel size
{token: "123456.", expectedError: true, bts: nil}, // invalid parcel size
{token: "123456:1234567890.123456", expectedError: true, bts: nil}, // invalid separation
{token: "abcdef:1234567890123456", expectedError: true, bts: nil}, // invalid separation
{token: "Abcdef.1234567890123456", expectedError: true, bts: nil}, // invalid token id
{token: "123456.AABBCCDDEEFFGGHH", expectedError: true, bts: nil}, // invalid token secret
{token: "123456.AABBCCD-EEFFGGHH", expectedError: true, bts: nil}, // invalid character
{token: "abc*ef.1234567890123456", expectedError: true, bts: nil}, // invalid character
{token: "abcdef.1234567890123456", expectedError: false, bts: &BootstrapTokenString{ID: "abcdef", Secret: "1234567890123456"}},
{token: "123456.aabbccddeeffgghh", expectedError: false, bts: &BootstrapTokenString{ID: "123456", Secret: "aabbccddeeffgghh"}},
{token: "abcdef.abcdef0123456789", expectedError: false, bts: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}},
{token: "123456.1234560123456789", expectedError: false, bts: &BootstrapTokenString{ID: "123456", Secret: "1234560123456789"}},
}
for _, rt := range tests {
t.Run(rt.token, func(t *testing.T) {
actual, err := NewBootstrapTokenString(rt.token)
if (err != nil) != rt.expectedError {
t.Errorf(
"failed NewBootstrapTokenString for the token %q\n\texpected error: %t\n\t actual error: %v",
rt.token,
rt.expectedError,
err,
)
} else if !reflect.DeepEqual(actual, rt.bts) {
t.Errorf(
"failed NewBootstrapTokenString for the token %q\n\texpected: %v\n\t actual: %v",
rt.token,
rt.bts,
actual,
)
}
})
}
}
func TestNewBootstrapTokenStringFromIDAndSecret(t *testing.T) {
var tests = []struct {
id, secret string
expectedError bool
bts *BootstrapTokenString
}{
{id: "", secret: "", expectedError: true, bts: nil},
{id: "1234567890123456789012", secret: "", expectedError: true, bts: nil}, // invalid parcel size
{id: "12345", secret: "1234567890123456", expectedError: true, bts: nil}, // invalid parcel size
{id: "", secret: "1234567890123456", expectedError: true, bts: nil}, // invalid parcel size
{id: "123456", secret: "", expectedError: true, bts: nil}, // invalid parcel size
{id: "Abcdef", secret: "1234567890123456", expectedError: true, bts: nil}, // invalid token id
{id: "123456", secret: "AABBCCDDEEFFGGHH", expectedError: true, bts: nil}, // invalid token secret
{id: "123456", secret: "AABBCCD-EEFFGGHH", expectedError: true, bts: nil}, // invalid character
{id: "abc*ef", secret: "1234567890123456", expectedError: true, bts: nil}, // invalid character
{id: "abcdef", secret: "1234567890123456", expectedError: false, bts: &BootstrapTokenString{ID: "abcdef", Secret: "1234567890123456"}},
{id: "123456", secret: "aabbccddeeffgghh", expectedError: false, bts: &BootstrapTokenString{ID: "123456", Secret: "aabbccddeeffgghh"}},
{id: "abcdef", secret: "abcdef0123456789", expectedError: false, bts: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}},
{id: "123456", secret: "1234560123456789", expectedError: false, bts: &BootstrapTokenString{ID: "123456", Secret: "1234560123456789"}},
}
for _, rt := range tests {
t.Run(rt.id, func(t *testing.T) {
actual, err := NewBootstrapTokenStringFromIDAndSecret(rt.id, rt.secret)
if (err != nil) != rt.expectedError {
t.Errorf(
"failed NewBootstrapTokenStringFromIDAndSecret for the token with id %q and secret %q\n\texpected error: %t\n\t actual error: %v",
rt.id,
rt.secret,
rt.expectedError,
err,
)
} else if !reflect.DeepEqual(actual, rt.bts) {
t.Errorf(
"failed NewBootstrapTokenStringFromIDAndSecret for the token with id %q and secret %q\n\texpected: %v\n\t actual: %v",
rt.id,
rt.secret,
rt.bts,
actual,
)
}
})
}
}

View File

@ -0,0 +1,215 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"net/url"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
const (
// DefaultServiceDNSDomain defines default cluster-internal domain name for Services and Pods
DefaultServiceDNSDomain = "cluster.local"
// DefaultServicesSubnet defines default service subnet range
DefaultServicesSubnet = "10.96.0.0/12"
// DefaultClusterDNSIP defines default DNS IP
DefaultClusterDNSIP = "10.96.0.10"
// DefaultKubernetesVersion defines default kubernetes version
DefaultKubernetesVersion = "stable-1"
// DefaultAPIBindPort defines default API port
DefaultAPIBindPort = 6443
// DefaultCertificatesDir defines default certificate directory
DefaultCertificatesDir = "/etc/kubernetes/pki"
// DefaultImageRepository defines default image registry
DefaultImageRepository = "k8s.gcr.io"
// DefaultManifestsDir defines default manifests directory
DefaultManifestsDir = "/etc/kubernetes/manifests"
// DefaultClusterName defines the default cluster name
DefaultClusterName = "kubernetes"
// DefaultEtcdDataDir defines default location of etcd where static pods will save data to
DefaultEtcdDataDir = "/var/lib/etcd"
// DefaultProxyBindAddressv4 is the default bind address when the advertise address is v4
DefaultProxyBindAddressv4 = "0.0.0.0"
// DefaultProxyBindAddressv6 is the default bind address when the advertise address is v6
DefaultProxyBindAddressv6 = "::"
// DefaultDiscoveryTimeout specifies the default discovery timeout for kubeadm (used unless one is specified in the JoinConfiguration)
DefaultDiscoveryTimeout = 5 * time.Minute
)
var (
// DefaultAuditPolicyLogMaxAge is defined as a var so its address can be taken
// It is the number of days to store audit logs
DefaultAuditPolicyLogMaxAge = int32(2)
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
// SetDefaults_InitConfiguration assigns default values for the InitConfiguration
func SetDefaults_InitConfiguration(obj *InitConfiguration) {
SetDefaults_ClusterConfiguration(&obj.ClusterConfiguration)
SetDefaults_BootstrapTokens(obj)
SetDefaults_APIEndpoint(&obj.LocalAPIEndpoint)
}
// SetDefaults_ClusterConfiguration assigns default values for the ClusterConfiguration
func SetDefaults_ClusterConfiguration(obj *ClusterConfiguration) {
if obj.KubernetesVersion == "" {
obj.KubernetesVersion = DefaultKubernetesVersion
}
if obj.Networking.ServiceSubnet == "" {
obj.Networking.ServiceSubnet = DefaultServicesSubnet
}
if obj.Networking.DNSDomain == "" {
obj.Networking.DNSDomain = DefaultServiceDNSDomain
}
if obj.CertificatesDir == "" {
obj.CertificatesDir = DefaultCertificatesDir
}
if obj.ImageRepository == "" {
obj.ImageRepository = DefaultImageRepository
}
if obj.ClusterName == "" {
obj.ClusterName = DefaultClusterName
}
SetDefaults_DNS(obj)
SetDefaults_Etcd(obj)
SetDefaults_APIServer(&obj.APIServer)
}
// SetDefaults_APIServer assigns default values for the API Server
func SetDefaults_APIServer(obj *APIServer) {
if obj.TimeoutForControlPlane == nil {
obj.TimeoutForControlPlane = &metav1.Duration{
Duration: constants.DefaultControlPlaneTimeout,
}
}
}
// SetDefaults_DNS assigns default values for the DNS component
func SetDefaults_DNS(obj *ClusterConfiguration) {
if obj.DNS.Type == "" {
obj.DNS.Type = CoreDNS
}
}
// SetDefaults_Etcd assigns default values for the etcd configuration
func SetDefaults_Etcd(obj *ClusterConfiguration) {
if obj.Etcd.External == nil && obj.Etcd.Local == nil {
obj.Etcd.Local = &LocalEtcd{}
}
if obj.Etcd.Local != nil {
if obj.Etcd.Local.DataDir == "" {
obj.Etcd.Local.DataDir = DefaultEtcdDataDir
}
}
}
// SetDefaults_JoinConfiguration assigns default values to a regular node
func SetDefaults_JoinConfiguration(obj *JoinConfiguration) {
if obj.CACertPath == "" {
obj.CACertPath = DefaultCACertPath
}
SetDefaults_JoinControlPlane(obj.ControlPlane)
SetDefaults_Discovery(&obj.Discovery)
}
// SetDefaults_JoinControlPlane assigns default values for a control plane node joining the cluster
func SetDefaults_JoinControlPlane(obj *JoinControlPlane) {
if obj != nil {
SetDefaults_APIEndpoint(&obj.LocalAPIEndpoint)
}
}
// SetDefaults_Discovery assigns default values for the discovery process
func SetDefaults_Discovery(obj *Discovery) {
if len(obj.TLSBootstrapToken) == 0 && obj.BootstrapToken != nil {
obj.TLSBootstrapToken = obj.BootstrapToken.Token
}
if obj.Timeout == nil {
obj.Timeout = &metav1.Duration{
Duration: DefaultDiscoveryTimeout,
}
}
if obj.File != nil {
SetDefaults_FileDiscovery(obj.File)
}
}
// SetDefaults_FileDiscovery assigns default values for file based discovery
func SetDefaults_FileDiscovery(obj *FileDiscovery) {
// Make sure file URL becomes path
if len(obj.KubeConfigPath) != 0 {
u, err := url.Parse(obj.KubeConfigPath)
if err == nil && u.Scheme == "file" {
obj.KubeConfigPath = u.Path
}
}
}
// SetDefaults_BootstrapTokens sets the defaults for the .BootstrapTokens field
// If the slice is empty, it's defaulted with one token. Otherwise it just loops
// through the slice and sets the defaults for the omitempty fields that are TTL,
// Usages and Groups. Token is NOT defaulted with a random one in the API defaulting
// layer, but set to a random value later at runtime if not set before.
func SetDefaults_BootstrapTokens(obj *InitConfiguration) {
if obj.BootstrapTokens == nil || len(obj.BootstrapTokens) == 0 {
obj.BootstrapTokens = []BootstrapToken{{}}
}
for i := range obj.BootstrapTokens {
SetDefaults_BootstrapToken(&obj.BootstrapTokens[i])
}
}
// SetDefaults_BootstrapToken sets the defaults for an individual Bootstrap Token
func SetDefaults_BootstrapToken(bt *BootstrapToken) {
if bt.TTL == nil {
bt.TTL = &metav1.Duration{
Duration: constants.DefaultTokenDuration,
}
}
if len(bt.Usages) == 0 {
bt.Usages = constants.DefaultTokenUsages
}
if len(bt.Groups) == 0 {
bt.Groups = constants.DefaultTokenGroups
}
}
// SetDefaults_APIEndpoint sets the defaults for the API server instance deployed on a node.
func SetDefaults_APIEndpoint(obj *APIEndpoint) {
if obj.BindPort == 0 {
obj.BindPort = DefaultAPIBindPort
}
}
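A minimal sketch (illustrative only, not part of the change) of what the defaulting entry points above produce for an otherwise empty InitConfiguration.
package main
import (
	"fmt"
	kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
)
func main() {
	cfg := &kubeadmapiv1beta2.InitConfiguration{}
	kubeadmapiv1beta2.SetDefaults_InitConfiguration(cfg)
	// ClusterConfiguration fields fall back to the package constants defined above.
	fmt.Println(cfg.KubernetesVersion)        // stable-1
	fmt.Println(cfg.Networking.ServiceSubnet) // 10.96.0.0/12
	fmt.Println(cfg.DNS.Type)                 // CoreDNS
	// One empty bootstrap token is created and then given default TTL, usages and groups.
	fmt.Println(len(cfg.BootstrapTokens)) // 1
	// The local API endpoint gets the default secure port.
	fmt.Println(cfg.LocalAPIEndpoint.BindPort) // 6443
}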

View File

@ -0,0 +1,26 @@
// +build !windows
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
const (
// DefaultCACertPath defines default location of CA certificate on Linux
DefaultCACertPath = "/etc/kubernetes/pki/ca.crt"
// DefaultUrlScheme defines default socket url prefix
DefaultUrlScheme = "unix"
)

View File

@ -0,0 +1,26 @@
// +build windows
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
const (
// DefaultCACertPath defines default location of CA certificate on Windows
DefaultCACertPath = "C:/etc/kubernetes/pki/ca.crt"
// DefaultUrlScheme defines default socket url prefix
DefaultUrlScheme = "tcp"
)

View File

@ -0,0 +1,280 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:defaulter-gen=TypeMeta
// +groupName=kubeadm.k8s.io
// +k8s:deepcopy-gen=package
// +k8s:conversion-gen=k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm
// Package v1beta2 defines the v1beta2 version of the kubeadm configuration file format.
// This version graduates the configuration format to BETA and is a big step towards GA.
//
// A list of changes since v1alpha3:
// - "apiServerEndpoint" in InitConfiguration was renamed to "localAPIEndpoint" for better clarity of what the field
// represents.
// - Common fields in ClusterConfiguration such as "*extraArgs" and "*extraVolumes" for control plane components are now moved
// under component structs - i.e. "apiServer", "controllerManager", "scheduler".
// - "auditPolicy" was removed from ClusterConfiguration. Please use "extraArgs" in "apiServer" to configure this feature instead.
// - "unifiedControlPlaneImage" in ClusterConfiguration was changed to a boolean field called "useHyperKubeImage".
// - ClusterConfiguration now has a "dns" field which can be used to select and configure the cluster DNS addon.
// - "featureGates" still exists under ClusterConfiguration, but there are no supported feature gates in 1.13.
// See the Kubernetes 1.13 changelog for further details.
// - Both "localEtcd" and "dns" configurations now support custom image repositories.
// - The "controlPlane*"-related fields in JoinConfiguration were refactored into a sub-structure.
// - "clusterName" was removed from JoinConfiguration and the name is now fetched from the existing cluster.
//
// Migration from old kubeadm config versions
//
// Please convert your v1alpha3 configuration files to v1beta2 using the "kubeadm config migrate" command of kubeadm v1.13.x
// (conversion from older releases of kubeadm config files requires older release of kubeadm as well e.g.
// kubeadm v1.11 should be used to migrate v1alpha1 to v1alpha2; kubeadm v1.12 should be used to translate v1alpha2 to v1alpha3)
//
// Nevertheless, kubeadm v1.13.x will support reading from v1alpha3 version of the kubeadm config file format, but this support
// will be dropped in the v1.14 release.
//
// Basics
//
// The preferred way to configure kubeadm is to pass a YAML configuration file with the --config option. Some of the
// configuration options defined in the kubeadm config file are also available as command line flags, but only
// the most common/simple use cases are supported with this approach.
//
// A kubeadm config file could contain multiple configuration types separated using three dashes (“---”).
//
// kubeadm supports the following configuration types:
//
// apiVersion: kubeadm.k8s.io/v1beta2
// kind: InitConfiguration
//
// apiVersion: kubeadm.k8s.io/v1beta2
// kind: ClusterConfiguration
//
// apiVersion: kubelet.config.k8s.io/v1beta1
// kind: KubeletConfiguration
//
// apiVersion: kubeproxy.config.k8s.io/v1alpha1
// kind: KubeProxyConfiguration
//
// apiVersion: kubeadm.k8s.io/v1beta2
// kind: JoinConfiguration
//
// To print the defaults for "init" and "join" actions use the following commands:
// kubeadm config print init-defaults
// kubeadm config print join-defaults
//
// The list of configuration types that must be included in a configuration file depends on the action you are
// performing (init or join) and on the configuration options you are going to use (defaults or advanced customization).
//
// If some configuration types are not provided, or provided only partially, kubeadm will use default values; the defaults
// provided by kubeadm also include enforcing consistency of values across components when required (e.g.
// the cluster-cidr flag on the controller manager and clusterCIDR on kube-proxy).
//
// Users are always allowed to override default values, with the only exception of a small subset of settings with
// relevance for security (e.g. enforcing authorization-mode Node and RBAC on the api server).
//
// If the user provides configuration types that are not expected for the action being performed, kubeadm will
// ignore those types and print a warning.
//
// Kubeadm init configuration types
//
// When executing kubeadm init with the --config option, the following configuration types could be used:
// InitConfiguration, ClusterConfiguration, KubeProxyConfiguration, KubeletConfiguration, but only one
// of InitConfiguration and ClusterConfiguration is mandatory.
//
// apiVersion: kubeadm.k8s.io/v1beta2
// kind: InitConfiguration
// bootstrapTokens:
// ...
// nodeRegistration:
// ...
//
// The InitConfiguration type should be used to configure runtime settings, which in the case of kubeadm init
// are the configuration of the bootstrap token and all the settings specific to the node where kubeadm
// is executed, including:
//
// - NodeRegistration, that holds fields that relate to registering the new node to the cluster;
// use it to customize the node name, the CRI socket to use or any other settings that should apply to this
// node only (e.g. the node ip).
//
// - LocalAPIEndpoint, that represents the endpoint of the instance of the API server to be deployed on this node;
// use it e.g. to customize the API server advertise address.
//
// apiVersion: kubeadm.k8s.io/v1beta2
// kind: ClusterConfiguration
// networking:
// ...
// etcd:
// ...
// apiServer:
// extraArgs:
// ...
// extraVolumes:
// ...
// ...
//
// The ClusterConfiguration type should be used to configure cluster-wide settings,
// including settings for:
//
// - Networking, that holds configuration for the networking topology of the cluster; use it e.g. to customize
// node subnet or services subnet.
//
// - Etcd configurations; use it e.g. to customize the local etcd or to configure the API server
// for using an external etcd cluster.
//
// - kube-apiserver, kube-scheduler, kube-controller-manager configurations; use it to customize control-plane
// components by adding customized settings or overriding kubeadm default settings.
//
// apiVersion: kubeproxy.config.k8s.io/v1alpha1
// kind: KubeProxyConfiguration
// ...
//
// The KubeProxyConfiguration type should be used to change the configuration passed to kube-proxy instances deployed
// in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults.
//
// See https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/ or https://godoc.org/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
// for kube proxy official documentation.
//
// apiVersion: kubelet.config.k8s.io/v1beta1
// kind: KubeletConfiguration
// ...
//
// The KubeletConfiguration type should be used to change the configurations that will be passed to all kubelet instances
// deployed in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults.
//
// See https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/ or https://godoc.org/k8s.io/kubelet/config/v1beta1#KubeletConfiguration
// for kubelet official documentation.
//
// Here is a fully populated example of a single YAML file containing multiple
// configuration types to be used during a `kubeadm init` run.
//
// apiVersion: kubeadm.k8s.io/v1beta2
// kind: InitConfiguration
// bootstrapTokens:
// - token: "9a08jv.c0izixklcxtmnze7"
// description: "kubeadm bootstrap token"
// ttl: "24h"
// - token: "783bde.3f89s0fje9f38fhf"
// description: "another bootstrap token"
// usages:
// - authentication
// - signing
// groups:
// - system:bootstrappers:kubeadm:default-node-token
// nodeRegistration:
// name: "ec2-10-100-0-1"
// criSocket: "/var/run/dockershim.sock"
// taints:
// - key: "kubeadmNode"
// value: "master"
// effect: "NoSchedule"
// kubeletExtraArgs:
// cgroupDriver: "cgroupfs"
// localAPIEndpoint:
// advertiseAddress: "10.100.0.1"
// bindPort: 6443
// ---
// apiVersion: kubeadm.k8s.io/v1beta2
// kind: ClusterConfiguration
// etcd:
// # one of local or external
// local:
// imageRepository: "k8s.gcr.io"
// imageTag: "3.2.24"
// dataDir: "/var/lib/etcd"
// extraArgs:
// listen-client-urls: "http://10.100.0.1:2379"
// serverCertSANs:
// - "ec2-10-100-0-1.compute-1.amazonaws.com"
// peerCertSANs:
// - "10.100.0.1"
// # external:
// # endpoints:
// # - "10.100.0.1:2379"
// # - "10.100.0.2:2379"
// # caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt"
// # certFile: "/etcd/kubernetes/pki/etcd/etcd.crt"
// # keyFile: "/etcd/kubernetes/pki/etcd/etcd.key"
// networking:
// serviceSubnet: "10.96.0.0/12"
// podSubnet: "10.100.0.1/24"
// dnsDomain: "cluster.local"
// kubernetesVersion: "v1.12.0"
// controlPlaneEndpoint: "10.100.0.1:6443"
// apiServer:
// extraArgs:
// authorization-mode: "Node,RBAC"
// extraVolumes:
// - name: "some-volume"
// hostPath: "/etc/some-path"
// mountPath: "/etc/some-pod-path"
// readOnly: false
// pathType: File
// certSANs:
// - "10.100.1.1"
// - "ec2-10-100-0-1.compute-1.amazonaws.com"
// timeoutForControlPlane: 4m0s
// controllerManager:
// extraArgs:
// "node-cidr-mask-size": "20"
// extraVolumes:
// - name: "some-volume"
// hostPath: "/etc/some-path"
// mountPath: "/etc/some-pod-path"
// readOnly: false
// pathType: File
// scheduler:
// extraArgs:
// address: "10.100.0.1"
// extraVolumes:
// - name: "some-volume"
// hostPath: "/etc/some-path"
// mountPath: "/etc/some-pod-path"
// readOnly: false
// pathType: File
// certificatesDir: "/etc/kubernetes/pki"
// imageRepository: "k8s.gcr.io"
// useHyperKubeImage: false
// clusterName: "example-cluster"
// ---
// apiVersion: kubelet.config.k8s.io/v1beta1
// kind: KubeletConfiguration
// # kubelet specific options here
// ---
// apiVersion: kubeproxy.config.k8s.io/v1alpha1
// kind: KubeProxyConfiguration
// # kube-proxy specific options here
//
// Kubeadm join configuration types
//
// When executing kubeadm join with the --config option, the JoinConfiguration type should be provided.
//
// apiVersion: kubeadm.k8s.io/v1beta2
// kind: JoinConfiguration
// ...
//
// The JoinConfiguration type should be used to configure runtime settings, which in the case of kubeadm join
// are the discovery method used for accessing the cluster info and all the settings specific
// to the node where kubeadm is executed, including:
//
// - NodeRegistration, that holds fields that relate to registering the new node to the cluster;
// use it to customize the node name, the CRI socket to use or any other settings that should apply to this
// node only (e.g. the node ip).
//
// - APIEndpoint, that represents the endpoint of the instance of the API server to be eventually deployed on this node.
//
package v1beta2 // import "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
//TODO: The BootstrapTokenString object should move out to either k8s.io/client-go or k8s.io/api in the future
//(probably as part of Bootstrap Tokens going GA). It should not be staged under the kubeadm API as it is now.

View File

@ -0,0 +1,68 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name use in this package
const GroupName = "kubeadm.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"}
var (
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
// SchemeBuilder points to a list of functions added to Scheme.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// AddToScheme applies all the stored functions to the scheme.
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&InitConfiguration{},
&ClusterConfiguration{},
&ClusterStatus{},
&JoinConfiguration{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}

View File

@ -0,0 +1,390 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// InitConfiguration contains a list of elements that is specific "kubeadm init"-only runtime
// information.
type InitConfiguration struct {
metav1.TypeMeta `json:",inline"`
// ClusterConfiguration holds the cluster-wide information, and embeds that struct (which can be (un)marshalled separately as well)
// When InitConfiguration is marshalled to bytes in the external version, this information IS NOT preserved (which can be seen from
// the `json:"-"` tag). This is because when InitConfiguration is (un)marshalled, it turns into two YAML documents, one for the
// InitConfiguration and one for the ClusterConfiguration. Hence, the information must not be duplicated, and is therefore omitted here.
ClusterConfiguration `json:"-"`
// `kubeadm init`-only information. These fields are solely used the first time `kubeadm init` runs.
// After that, the information in the fields IS NOT uploaded to the `kubeadm-config` ConfigMap
// that is used by `kubeadm upgrade` for instance. These fields must be omitempty.
// BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create.
// This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature
BootstrapTokens []BootstrapToken `json:"bootstrapTokens,omitempty"`
// NodeRegistration holds fields that relate to registering the new control-plane node to the cluster
NodeRegistration NodeRegistrationOptions `json:"nodeRegistration,omitempty"`
// LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node
// In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint
// is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This
// configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible
// on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process
// fails you may set the desired value here.
LocalAPIEndpoint APIEndpoint `json:"localAPIEndpoint,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterConfiguration contains cluster-wide configuration for a kubeadm cluster
type ClusterConfiguration struct {
metav1.TypeMeta `json:",inline"`
// Etcd holds configuration for etcd.
Etcd Etcd `json:"etcd"`
// Networking holds configuration for the networking topology of the cluster.
Networking Networking `json:"networking"`
// KubernetesVersion is the target version of the control plane.
KubernetesVersion string `json:"kubernetesVersion"`
// ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it
// can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port.
// In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort
// are used; in case the ControlPlaneEndpoint is specified but without a TCP port,
// the BindPort is used.
// Possible usages are:
// e.g. In a cluster with more than one control plane instances, this field should be
// assigned the address of the external load balancer in front of the
// control plane instances.
// e.g. in environments with enforced node recycling, the ControlPlaneEndpoint
// could be used for assigning a stable DNS to the control plane.
ControlPlaneEndpoint string `json:"controlPlaneEndpoint"`
// APIServer contains extra settings for the API server control plane component
APIServer APIServer `json:"apiServer,omitempty"`
// ControllerManager contains extra settings for the controller manager control plane component
ControllerManager ControlPlaneComponent `json:"controllerManager,omitempty"`
// Scheduler contains extra settings for the scheduler control plane component
Scheduler ControlPlaneComponent `json:"scheduler,omitempty"`
// DNS defines the options for the DNS add-on installed in the cluster.
DNS DNS `json:"dns"`
// CertificatesDir specifies where to store or look for all required certificates.
CertificatesDir string `json:"certificatesDir"`
// ImageRepository sets the container registry to pull images from.
// If empty, `k8s.gcr.io` will be used by default; in case the kubernetes version is a CI build (kubernetes version starts with `ci/` or `ci-cross/`)
// `gcr.io/kubernetes-ci-images` will be used as a default for control plane components and for kube-proxy, while `k8s.gcr.io`
// will be used for all the other images.
ImageRepository string `json:"imageRepository"`
// UseHyperKubeImage controls if hyperkube should be used for Kubernetes components instead of their respective separate images
UseHyperKubeImage bool `json:"useHyperKubeImage,omitempty"`
// FeatureGates enabled by the user.
FeatureGates map[string]bool `json:"featureGates,omitempty"`
// The cluster name
ClusterName string `json:"clusterName,omitempty"`
}
// ControlPlaneComponent holds settings common to control plane component of the cluster
type ControlPlaneComponent struct {
// ExtraArgs is an extra set of flags to pass to the control plane component.
// TODO: This is temporary and ideally we would like to switch all components to
// use ComponentConfig + ConfigMaps.
ExtraArgs map[string]string `json:"extraArgs,omitempty"`
// ExtraVolumes is an extra set of host volumes, mounted to the control plane component.
ExtraVolumes []HostPathMount `json:"extraVolumes,omitempty"`
}
// APIServer holds settings necessary for API server deployments in the cluster
type APIServer struct {
ControlPlaneComponent `json:",inline"`
// CertSANs sets extra Subject Alternative Names for the API Server signing cert.
CertSANs []string `json:"certSANs,omitempty"`
// TimeoutForControlPlane controls the timeout that we use for API server to appear
TimeoutForControlPlane *metav1.Duration `json:"timeoutForControlPlane,omitempty"`
}
// DNSAddOnType defines string identifying DNS add-on types
type DNSAddOnType string
const (
// CoreDNS add-on type
CoreDNS DNSAddOnType = "CoreDNS"
// KubeDNS add-on type
KubeDNS DNSAddOnType = "kube-dns"
)
// DNS defines the DNS addon that should be used in the cluster
type DNS struct {
// Type defines the DNS add-on to be used
Type DNSAddOnType `json:"type"`
// ImageMeta allows customizing the image used for the DNS component
ImageMeta `json:",inline"`
}
// ImageMeta allows customizing the image used for components that do not
// originate from the Kubernetes/Kubernetes release process
type ImageMeta struct {
// ImageRepository sets the container registry to pull images from.
// If not set, the ImageRepository defined in ClusterConfiguration will be used instead.
ImageRepository string `json:"imageRepository,omitempty"`
// ImageTag allows specifying a tag for the image.
// In case this value is set, kubeadm does not automatically change the version of the above components during upgrades.
ImageTag string `json:"imageTag,omitempty"`
// TODO: evaluate if we also need an ImageName based on user feedback
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterStatus contains the cluster status. The ClusterStatus will be stored in the kubeadm-config
// ConfigMap in the cluster, and then updated by kubeadm when additional control plane instance joins or leaves the cluster.
type ClusterStatus struct {
metav1.TypeMeta `json:",inline"`
// APIEndpoints currently available in the cluster, one for each control plane/api server instance.
// The key of the map is the IP of the host's default interface
APIEndpoints map[string]APIEndpoint `json:"apiEndpoints"`
}
// APIEndpoint struct contains elements of API server instance deployed on a node.
type APIEndpoint struct {
// AdvertiseAddress sets the IP address for the API server to advertise.
AdvertiseAddress string `json:"advertiseAddress"`
// BindPort sets the secure port for the API Server to bind to.
// Defaults to 6443.
BindPort int32 `json:"bindPort"`
}
// NodeRegistrationOptions holds fields that relate to registering a new control-plane or node to the cluster, either via "kubeadm init" or "kubeadm join"
type NodeRegistrationOptions struct {
// Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm join` operation.
// This field is also used in the CommonName field of the kubelet's client certificate to the API server.
// Defaults to the hostname of the node if not provided.
Name string `json:"name,omitempty"`
// CRISocket is used to retrieve container runtime info. This information will be annotated to the Node API object, for later re-use
CRISocket string `json:"criSocket,omitempty"`
// Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process
// it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your control-plane node, set this field to an
// empty slice, i.e. `taints: []` in the YAML file. This field is solely used for Node registration.
Taints []v1.Taint `json:"taints,omitempty"`
// KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file
// kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap.
// Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on.
KubeletExtraArgs map[string]string `json:"kubeletExtraArgs,omitempty"`
}
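// Illustrative sketch (hypothetical helper, not kubeadm's defaulting code): how the Taints
// defaulting described above could look. A nil slice gets the control-plane taint (assumed
// here to use the NoSchedule effect), while an explicitly empty slice is left as-is.
func defaultTaints(opts *NodeRegistrationOptions) {
	if opts.Taints == nil {
		opts.Taints = []v1.Taint{{
			Key:    "node-role.kubernetes.io/master",
			Effect: v1.TaintEffectNoSchedule,
		}}
	}
}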
// Networking contains elements describing cluster's networking configuration
type Networking struct {
// ServiceSubnet is the subnet used by k8s services. Defaults to "10.96.0.0/12".
ServiceSubnet string `json:"serviceSubnet"`
// PodSubnet is the subnet used by pods.
PodSubnet string `json:"podSubnet"`
// DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local".
DNSDomain string `json:"dnsDomain"`
}
// BootstrapToken describes one bootstrap token, stored as a Secret in the cluster
type BootstrapToken struct {
// Token is used for establishing bidirectional trust between nodes and control-planes.
// Used for joining nodes in the cluster.
Token *BootstrapTokenString `json:"token"`
// Description sets a human-friendly message explaining why this token exists and what it's used
// for, so other administrators can know its purpose.
Description string `json:"description,omitempty"`
// TTL defines the time to live for this token. Defaults to 24h.
// Expires and TTL are mutually exclusive.
TTL *metav1.Duration `json:"ttl,omitempty"`
// Expires specifies the timestamp when this token expires. Defaults to being set
// dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive.
Expires *metav1.Time `json:"expires,omitempty"`
// Usages describes the ways in which this token can be used. By default it can be used
// for establishing bidirectional trust, but that can be changed here.
Usages []string `json:"usages,omitempty"`
// Groups specifies the extra groups that this token will authenticate as when/if
// used for authentication
Groups []string `json:"groups,omitempty"`
}
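// Illustrative sketch (hypothetical helper, not kubeadm's validation): TTL and Expires are
// mutually exclusive, so a token with both set should be rejected. Assumes "errors" is imported.
func validateTokenExpiration(t *BootstrapToken) error {
	if t.TTL != nil && t.Expires != nil {
		return errors.New("ttl and expires are mutually exclusive")
	}
	return nil
}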
// Etcd contains elements describing Etcd configuration.
type Etcd struct {
// Local provides configuration knobs for configuring the local etcd instance
// Local and External are mutually exclusive
Local *LocalEtcd `json:"local,omitempty"`
// External describes how to connect to an external etcd cluster
// Local and External are mutually exclusive
External *ExternalEtcd `json:"external,omitempty"`
}
// LocalEtcd describes that kubeadm should run an etcd cluster locally
type LocalEtcd struct {
// ImageMeta allows customizing the container used for etcd
ImageMeta `json:",inline"`
// DataDir is the directory where etcd will place its data.
// Defaults to "/var/lib/etcd".
DataDir string `json:"dataDir"`
// ExtraArgs are extra arguments provided to the etcd binary
// when run inside a static pod.
ExtraArgs map[string]string `json:"extraArgs,omitempty"`
// ServerCertSANs sets extra Subject Alternative Names for the etcd server signing cert.
ServerCertSANs []string `json:"serverCertSANs,omitempty"`
// PeerCertSANs sets extra Subject Alternative Names for the etcd peer signing cert.
PeerCertSANs []string `json:"peerCertSANs,omitempty"`
}
// ExternalEtcd describes an external etcd cluster.
// Kubeadm has no knowledge of where certificate files live and they must be supplied.
type ExternalEtcd struct {
// Endpoints of etcd members. Required for ExternalEtcd.
Endpoints []string `json:"endpoints"`
// CAFile is an SSL Certificate Authority file used to secure etcd communication.
// Required if using a TLS connection.
CAFile string `json:"caFile"`
// CertFile is an SSL certification file used to secure etcd communication.
// Required if using a TLS connection.
CertFile string `json:"certFile"`
// KeyFile is an SSL key file used to secure etcd communication.
// Required if using a TLS connection.
KeyFile string `json:"keyFile"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// JoinConfiguration contains elements describing a particular node.
type JoinConfiguration struct {
metav1.TypeMeta `json:",inline"`
// NodeRegistration holds fields that relate to registering the new control-plane node to the cluster
NodeRegistration NodeRegistrationOptions `json:"nodeRegistration"`
// CACertPath is the path to the SSL certificate authority used to
// secure communications between node and control-plane.
// Defaults to "/etc/kubernetes/pki/ca.crt".
CACertPath string `json:"caCertPath"`
// Discovery specifies the options for the kubelet to use during the TLS Bootstrap process
Discovery Discovery `json:"discovery"`
// ControlPlane defines the additional control plane instance to be deployed on the joining node.
// If nil, no additional control plane instance will be deployed.
ControlPlane *JoinControlPlane `json:"controlPlane,omitempty"`
}
// JoinControlPlane contains elements describing an additional control plane instance to be deployed on the joining node.
type JoinControlPlane struct {
// LocalAPIEndpoint represents the endpoint of the API server instance to be deployed on this node.
LocalAPIEndpoint APIEndpoint `json:"localAPIEndpoint,omitempty"`
}
// Discovery specifies the options for the kubelet to use during the TLS Bootstrap process
type Discovery struct {
// BootstrapToken is used to set the options for bootstrap token based discovery
// BootstrapToken and File are mutually exclusive
BootstrapToken *BootstrapTokenDiscovery `json:"bootstrapToken,omitempty"`
// File is used to specify a file or URL to a kubeconfig file from which to load cluster information
// BootstrapToken and File are mutually exclusive
File *FileDiscovery `json:"file,omitempty"`
// TLSBootstrapToken is a token used for TLS bootstrapping.
// If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden.
// If .File is set, this field **must be set** in case the KubeConfigFile does not contain any other authentication information
TLSBootstrapToken string `json:"tlsBootstrapToken"`
// Timeout modifies the discovery timeout
Timeout *metav1.Duration `json:"timeout,omitempty"`
}
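// Illustrative sketch (hypothetical helper, not kubeadm's defaulting code): BootstrapToken and
// File are mutually exclusive, and TLSBootstrapToken falls back to the discovery token when
// only bootstrap-token based discovery is configured. Assumes "errors" is imported.
func defaultDiscovery(d *Discovery) error {
	if d.BootstrapToken != nil && d.File != nil {
		return errors.New("bootstrapToken and file are mutually exclusive")
	}
	if d.TLSBootstrapToken == "" && d.BootstrapToken != nil {
		d.TLSBootstrapToken = d.BootstrapToken.Token
	}
	return nil
}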
// BootstrapTokenDiscovery is used to set the options for bootstrap token based discovery
type BootstrapTokenDiscovery struct {
// Token is a token used to validate cluster information
// fetched from the control-plane.
Token string `json:"token"`
// APIServerEndpoint is an IP address or domain name of the API server from which info will be fetched.
APIServerEndpoint string `json:"apiServerEndpoint,omitempty"`
// CACertHashes specifies a set of public key pins to verify
// when token-based discovery is used. The root CA found during discovery
// must match one of these values. Specifying an empty set disables root CA
// pinning, which can be unsafe. Each hash is specified as "<type>:<value>",
// where the only currently supported type is "sha256". This is a hex-encoded
// SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded
// ASN.1. These hashes can be calculated using, for example, OpenSSL:
// openssl x509 -pubkey -in ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex
CACertHashes []string `json:"caCertHashes,omitempty"`
// UnsafeSkipCAVerification allows token-based discovery
// without CA verification via CACertHashes. This can weaken
// the security of kubeadm since other nodes can impersonate the control-plane.
UnsafeSkipCAVerification bool `json:"unsafeSkipCAVerification"`
}
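// Illustrative sketch (not part of the API): computing a CACertHashes entry in Go, equivalent
// to the openssl pipeline in the comment above. The helper name is hypothetical; it assumes
// "crypto/sha256", "crypto/x509", "encoding/hex" and "fmt" are imported and that cert is an
// already-parsed CA certificate.
func spkiSHA256Hash(cert *x509.Certificate) string {
	// Hash the DER-encoded Subject Public Key Info and prefix it with the hash type.
	sum := sha256.Sum256(cert.RawSubjectPublicKeyInfo)
	return fmt.Sprintf("sha256:%s", hex.EncodeToString(sum[:]))
}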
// FileDiscovery is used to specify a file or URL to a kubeconfig file from which to load cluster information
type FileDiscovery struct {
// KubeConfigPath is used to specify the actual file path or URL to the kubeconfig file from which to load cluster information
KubeConfigPath string `json:"kubeConfigPath"`
}
// HostPathMount contains elements describing volumes that are mounted from the
// host.
type HostPathMount struct {
// Name of the volume inside the pod template.
Name string `json:"name"`
// HostPath is the path in the host that will be mounted inside
// the pod.
HostPath string `json:"hostPath"`
// MountPath is the path inside the pod where hostPath will be mounted.
MountPath string `json:"mountPath"`
// ReadOnly controls write access to the volume
ReadOnly bool `json:"readOnly,omitempty"`
// PathType is the type of the HostPath.
PathType v1.HostPathType `json:"pathType,omitempty"`
}
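// Illustrative sketch (hypothetical helper): how a HostPathMount maps onto a pod volume and a
// container mount, using the corev1 types available under the v1 alias in this file.
func hostPathVolume(m HostPathMount) (v1.Volume, v1.VolumeMount) {
	pathType := m.PathType
	vol := v1.Volume{
		Name: m.Name,
		VolumeSource: v1.VolumeSource{
			HostPath: &v1.HostPathVolumeSource{Path: m.HostPath, Type: &pathType},
		},
	}
	mount := v1.VolumeMount{Name: m.Name, MountPath: m.MountPath, ReadOnly: m.ReadOnly}
	return vol, mount
}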

View File

@ -0,0 +1,839 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1beta2
import (
unsafe "unsafe"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
)
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*APIEndpoint)(nil), (*kubeadm.APIEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_APIEndpoint_To_kubeadm_APIEndpoint(a.(*APIEndpoint), b.(*kubeadm.APIEndpoint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.APIEndpoint)(nil), (*APIEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_APIEndpoint_To_v1beta2_APIEndpoint(a.(*kubeadm.APIEndpoint), b.(*APIEndpoint), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*APIServer)(nil), (*kubeadm.APIServer)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_APIServer_To_kubeadm_APIServer(a.(*APIServer), b.(*kubeadm.APIServer), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.APIServer)(nil), (*APIServer)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_APIServer_To_v1beta2_APIServer(a.(*kubeadm.APIServer), b.(*APIServer), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*BootstrapToken)(nil), (*kubeadm.BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_BootstrapToken_To_kubeadm_BootstrapToken(a.(*BootstrapToken), b.(*kubeadm.BootstrapToken), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.BootstrapToken)(nil), (*BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_BootstrapToken_To_v1beta2_BootstrapToken(a.(*kubeadm.BootstrapToken), b.(*BootstrapToken), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*BootstrapTokenDiscovery)(nil), (*kubeadm.BootstrapTokenDiscovery)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_BootstrapTokenDiscovery_To_kubeadm_BootstrapTokenDiscovery(a.(*BootstrapTokenDiscovery), b.(*kubeadm.BootstrapTokenDiscovery), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.BootstrapTokenDiscovery)(nil), (*BootstrapTokenDiscovery)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery(a.(*kubeadm.BootstrapTokenDiscovery), b.(*BootstrapTokenDiscovery), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*BootstrapTokenString)(nil), (*kubeadm.BootstrapTokenString)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_BootstrapTokenString_To_kubeadm_BootstrapTokenString(a.(*BootstrapTokenString), b.(*kubeadm.BootstrapTokenString), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.BootstrapTokenString)(nil), (*BootstrapTokenString)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_BootstrapTokenString_To_v1beta2_BootstrapTokenString(a.(*kubeadm.BootstrapTokenString), b.(*BootstrapTokenString), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ClusterConfiguration)(nil), (*kubeadm.ClusterConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ClusterConfiguration_To_kubeadm_ClusterConfiguration(a.(*ClusterConfiguration), b.(*kubeadm.ClusterConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.ClusterConfiguration)(nil), (*ClusterConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_ClusterConfiguration_To_v1beta2_ClusterConfiguration(a.(*kubeadm.ClusterConfiguration), b.(*ClusterConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ClusterStatus)(nil), (*kubeadm.ClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ClusterStatus_To_kubeadm_ClusterStatus(a.(*ClusterStatus), b.(*kubeadm.ClusterStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.ClusterStatus)(nil), (*ClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_ClusterStatus_To_v1beta2_ClusterStatus(a.(*kubeadm.ClusterStatus), b.(*ClusterStatus), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ControlPlaneComponent)(nil), (*kubeadm.ControlPlaneComponent)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(a.(*ControlPlaneComponent), b.(*kubeadm.ControlPlaneComponent), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.ControlPlaneComponent)(nil), (*ControlPlaneComponent)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_ControlPlaneComponent_To_v1beta2_ControlPlaneComponent(a.(*kubeadm.ControlPlaneComponent), b.(*ControlPlaneComponent), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*DNS)(nil), (*kubeadm.DNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_DNS_To_kubeadm_DNS(a.(*DNS), b.(*kubeadm.DNS), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.DNS)(nil), (*DNS)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_DNS_To_v1beta2_DNS(a.(*kubeadm.DNS), b.(*DNS), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Discovery)(nil), (*kubeadm.Discovery)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_Discovery_To_kubeadm_Discovery(a.(*Discovery), b.(*kubeadm.Discovery), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.Discovery)(nil), (*Discovery)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_Discovery_To_v1beta2_Discovery(a.(*kubeadm.Discovery), b.(*Discovery), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Etcd)(nil), (*kubeadm.Etcd)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_Etcd_To_kubeadm_Etcd(a.(*Etcd), b.(*kubeadm.Etcd), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.Etcd)(nil), (*Etcd)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_Etcd_To_v1beta2_Etcd(a.(*kubeadm.Etcd), b.(*Etcd), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ExternalEtcd)(nil), (*kubeadm.ExternalEtcd)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ExternalEtcd_To_kubeadm_ExternalEtcd(a.(*ExternalEtcd), b.(*kubeadm.ExternalEtcd), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.ExternalEtcd)(nil), (*ExternalEtcd)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_ExternalEtcd_To_v1beta2_ExternalEtcd(a.(*kubeadm.ExternalEtcd), b.(*ExternalEtcd), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*FileDiscovery)(nil), (*kubeadm.FileDiscovery)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_FileDiscovery_To_kubeadm_FileDiscovery(a.(*FileDiscovery), b.(*kubeadm.FileDiscovery), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.FileDiscovery)(nil), (*FileDiscovery)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_FileDiscovery_To_v1beta2_FileDiscovery(a.(*kubeadm.FileDiscovery), b.(*FileDiscovery), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*HostPathMount)(nil), (*kubeadm.HostPathMount)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_HostPathMount_To_kubeadm_HostPathMount(a.(*HostPathMount), b.(*kubeadm.HostPathMount), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.HostPathMount)(nil), (*HostPathMount)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_HostPathMount_To_v1beta2_HostPathMount(a.(*kubeadm.HostPathMount), b.(*HostPathMount), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ImageMeta)(nil), (*kubeadm.ImageMeta)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_ImageMeta_To_kubeadm_ImageMeta(a.(*ImageMeta), b.(*kubeadm.ImageMeta), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.ImageMeta)(nil), (*ImageMeta)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_ImageMeta_To_v1beta2_ImageMeta(a.(*kubeadm.ImageMeta), b.(*ImageMeta), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*InitConfiguration)(nil), (*kubeadm.InitConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_InitConfiguration_To_kubeadm_InitConfiguration(a.(*InitConfiguration), b.(*kubeadm.InitConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.InitConfiguration)(nil), (*InitConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_InitConfiguration_To_v1beta2_InitConfiguration(a.(*kubeadm.InitConfiguration), b.(*InitConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*JoinConfiguration)(nil), (*kubeadm.JoinConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_JoinConfiguration_To_kubeadm_JoinConfiguration(a.(*JoinConfiguration), b.(*kubeadm.JoinConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.JoinConfiguration)(nil), (*JoinConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_JoinConfiguration_To_v1beta2_JoinConfiguration(a.(*kubeadm.JoinConfiguration), b.(*JoinConfiguration), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*JoinControlPlane)(nil), (*kubeadm.JoinControlPlane)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_JoinControlPlane_To_kubeadm_JoinControlPlane(a.(*JoinControlPlane), b.(*kubeadm.JoinControlPlane), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.JoinControlPlane)(nil), (*JoinControlPlane)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_JoinControlPlane_To_v1beta2_JoinControlPlane(a.(*kubeadm.JoinControlPlane), b.(*JoinControlPlane), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*LocalEtcd)(nil), (*kubeadm.LocalEtcd)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_LocalEtcd_To_kubeadm_LocalEtcd(a.(*LocalEtcd), b.(*kubeadm.LocalEtcd), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.LocalEtcd)(nil), (*LocalEtcd)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_LocalEtcd_To_v1beta2_LocalEtcd(a.(*kubeadm.LocalEtcd), b.(*LocalEtcd), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Networking)(nil), (*kubeadm.Networking)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_Networking_To_kubeadm_Networking(a.(*Networking), b.(*kubeadm.Networking), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.Networking)(nil), (*Networking)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_Networking_To_v1beta2_Networking(a.(*kubeadm.Networking), b.(*Networking), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*NodeRegistrationOptions)(nil), (*kubeadm.NodeRegistrationOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(a.(*NodeRegistrationOptions), b.(*kubeadm.NodeRegistrationOptions), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kubeadm.NodeRegistrationOptions)(nil), (*NodeRegistrationOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kubeadm_NodeRegistrationOptions_To_v1beta2_NodeRegistrationOptions(a.(*kubeadm.NodeRegistrationOptions), b.(*NodeRegistrationOptions), scope)
}); err != nil {
return err
}
return nil
}
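// Illustrative usage sketch (not generated code): once RegisterConversions has been called on a
// scheme, conversions in either direction go through runtime.Scheme.Convert. The scheme wiring
// below is a hypothetical example, not how kubeadm actually builds its scheme.
func exampleConvert(in *ClusterConfiguration) (*kubeadm.ClusterConfiguration, error) {
	scheme := runtime.NewScheme()
	if err := RegisterConversions(scheme); err != nil {
		return nil, err
	}
	out := &kubeadm.ClusterConfiguration{}
	if err := scheme.Convert(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}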
func autoConvert_v1beta2_APIEndpoint_To_kubeadm_APIEndpoint(in *APIEndpoint, out *kubeadm.APIEndpoint, s conversion.Scope) error {
out.AdvertiseAddress = in.AdvertiseAddress
out.BindPort = in.BindPort
return nil
}
// Convert_v1beta2_APIEndpoint_To_kubeadm_APIEndpoint is an autogenerated conversion function.
func Convert_v1beta2_APIEndpoint_To_kubeadm_APIEndpoint(in *APIEndpoint, out *kubeadm.APIEndpoint, s conversion.Scope) error {
return autoConvert_v1beta2_APIEndpoint_To_kubeadm_APIEndpoint(in, out, s)
}
func autoConvert_kubeadm_APIEndpoint_To_v1beta2_APIEndpoint(in *kubeadm.APIEndpoint, out *APIEndpoint, s conversion.Scope) error {
out.AdvertiseAddress = in.AdvertiseAddress
out.BindPort = in.BindPort
return nil
}
// Convert_kubeadm_APIEndpoint_To_v1beta2_APIEndpoint is an autogenerated conversion function.
func Convert_kubeadm_APIEndpoint_To_v1beta2_APIEndpoint(in *kubeadm.APIEndpoint, out *APIEndpoint, s conversion.Scope) error {
return autoConvert_kubeadm_APIEndpoint_To_v1beta2_APIEndpoint(in, out, s)
}
func autoConvert_v1beta2_APIServer_To_kubeadm_APIServer(in *APIServer, out *kubeadm.APIServer, s conversion.Scope) error {
if err := Convert_v1beta2_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(&in.ControlPlaneComponent, &out.ControlPlaneComponent, s); err != nil {
return err
}
out.CertSANs = *(*[]string)(unsafe.Pointer(&in.CertSANs))
out.TimeoutForControlPlane = (*v1.Duration)(unsafe.Pointer(in.TimeoutForControlPlane))
return nil
}
// Convert_v1beta2_APIServer_To_kubeadm_APIServer is an autogenerated conversion function.
func Convert_v1beta2_APIServer_To_kubeadm_APIServer(in *APIServer, out *kubeadm.APIServer, s conversion.Scope) error {
return autoConvert_v1beta2_APIServer_To_kubeadm_APIServer(in, out, s)
}
func autoConvert_kubeadm_APIServer_To_v1beta2_APIServer(in *kubeadm.APIServer, out *APIServer, s conversion.Scope) error {
if err := Convert_kubeadm_ControlPlaneComponent_To_v1beta2_ControlPlaneComponent(&in.ControlPlaneComponent, &out.ControlPlaneComponent, s); err != nil {
return err
}
out.CertSANs = *(*[]string)(unsafe.Pointer(&in.CertSANs))
out.TimeoutForControlPlane = (*v1.Duration)(unsafe.Pointer(in.TimeoutForControlPlane))
return nil
}
// Convert_kubeadm_APIServer_To_v1beta2_APIServer is an autogenerated conversion function.
func Convert_kubeadm_APIServer_To_v1beta2_APIServer(in *kubeadm.APIServer, out *APIServer, s conversion.Scope) error {
return autoConvert_kubeadm_APIServer_To_v1beta2_APIServer(in, out, s)
}
func autoConvert_v1beta2_BootstrapToken_To_kubeadm_BootstrapToken(in *BootstrapToken, out *kubeadm.BootstrapToken, s conversion.Scope) error {
out.Token = (*kubeadm.BootstrapTokenString)(unsafe.Pointer(in.Token))
out.Description = in.Description
out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL))
out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires))
out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages))
out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
return nil
}
// Convert_v1beta2_BootstrapToken_To_kubeadm_BootstrapToken is an autogenerated conversion function.
func Convert_v1beta2_BootstrapToken_To_kubeadm_BootstrapToken(in *BootstrapToken, out *kubeadm.BootstrapToken, s conversion.Scope) error {
return autoConvert_v1beta2_BootstrapToken_To_kubeadm_BootstrapToken(in, out, s)
}
func autoConvert_kubeadm_BootstrapToken_To_v1beta2_BootstrapToken(in *kubeadm.BootstrapToken, out *BootstrapToken, s conversion.Scope) error {
out.Token = (*BootstrapTokenString)(unsafe.Pointer(in.Token))
out.Description = in.Description
out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL))
out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires))
out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages))
out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups))
return nil
}
// Convert_kubeadm_BootstrapToken_To_v1beta2_BootstrapToken is an autogenerated conversion function.
func Convert_kubeadm_BootstrapToken_To_v1beta2_BootstrapToken(in *kubeadm.BootstrapToken, out *BootstrapToken, s conversion.Scope) error {
return autoConvert_kubeadm_BootstrapToken_To_v1beta2_BootstrapToken(in, out, s)
}
func autoConvert_v1beta2_BootstrapTokenDiscovery_To_kubeadm_BootstrapTokenDiscovery(in *BootstrapTokenDiscovery, out *kubeadm.BootstrapTokenDiscovery, s conversion.Scope) error {
out.Token = in.Token
out.APIServerEndpoint = in.APIServerEndpoint
out.CACertHashes = *(*[]string)(unsafe.Pointer(&in.CACertHashes))
out.UnsafeSkipCAVerification = in.UnsafeSkipCAVerification
return nil
}
// Convert_v1beta2_BootstrapTokenDiscovery_To_kubeadm_BootstrapTokenDiscovery is an autogenerated conversion function.
func Convert_v1beta2_BootstrapTokenDiscovery_To_kubeadm_BootstrapTokenDiscovery(in *BootstrapTokenDiscovery, out *kubeadm.BootstrapTokenDiscovery, s conversion.Scope) error {
return autoConvert_v1beta2_BootstrapTokenDiscovery_To_kubeadm_BootstrapTokenDiscovery(in, out, s)
}
func autoConvert_kubeadm_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery(in *kubeadm.BootstrapTokenDiscovery, out *BootstrapTokenDiscovery, s conversion.Scope) error {
out.Token = in.Token
out.APIServerEndpoint = in.APIServerEndpoint
out.CACertHashes = *(*[]string)(unsafe.Pointer(&in.CACertHashes))
out.UnsafeSkipCAVerification = in.UnsafeSkipCAVerification
return nil
}
// Convert_kubeadm_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery is an autogenerated conversion function.
func Convert_kubeadm_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery(in *kubeadm.BootstrapTokenDiscovery, out *BootstrapTokenDiscovery, s conversion.Scope) error {
return autoConvert_kubeadm_BootstrapTokenDiscovery_To_v1beta2_BootstrapTokenDiscovery(in, out, s)
}
func autoConvert_v1beta2_BootstrapTokenString_To_kubeadm_BootstrapTokenString(in *BootstrapTokenString, out *kubeadm.BootstrapTokenString, s conversion.Scope) error {
out.ID = in.ID
out.Secret = in.Secret
return nil
}
// Convert_v1beta2_BootstrapTokenString_To_kubeadm_BootstrapTokenString is an autogenerated conversion function.
func Convert_v1beta2_BootstrapTokenString_To_kubeadm_BootstrapTokenString(in *BootstrapTokenString, out *kubeadm.BootstrapTokenString, s conversion.Scope) error {
return autoConvert_v1beta2_BootstrapTokenString_To_kubeadm_BootstrapTokenString(in, out, s)
}
func autoConvert_kubeadm_BootstrapTokenString_To_v1beta2_BootstrapTokenString(in *kubeadm.BootstrapTokenString, out *BootstrapTokenString, s conversion.Scope) error {
out.ID = in.ID
out.Secret = in.Secret
return nil
}
// Convert_kubeadm_BootstrapTokenString_To_v1beta2_BootstrapTokenString is an autogenerated conversion function.
func Convert_kubeadm_BootstrapTokenString_To_v1beta2_BootstrapTokenString(in *kubeadm.BootstrapTokenString, out *BootstrapTokenString, s conversion.Scope) error {
return autoConvert_kubeadm_BootstrapTokenString_To_v1beta2_BootstrapTokenString(in, out, s)
}
func autoConvert_v1beta2_ClusterConfiguration_To_kubeadm_ClusterConfiguration(in *ClusterConfiguration, out *kubeadm.ClusterConfiguration, s conversion.Scope) error {
if err := Convert_v1beta2_Etcd_To_kubeadm_Etcd(&in.Etcd, &out.Etcd, s); err != nil {
return err
}
if err := Convert_v1beta2_Networking_To_kubeadm_Networking(&in.Networking, &out.Networking, s); err != nil {
return err
}
out.KubernetesVersion = in.KubernetesVersion
out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
if err := Convert_v1beta2_APIServer_To_kubeadm_APIServer(&in.APIServer, &out.APIServer, s); err != nil {
return err
}
if err := Convert_v1beta2_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(&in.ControllerManager, &out.ControllerManager, s); err != nil {
return err
}
if err := Convert_v1beta2_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(&in.Scheduler, &out.Scheduler, s); err != nil {
return err
}
if err := Convert_v1beta2_DNS_To_kubeadm_DNS(&in.DNS, &out.DNS, s); err != nil {
return err
}
out.CertificatesDir = in.CertificatesDir
out.ImageRepository = in.ImageRepository
out.UseHyperKubeImage = in.UseHyperKubeImage
out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
out.ClusterName = in.ClusterName
return nil
}
// Convert_v1beta2_ClusterConfiguration_To_kubeadm_ClusterConfiguration is an autogenerated conversion function.
func Convert_v1beta2_ClusterConfiguration_To_kubeadm_ClusterConfiguration(in *ClusterConfiguration, out *kubeadm.ClusterConfiguration, s conversion.Scope) error {
return autoConvert_v1beta2_ClusterConfiguration_To_kubeadm_ClusterConfiguration(in, out, s)
}
func autoConvert_kubeadm_ClusterConfiguration_To_v1beta2_ClusterConfiguration(in *kubeadm.ClusterConfiguration, out *ClusterConfiguration, s conversion.Scope) error {
// INFO: in.ComponentConfigs opted out of conversion generation
if err := Convert_kubeadm_Etcd_To_v1beta2_Etcd(&in.Etcd, &out.Etcd, s); err != nil {
return err
}
if err := Convert_kubeadm_Networking_To_v1beta2_Networking(&in.Networking, &out.Networking, s); err != nil {
return err
}
out.KubernetesVersion = in.KubernetesVersion
out.ControlPlaneEndpoint = in.ControlPlaneEndpoint
if err := Convert_kubeadm_APIServer_To_v1beta2_APIServer(&in.APIServer, &out.APIServer, s); err != nil {
return err
}
if err := Convert_kubeadm_ControlPlaneComponent_To_v1beta2_ControlPlaneComponent(&in.ControllerManager, &out.ControllerManager, s); err != nil {
return err
}
if err := Convert_kubeadm_ControlPlaneComponent_To_v1beta2_ControlPlaneComponent(&in.Scheduler, &out.Scheduler, s); err != nil {
return err
}
if err := Convert_kubeadm_DNS_To_v1beta2_DNS(&in.DNS, &out.DNS, s); err != nil {
return err
}
out.CertificatesDir = in.CertificatesDir
out.ImageRepository = in.ImageRepository
// INFO: in.CIImageRepository opted out of conversion generation
out.UseHyperKubeImage = in.UseHyperKubeImage
out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates))
out.ClusterName = in.ClusterName
return nil
}
// Convert_kubeadm_ClusterConfiguration_To_v1beta2_ClusterConfiguration is an autogenerated conversion function.
func Convert_kubeadm_ClusterConfiguration_To_v1beta2_ClusterConfiguration(in *kubeadm.ClusterConfiguration, out *ClusterConfiguration, s conversion.Scope) error {
return autoConvert_kubeadm_ClusterConfiguration_To_v1beta2_ClusterConfiguration(in, out, s)
}
func autoConvert_v1beta2_ClusterStatus_To_kubeadm_ClusterStatus(in *ClusterStatus, out *kubeadm.ClusterStatus, s conversion.Scope) error {
out.APIEndpoints = *(*map[string]kubeadm.APIEndpoint)(unsafe.Pointer(&in.APIEndpoints))
return nil
}
// Convert_v1beta2_ClusterStatus_To_kubeadm_ClusterStatus is an autogenerated conversion function.
func Convert_v1beta2_ClusterStatus_To_kubeadm_ClusterStatus(in *ClusterStatus, out *kubeadm.ClusterStatus, s conversion.Scope) error {
return autoConvert_v1beta2_ClusterStatus_To_kubeadm_ClusterStatus(in, out, s)
}
func autoConvert_kubeadm_ClusterStatus_To_v1beta2_ClusterStatus(in *kubeadm.ClusterStatus, out *ClusterStatus, s conversion.Scope) error {
out.APIEndpoints = *(*map[string]APIEndpoint)(unsafe.Pointer(&in.APIEndpoints))
return nil
}
// Convert_kubeadm_ClusterStatus_To_v1beta2_ClusterStatus is an autogenerated conversion function.
func Convert_kubeadm_ClusterStatus_To_v1beta2_ClusterStatus(in *kubeadm.ClusterStatus, out *ClusterStatus, s conversion.Scope) error {
return autoConvert_kubeadm_ClusterStatus_To_v1beta2_ClusterStatus(in, out, s)
}
func autoConvert_v1beta2_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(in *ControlPlaneComponent, out *kubeadm.ControlPlaneComponent, s conversion.Scope) error {
out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs))
out.ExtraVolumes = *(*[]kubeadm.HostPathMount)(unsafe.Pointer(&in.ExtraVolumes))
return nil
}
// Convert_v1beta2_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent is an autogenerated conversion function.
func Convert_v1beta2_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(in *ControlPlaneComponent, out *kubeadm.ControlPlaneComponent, s conversion.Scope) error {
return autoConvert_v1beta2_ControlPlaneComponent_To_kubeadm_ControlPlaneComponent(in, out, s)
}
func autoConvert_kubeadm_ControlPlaneComponent_To_v1beta2_ControlPlaneComponent(in *kubeadm.ControlPlaneComponent, out *ControlPlaneComponent, s conversion.Scope) error {
out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs))
out.ExtraVolumes = *(*[]HostPathMount)(unsafe.Pointer(&in.ExtraVolumes))
return nil
}
// Convert_kubeadm_ControlPlaneComponent_To_v1beta2_ControlPlaneComponent is an autogenerated conversion function.
func Convert_kubeadm_ControlPlaneComponent_To_v1beta2_ControlPlaneComponent(in *kubeadm.ControlPlaneComponent, out *ControlPlaneComponent, s conversion.Scope) error {
return autoConvert_kubeadm_ControlPlaneComponent_To_v1beta2_ControlPlaneComponent(in, out, s)
}
func autoConvert_v1beta2_DNS_To_kubeadm_DNS(in *DNS, out *kubeadm.DNS, s conversion.Scope) error {
out.Type = kubeadm.DNSAddOnType(in.Type)
if err := Convert_v1beta2_ImageMeta_To_kubeadm_ImageMeta(&in.ImageMeta, &out.ImageMeta, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_DNS_To_kubeadm_DNS is an autogenerated conversion function.
func Convert_v1beta2_DNS_To_kubeadm_DNS(in *DNS, out *kubeadm.DNS, s conversion.Scope) error {
return autoConvert_v1beta2_DNS_To_kubeadm_DNS(in, out, s)
}
func autoConvert_kubeadm_DNS_To_v1beta2_DNS(in *kubeadm.DNS, out *DNS, s conversion.Scope) error {
out.Type = DNSAddOnType(in.Type)
if err := Convert_kubeadm_ImageMeta_To_v1beta2_ImageMeta(&in.ImageMeta, &out.ImageMeta, s); err != nil {
return err
}
return nil
}
// Convert_kubeadm_DNS_To_v1beta2_DNS is an autogenerated conversion function.
func Convert_kubeadm_DNS_To_v1beta2_DNS(in *kubeadm.DNS, out *DNS, s conversion.Scope) error {
return autoConvert_kubeadm_DNS_To_v1beta2_DNS(in, out, s)
}
func autoConvert_v1beta2_Discovery_To_kubeadm_Discovery(in *Discovery, out *kubeadm.Discovery, s conversion.Scope) error {
out.BootstrapToken = (*kubeadm.BootstrapTokenDiscovery)(unsafe.Pointer(in.BootstrapToken))
out.File = (*kubeadm.FileDiscovery)(unsafe.Pointer(in.File))
out.TLSBootstrapToken = in.TLSBootstrapToken
out.Timeout = (*v1.Duration)(unsafe.Pointer(in.Timeout))
return nil
}
// Convert_v1beta2_Discovery_To_kubeadm_Discovery is an autogenerated conversion function.
func Convert_v1beta2_Discovery_To_kubeadm_Discovery(in *Discovery, out *kubeadm.Discovery, s conversion.Scope) error {
return autoConvert_v1beta2_Discovery_To_kubeadm_Discovery(in, out, s)
}
func autoConvert_kubeadm_Discovery_To_v1beta2_Discovery(in *kubeadm.Discovery, out *Discovery, s conversion.Scope) error {
out.BootstrapToken = (*BootstrapTokenDiscovery)(unsafe.Pointer(in.BootstrapToken))
out.File = (*FileDiscovery)(unsafe.Pointer(in.File))
out.TLSBootstrapToken = in.TLSBootstrapToken
out.Timeout = (*v1.Duration)(unsafe.Pointer(in.Timeout))
return nil
}
// Convert_kubeadm_Discovery_To_v1beta2_Discovery is an autogenerated conversion function.
func Convert_kubeadm_Discovery_To_v1beta2_Discovery(in *kubeadm.Discovery, out *Discovery, s conversion.Scope) error {
return autoConvert_kubeadm_Discovery_To_v1beta2_Discovery(in, out, s)
}
func autoConvert_v1beta2_Etcd_To_kubeadm_Etcd(in *Etcd, out *kubeadm.Etcd, s conversion.Scope) error {
out.Local = (*kubeadm.LocalEtcd)(unsafe.Pointer(in.Local))
out.External = (*kubeadm.ExternalEtcd)(unsafe.Pointer(in.External))
return nil
}
// Convert_v1beta2_Etcd_To_kubeadm_Etcd is an autogenerated conversion function.
func Convert_v1beta2_Etcd_To_kubeadm_Etcd(in *Etcd, out *kubeadm.Etcd, s conversion.Scope) error {
return autoConvert_v1beta2_Etcd_To_kubeadm_Etcd(in, out, s)
}
func autoConvert_kubeadm_Etcd_To_v1beta2_Etcd(in *kubeadm.Etcd, out *Etcd, s conversion.Scope) error {
out.Local = (*LocalEtcd)(unsafe.Pointer(in.Local))
out.External = (*ExternalEtcd)(unsafe.Pointer(in.External))
return nil
}
// Convert_kubeadm_Etcd_To_v1beta2_Etcd is an autogenerated conversion function.
func Convert_kubeadm_Etcd_To_v1beta2_Etcd(in *kubeadm.Etcd, out *Etcd, s conversion.Scope) error {
return autoConvert_kubeadm_Etcd_To_v1beta2_Etcd(in, out, s)
}
func autoConvert_v1beta2_ExternalEtcd_To_kubeadm_ExternalEtcd(in *ExternalEtcd, out *kubeadm.ExternalEtcd, s conversion.Scope) error {
out.Endpoints = *(*[]string)(unsafe.Pointer(&in.Endpoints))
out.CAFile = in.CAFile
out.CertFile = in.CertFile
out.KeyFile = in.KeyFile
return nil
}
// Convert_v1beta2_ExternalEtcd_To_kubeadm_ExternalEtcd is an autogenerated conversion function.
func Convert_v1beta2_ExternalEtcd_To_kubeadm_ExternalEtcd(in *ExternalEtcd, out *kubeadm.ExternalEtcd, s conversion.Scope) error {
return autoConvert_v1beta2_ExternalEtcd_To_kubeadm_ExternalEtcd(in, out, s)
}
func autoConvert_kubeadm_ExternalEtcd_To_v1beta2_ExternalEtcd(in *kubeadm.ExternalEtcd, out *ExternalEtcd, s conversion.Scope) error {
out.Endpoints = *(*[]string)(unsafe.Pointer(&in.Endpoints))
out.CAFile = in.CAFile
out.CertFile = in.CertFile
out.KeyFile = in.KeyFile
return nil
}
// Convert_kubeadm_ExternalEtcd_To_v1beta2_ExternalEtcd is an autogenerated conversion function.
func Convert_kubeadm_ExternalEtcd_To_v1beta2_ExternalEtcd(in *kubeadm.ExternalEtcd, out *ExternalEtcd, s conversion.Scope) error {
return autoConvert_kubeadm_ExternalEtcd_To_v1beta2_ExternalEtcd(in, out, s)
}
func autoConvert_v1beta2_FileDiscovery_To_kubeadm_FileDiscovery(in *FileDiscovery, out *kubeadm.FileDiscovery, s conversion.Scope) error {
out.KubeConfigPath = in.KubeConfigPath
return nil
}
// Convert_v1beta2_FileDiscovery_To_kubeadm_FileDiscovery is an autogenerated conversion function.
func Convert_v1beta2_FileDiscovery_To_kubeadm_FileDiscovery(in *FileDiscovery, out *kubeadm.FileDiscovery, s conversion.Scope) error {
return autoConvert_v1beta2_FileDiscovery_To_kubeadm_FileDiscovery(in, out, s)
}
func autoConvert_kubeadm_FileDiscovery_To_v1beta2_FileDiscovery(in *kubeadm.FileDiscovery, out *FileDiscovery, s conversion.Scope) error {
out.KubeConfigPath = in.KubeConfigPath
return nil
}
// Convert_kubeadm_FileDiscovery_To_v1beta2_FileDiscovery is an autogenerated conversion function.
func Convert_kubeadm_FileDiscovery_To_v1beta2_FileDiscovery(in *kubeadm.FileDiscovery, out *FileDiscovery, s conversion.Scope) error {
return autoConvert_kubeadm_FileDiscovery_To_v1beta2_FileDiscovery(in, out, s)
}
func autoConvert_v1beta2_HostPathMount_To_kubeadm_HostPathMount(in *HostPathMount, out *kubeadm.HostPathMount, s conversion.Scope) error {
out.Name = in.Name
out.HostPath = in.HostPath
out.MountPath = in.MountPath
out.ReadOnly = in.ReadOnly
out.PathType = corev1.HostPathType(in.PathType)
return nil
}
// Convert_v1beta2_HostPathMount_To_kubeadm_HostPathMount is an autogenerated conversion function.
func Convert_v1beta2_HostPathMount_To_kubeadm_HostPathMount(in *HostPathMount, out *kubeadm.HostPathMount, s conversion.Scope) error {
return autoConvert_v1beta2_HostPathMount_To_kubeadm_HostPathMount(in, out, s)
}
func autoConvert_kubeadm_HostPathMount_To_v1beta2_HostPathMount(in *kubeadm.HostPathMount, out *HostPathMount, s conversion.Scope) error {
out.Name = in.Name
out.HostPath = in.HostPath
out.MountPath = in.MountPath
out.ReadOnly = in.ReadOnly
out.PathType = corev1.HostPathType(in.PathType)
return nil
}
// Convert_kubeadm_HostPathMount_To_v1beta2_HostPathMount is an autogenerated conversion function.
func Convert_kubeadm_HostPathMount_To_v1beta2_HostPathMount(in *kubeadm.HostPathMount, out *HostPathMount, s conversion.Scope) error {
return autoConvert_kubeadm_HostPathMount_To_v1beta2_HostPathMount(in, out, s)
}
func autoConvert_v1beta2_ImageMeta_To_kubeadm_ImageMeta(in *ImageMeta, out *kubeadm.ImageMeta, s conversion.Scope) error {
out.ImageRepository = in.ImageRepository
out.ImageTag = in.ImageTag
return nil
}
// Convert_v1beta2_ImageMeta_To_kubeadm_ImageMeta is an autogenerated conversion function.
func Convert_v1beta2_ImageMeta_To_kubeadm_ImageMeta(in *ImageMeta, out *kubeadm.ImageMeta, s conversion.Scope) error {
return autoConvert_v1beta2_ImageMeta_To_kubeadm_ImageMeta(in, out, s)
}
func autoConvert_kubeadm_ImageMeta_To_v1beta2_ImageMeta(in *kubeadm.ImageMeta, out *ImageMeta, s conversion.Scope) error {
out.ImageRepository = in.ImageRepository
out.ImageTag = in.ImageTag
return nil
}
// Convert_kubeadm_ImageMeta_To_v1beta2_ImageMeta is an autogenerated conversion function.
func Convert_kubeadm_ImageMeta_To_v1beta2_ImageMeta(in *kubeadm.ImageMeta, out *ImageMeta, s conversion.Scope) error {
return autoConvert_kubeadm_ImageMeta_To_v1beta2_ImageMeta(in, out, s)
}
func autoConvert_v1beta2_InitConfiguration_To_kubeadm_InitConfiguration(in *InitConfiguration, out *kubeadm.InitConfiguration, s conversion.Scope) error {
if err := Convert_v1beta2_ClusterConfiguration_To_kubeadm_ClusterConfiguration(&in.ClusterConfiguration, &out.ClusterConfiguration, s); err != nil {
return err
}
out.BootstrapTokens = *(*[]kubeadm.BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens))
if err := Convert_v1beta2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil {
return err
}
if err := Convert_v1beta2_APIEndpoint_To_kubeadm_APIEndpoint(&in.LocalAPIEndpoint, &out.LocalAPIEndpoint, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_InitConfiguration_To_kubeadm_InitConfiguration is an autogenerated conversion function.
func Convert_v1beta2_InitConfiguration_To_kubeadm_InitConfiguration(in *InitConfiguration, out *kubeadm.InitConfiguration, s conversion.Scope) error {
return autoConvert_v1beta2_InitConfiguration_To_kubeadm_InitConfiguration(in, out, s)
}
func autoConvert_kubeadm_InitConfiguration_To_v1beta2_InitConfiguration(in *kubeadm.InitConfiguration, out *InitConfiguration, s conversion.Scope) error {
if err := Convert_kubeadm_ClusterConfiguration_To_v1beta2_ClusterConfiguration(&in.ClusterConfiguration, &out.ClusterConfiguration, s); err != nil {
return err
}
out.BootstrapTokens = *(*[]BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens))
if err := Convert_kubeadm_NodeRegistrationOptions_To_v1beta2_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil {
return err
}
if err := Convert_kubeadm_APIEndpoint_To_v1beta2_APIEndpoint(&in.LocalAPIEndpoint, &out.LocalAPIEndpoint, s); err != nil {
return err
}
return nil
}
// Convert_kubeadm_InitConfiguration_To_v1beta2_InitConfiguration is an autogenerated conversion function.
func Convert_kubeadm_InitConfiguration_To_v1beta2_InitConfiguration(in *kubeadm.InitConfiguration, out *InitConfiguration, s conversion.Scope) error {
return autoConvert_kubeadm_InitConfiguration_To_v1beta2_InitConfiguration(in, out, s)
}
func autoConvert_v1beta2_JoinConfiguration_To_kubeadm_JoinConfiguration(in *JoinConfiguration, out *kubeadm.JoinConfiguration, s conversion.Scope) error {
if err := Convert_v1beta2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil {
return err
}
out.CACertPath = in.CACertPath
if err := Convert_v1beta2_Discovery_To_kubeadm_Discovery(&in.Discovery, &out.Discovery, s); err != nil {
return err
}
out.ControlPlane = (*kubeadm.JoinControlPlane)(unsafe.Pointer(in.ControlPlane))
return nil
}
// Convert_v1beta2_JoinConfiguration_To_kubeadm_JoinConfiguration is an autogenerated conversion function.
func Convert_v1beta2_JoinConfiguration_To_kubeadm_JoinConfiguration(in *JoinConfiguration, out *kubeadm.JoinConfiguration, s conversion.Scope) error {
return autoConvert_v1beta2_JoinConfiguration_To_kubeadm_JoinConfiguration(in, out, s)
}
func autoConvert_kubeadm_JoinConfiguration_To_v1beta2_JoinConfiguration(in *kubeadm.JoinConfiguration, out *JoinConfiguration, s conversion.Scope) error {
if err := Convert_kubeadm_NodeRegistrationOptions_To_v1beta2_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil {
return err
}
out.CACertPath = in.CACertPath
if err := Convert_kubeadm_Discovery_To_v1beta2_Discovery(&in.Discovery, &out.Discovery, s); err != nil {
return err
}
out.ControlPlane = (*JoinControlPlane)(unsafe.Pointer(in.ControlPlane))
return nil
}
// Convert_kubeadm_JoinConfiguration_To_v1beta2_JoinConfiguration is an autogenerated conversion function.
func Convert_kubeadm_JoinConfiguration_To_v1beta2_JoinConfiguration(in *kubeadm.JoinConfiguration, out *JoinConfiguration, s conversion.Scope) error {
return autoConvert_kubeadm_JoinConfiguration_To_v1beta2_JoinConfiguration(in, out, s)
}
func autoConvert_v1beta2_JoinControlPlane_To_kubeadm_JoinControlPlane(in *JoinControlPlane, out *kubeadm.JoinControlPlane, s conversion.Scope) error {
if err := Convert_v1beta2_APIEndpoint_To_kubeadm_APIEndpoint(&in.LocalAPIEndpoint, &out.LocalAPIEndpoint, s); err != nil {
return err
}
return nil
}
// Convert_v1beta2_JoinControlPlane_To_kubeadm_JoinControlPlane is an autogenerated conversion function.
func Convert_v1beta2_JoinControlPlane_To_kubeadm_JoinControlPlane(in *JoinControlPlane, out *kubeadm.JoinControlPlane, s conversion.Scope) error {
return autoConvert_v1beta2_JoinControlPlane_To_kubeadm_JoinControlPlane(in, out, s)
}
func autoConvert_kubeadm_JoinControlPlane_To_v1beta2_JoinControlPlane(in *kubeadm.JoinControlPlane, out *JoinControlPlane, s conversion.Scope) error {
if err := Convert_kubeadm_APIEndpoint_To_v1beta2_APIEndpoint(&in.LocalAPIEndpoint, &out.LocalAPIEndpoint, s); err != nil {
return err
}
return nil
}
// Convert_kubeadm_JoinControlPlane_To_v1beta2_JoinControlPlane is an autogenerated conversion function.
func Convert_kubeadm_JoinControlPlane_To_v1beta2_JoinControlPlane(in *kubeadm.JoinControlPlane, out *JoinControlPlane, s conversion.Scope) error {
return autoConvert_kubeadm_JoinControlPlane_To_v1beta2_JoinControlPlane(in, out, s)
}
func autoConvert_v1beta2_LocalEtcd_To_kubeadm_LocalEtcd(in *LocalEtcd, out *kubeadm.LocalEtcd, s conversion.Scope) error {
if err := Convert_v1beta2_ImageMeta_To_kubeadm_ImageMeta(&in.ImageMeta, &out.ImageMeta, s); err != nil {
return err
}
out.DataDir = in.DataDir
out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs))
out.ServerCertSANs = *(*[]string)(unsafe.Pointer(&in.ServerCertSANs))
out.PeerCertSANs = *(*[]string)(unsafe.Pointer(&in.PeerCertSANs))
return nil
}
// Convert_v1beta2_LocalEtcd_To_kubeadm_LocalEtcd is an autogenerated conversion function.
func Convert_v1beta2_LocalEtcd_To_kubeadm_LocalEtcd(in *LocalEtcd, out *kubeadm.LocalEtcd, s conversion.Scope) error {
return autoConvert_v1beta2_LocalEtcd_To_kubeadm_LocalEtcd(in, out, s)
}
func autoConvert_kubeadm_LocalEtcd_To_v1beta2_LocalEtcd(in *kubeadm.LocalEtcd, out *LocalEtcd, s conversion.Scope) error {
if err := Convert_kubeadm_ImageMeta_To_v1beta2_ImageMeta(&in.ImageMeta, &out.ImageMeta, s); err != nil {
return err
}
out.DataDir = in.DataDir
out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs))
out.ServerCertSANs = *(*[]string)(unsafe.Pointer(&in.ServerCertSANs))
out.PeerCertSANs = *(*[]string)(unsafe.Pointer(&in.PeerCertSANs))
return nil
}
// Convert_kubeadm_LocalEtcd_To_v1beta2_LocalEtcd is an autogenerated conversion function.
func Convert_kubeadm_LocalEtcd_To_v1beta2_LocalEtcd(in *kubeadm.LocalEtcd, out *LocalEtcd, s conversion.Scope) error {
return autoConvert_kubeadm_LocalEtcd_To_v1beta2_LocalEtcd(in, out, s)
}
func autoConvert_v1beta2_Networking_To_kubeadm_Networking(in *Networking, out *kubeadm.Networking, s conversion.Scope) error {
out.ServiceSubnet = in.ServiceSubnet
out.PodSubnet = in.PodSubnet
out.DNSDomain = in.DNSDomain
return nil
}
// Convert_v1beta2_Networking_To_kubeadm_Networking is an autogenerated conversion function.
func Convert_v1beta2_Networking_To_kubeadm_Networking(in *Networking, out *kubeadm.Networking, s conversion.Scope) error {
return autoConvert_v1beta2_Networking_To_kubeadm_Networking(in, out, s)
}
func autoConvert_kubeadm_Networking_To_v1beta2_Networking(in *kubeadm.Networking, out *Networking, s conversion.Scope) error {
out.ServiceSubnet = in.ServiceSubnet
out.PodSubnet = in.PodSubnet
out.DNSDomain = in.DNSDomain
return nil
}
// Convert_kubeadm_Networking_To_v1beta2_Networking is an autogenerated conversion function.
func Convert_kubeadm_Networking_To_v1beta2_Networking(in *kubeadm.Networking, out *Networking, s conversion.Scope) error {
return autoConvert_kubeadm_Networking_To_v1beta2_Networking(in, out, s)
}
func autoConvert_v1beta2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(in *NodeRegistrationOptions, out *kubeadm.NodeRegistrationOptions, s conversion.Scope) error {
out.Name = in.Name
out.CRISocket = in.CRISocket
out.Taints = *(*[]corev1.Taint)(unsafe.Pointer(&in.Taints))
out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs))
return nil
}
// Convert_v1beta2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions is an autogenerated conversion function.
func Convert_v1beta2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(in *NodeRegistrationOptions, out *kubeadm.NodeRegistrationOptions, s conversion.Scope) error {
return autoConvert_v1beta2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(in, out, s)
}
func autoConvert_kubeadm_NodeRegistrationOptions_To_v1beta2_NodeRegistrationOptions(in *kubeadm.NodeRegistrationOptions, out *NodeRegistrationOptions, s conversion.Scope) error {
out.Name = in.Name
out.CRISocket = in.CRISocket
out.Taints = *(*[]corev1.Taint)(unsafe.Pointer(&in.Taints))
out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs))
return nil
}
// Convert_kubeadm_NodeRegistrationOptions_To_v1beta2_NodeRegistrationOptions is an autogenerated conversion function.
func Convert_kubeadm_NodeRegistrationOptions_To_v1beta2_NodeRegistrationOptions(in *kubeadm.NodeRegistrationOptions, out *NodeRegistrationOptions, s conversion.Scope) error {
return autoConvert_kubeadm_NodeRegistrationOptions_To_v1beta2_NodeRegistrationOptions(in, out, s)
}

View File

@ -0,0 +1,552 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta2
import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APIEndpoint) DeepCopyInto(out *APIEndpoint) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIEndpoint.
func (in *APIEndpoint) DeepCopy() *APIEndpoint {
if in == nil {
return nil
}
out := new(APIEndpoint)
in.DeepCopyInto(out)
return out
}
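// Illustrative usage sketch (not generated code): DeepCopy returns an independent copy, so
// mutating the copy leaves the original untouched.
func exampleEndpointCopy(in *APIEndpoint) *APIEndpoint {
	out := in.DeepCopy()
	out.BindPort = 6443 // only the copy changes
	return out
}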
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APIServer) DeepCopyInto(out *APIServer) {
*out = *in
in.ControlPlaneComponent.DeepCopyInto(&out.ControlPlaneComponent)
if in.CertSANs != nil {
in, out := &in.CertSANs, &out.CertSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.TimeoutForControlPlane != nil {
in, out := &in.TimeoutForControlPlane, &out.TimeoutForControlPlane
*out = new(v1.Duration)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer.
func (in *APIServer) DeepCopy() *APIServer {
if in == nil {
return nil
}
out := new(APIServer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BootstrapToken) DeepCopyInto(out *BootstrapToken) {
*out = *in
if in.Token != nil {
in, out := &in.Token, &out.Token
*out = new(BootstrapTokenString)
**out = **in
}
if in.TTL != nil {
in, out := &in.TTL, &out.TTL
*out = new(v1.Duration)
**out = **in
}
if in.Expires != nil {
in, out := &in.Expires, &out.Expires
*out = (*in).DeepCopy()
}
if in.Usages != nil {
in, out := &in.Usages, &out.Usages
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Groups != nil {
in, out := &in.Groups, &out.Groups
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapToken.
func (in *BootstrapToken) DeepCopy() *BootstrapToken {
if in == nil {
return nil
}
out := new(BootstrapToken)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BootstrapTokenDiscovery) DeepCopyInto(out *BootstrapTokenDiscovery) {
*out = *in
if in.CACertHashes != nil {
in, out := &in.CACertHashes, &out.CACertHashes
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapTokenDiscovery.
func (in *BootstrapTokenDiscovery) DeepCopy() *BootstrapTokenDiscovery {
if in == nil {
return nil
}
out := new(BootstrapTokenDiscovery)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BootstrapTokenString) DeepCopyInto(out *BootstrapTokenString) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapTokenString.
func (in *BootstrapTokenString) DeepCopy() *BootstrapTokenString {
if in == nil {
return nil
}
out := new(BootstrapTokenString)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterConfiguration) DeepCopyInto(out *ClusterConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Etcd.DeepCopyInto(&out.Etcd)
out.Networking = in.Networking
in.APIServer.DeepCopyInto(&out.APIServer)
in.ControllerManager.DeepCopyInto(&out.ControllerManager)
in.Scheduler.DeepCopyInto(&out.Scheduler)
out.DNS = in.DNS
if in.FeatureGates != nil {
in, out := &in.FeatureGates, &out.FeatureGates
*out = make(map[string]bool, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfiguration.
func (in *ClusterConfiguration) DeepCopy() *ClusterConfiguration {
if in == nil {
return nil
}
out := new(ClusterConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.APIEndpoints != nil {
in, out := &in.APIEndpoints, &out.APIEndpoints
*out = make(map[string]APIEndpoint, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
func (in *ClusterStatus) DeepCopy() *ClusterStatus {
if in == nil {
return nil
}
out := new(ClusterStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterStatus) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControlPlaneComponent) DeepCopyInto(out *ControlPlaneComponent) {
*out = *in
if in.ExtraArgs != nil {
in, out := &in.ExtraArgs, &out.ExtraArgs
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ExtraVolumes != nil {
in, out := &in.ExtraVolumes, &out.ExtraVolumes
*out = make([]HostPathMount, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneComponent.
func (in *ControlPlaneComponent) DeepCopy() *ControlPlaneComponent {
if in == nil {
return nil
}
out := new(ControlPlaneComponent)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DNS) DeepCopyInto(out *DNS) {
*out = *in
out.ImageMeta = in.ImageMeta
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS.
func (in *DNS) DeepCopy() *DNS {
if in == nil {
return nil
}
out := new(DNS)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Discovery) DeepCopyInto(out *Discovery) {
*out = *in
if in.BootstrapToken != nil {
in, out := &in.BootstrapToken, &out.BootstrapToken
*out = new(BootstrapTokenDiscovery)
(*in).DeepCopyInto(*out)
}
if in.File != nil {
in, out := &in.File, &out.File
*out = new(FileDiscovery)
**out = **in
}
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(v1.Duration)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Discovery.
func (in *Discovery) DeepCopy() *Discovery {
if in == nil {
return nil
}
out := new(Discovery)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Etcd) DeepCopyInto(out *Etcd) {
*out = *in
if in.Local != nil {
in, out := &in.Local, &out.Local
*out = new(LocalEtcd)
(*in).DeepCopyInto(*out)
}
if in.External != nil {
in, out := &in.External, &out.External
*out = new(ExternalEtcd)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Etcd.
func (in *Etcd) DeepCopy() *Etcd {
if in == nil {
return nil
}
out := new(Etcd)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalEtcd) DeepCopyInto(out *ExternalEtcd) {
*out = *in
if in.Endpoints != nil {
in, out := &in.Endpoints, &out.Endpoints
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalEtcd.
func (in *ExternalEtcd) DeepCopy() *ExternalEtcd {
if in == nil {
return nil
}
out := new(ExternalEtcd)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FileDiscovery) DeepCopyInto(out *FileDiscovery) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileDiscovery.
func (in *FileDiscovery) DeepCopy() *FileDiscovery {
if in == nil {
return nil
}
out := new(FileDiscovery)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostPathMount) DeepCopyInto(out *HostPathMount) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathMount.
func (in *HostPathMount) DeepCopy() *HostPathMount {
if in == nil {
return nil
}
out := new(HostPathMount)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageMeta) DeepCopyInto(out *ImageMeta) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageMeta.
func (in *ImageMeta) DeepCopy() *ImageMeta {
if in == nil {
return nil
}
out := new(ImageMeta)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InitConfiguration) DeepCopyInto(out *InitConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ClusterConfiguration.DeepCopyInto(&out.ClusterConfiguration)
if in.BootstrapTokens != nil {
in, out := &in.BootstrapTokens, &out.BootstrapTokens
*out = make([]BootstrapToken, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.NodeRegistration.DeepCopyInto(&out.NodeRegistration)
out.LocalAPIEndpoint = in.LocalAPIEndpoint
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitConfiguration.
func (in *InitConfiguration) DeepCopy() *InitConfiguration {
if in == nil {
return nil
}
out := new(InitConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *InitConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JoinConfiguration) DeepCopyInto(out *JoinConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
in.NodeRegistration.DeepCopyInto(&out.NodeRegistration)
in.Discovery.DeepCopyInto(&out.Discovery)
if in.ControlPlane != nil {
in, out := &in.ControlPlane, &out.ControlPlane
*out = new(JoinControlPlane)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JoinConfiguration.
func (in *JoinConfiguration) DeepCopy() *JoinConfiguration {
if in == nil {
return nil
}
out := new(JoinConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *JoinConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JoinControlPlane) DeepCopyInto(out *JoinControlPlane) {
*out = *in
out.LocalAPIEndpoint = in.LocalAPIEndpoint
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JoinControlPlane.
func (in *JoinControlPlane) DeepCopy() *JoinControlPlane {
if in == nil {
return nil
}
out := new(JoinControlPlane)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalEtcd) DeepCopyInto(out *LocalEtcd) {
*out = *in
out.ImageMeta = in.ImageMeta
if in.ExtraArgs != nil {
in, out := &in.ExtraArgs, &out.ExtraArgs
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ServerCertSANs != nil {
in, out := &in.ServerCertSANs, &out.ServerCertSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PeerCertSANs != nil {
in, out := &in.PeerCertSANs, &out.PeerCertSANs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalEtcd.
func (in *LocalEtcd) DeepCopy() *LocalEtcd {
if in == nil {
return nil
}
out := new(LocalEtcd)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Networking) DeepCopyInto(out *Networking) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking.
func (in *Networking) DeepCopy() *Networking {
if in == nil {
return nil
}
out := new(Networking)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeRegistrationOptions) DeepCopyInto(out *NodeRegistrationOptions) {
*out = *in
if in.Taints != nil {
in, out := &in.Taints, &out.Taints
*out = make([]corev1.Taint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.KubeletExtraArgs != nil {
in, out := &in.KubeletExtraArgs, &out.KubeletExtraArgs
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRegistrationOptions.
func (in *NodeRegistrationOptions) DeepCopy() *NodeRegistrationOptions {
if in == nil {
return nil
}
out := new(NodeRegistrationOptions)
in.DeepCopyInto(out)
return out
}
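
The helpers above follow the standard deepcopy-gen contract: DeepCopyInto copies into caller-provided storage, DeepCopy allocates the target, and DeepCopyObject satisfies runtime.Object for the top-level configuration kinds. A minimal usage sketch of the generated ClusterConfiguration helpers follows; the gate name "SomeGate" and the main wrapper are illustrative only, not part of this change.

package main

import (
	"fmt"

	kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
)

func main() {
	orig := &kubeadmapiv1beta2.ClusterConfiguration{
		FeatureGates: map[string]bool{"SomeGate": true}, // hypothetical gate name
	}

	// DeepCopy allocates fresh backing storage for nested maps and slices,
	// so mutating the copy leaves the original untouched.
	cp := orig.DeepCopy()
	cp.FeatureGates["SomeGate"] = false

	fmt.Println(orig.FeatureGates["SomeGate"]) // still true
}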

View File

@ -0,0 +1,66 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by defaulter-gen. DO NOT EDIT.
package v1beta2
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
scheme.AddTypeDefaultingFunc(&ClusterConfiguration{}, func(obj interface{}) { SetObjectDefaults_ClusterConfiguration(obj.(*ClusterConfiguration)) })
scheme.AddTypeDefaultingFunc(&ClusterStatus{}, func(obj interface{}) { SetObjectDefaults_ClusterStatus(obj.(*ClusterStatus)) })
scheme.AddTypeDefaultingFunc(&InitConfiguration{}, func(obj interface{}) { SetObjectDefaults_InitConfiguration(obj.(*InitConfiguration)) })
scheme.AddTypeDefaultingFunc(&JoinConfiguration{}, func(obj interface{}) { SetObjectDefaults_JoinConfiguration(obj.(*JoinConfiguration)) })
return nil
}
func SetObjectDefaults_ClusterConfiguration(in *ClusterConfiguration) {
SetDefaults_ClusterConfiguration(in)
SetDefaults_APIServer(&in.APIServer)
}
func SetObjectDefaults_ClusterStatus(in *ClusterStatus) {
}
func SetObjectDefaults_InitConfiguration(in *InitConfiguration) {
SetDefaults_InitConfiguration(in)
SetObjectDefaults_ClusterConfiguration(&in.ClusterConfiguration)
for i := range in.BootstrapTokens {
a := &in.BootstrapTokens[i]
SetDefaults_BootstrapToken(a)
}
SetDefaults_APIEndpoint(&in.LocalAPIEndpoint)
}
func SetObjectDefaults_JoinConfiguration(in *JoinConfiguration) {
SetDefaults_JoinConfiguration(in)
SetDefaults_Discovery(&in.Discovery)
if in.Discovery.File != nil {
SetDefaults_FileDiscovery(in.Discovery.File)
}
if in.ControlPlane != nil {
SetDefaults_JoinControlPlane(in.ControlPlane)
SetDefaults_APIEndpoint(&in.ControlPlane.LocalAPIEndpoint)
}
}
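
RegisterDefaults is what lets Scheme.Default(cfg), used by the kubeadm commands further down in this diff, apply these defaulters to v1beta2 objects. A minimal sketch against a bare scheme follows; building a standalone runtime.Scheme here (rather than using kubeadm's own scheme package) and the main wrapper are illustrative assumptions.

package main

import (
	runtime "k8s.io/apimachinery/pkg/runtime"

	kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
)

func main() {
	scheme := runtime.NewScheme()
	if err := kubeadmapiv1beta2.RegisterDefaults(scheme); err != nil {
		panic(err)
	}

	cfg := &kubeadmapiv1beta2.InitConfiguration{}
	// Default looks up the type-defaulting func registered above, which in
	// turn calls the nested SetDefaults_* helpers for the embedded types.
	scheme.Default(cfg)
}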

View File

@ -7,7 +7,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library",
"//cmd/kubeadm/app/cmd/options:go_default_library",
"//cmd/kubeadm/app/componentconfigs:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
@ -31,7 +31,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library",
"//pkg/proxy/apis/config:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",

View File

@ -33,7 +33,7 @@ import (
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
bootstraputil "k8s.io/cluster-bootstrap/token/util"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
kubeadmcmdoptions "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
"k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
@ -148,7 +148,7 @@ func ValidateDiscoveryBootstrapToken(b *kubeadm.BootstrapTokenDiscovery, fldPath
}
allErrs = append(allErrs, ValidateToken(b.Token, fldPath.Child(kubeadmcmdoptions.TokenStr))...)
allErrs = append(allErrs, ValidateDiscoveryTokenAPIServer(b.APIServerEndpoint, fldPath.Child("apiServerEndpoints"))...)
allErrs = append(allErrs, ValidateDiscoveryTokenAPIServer(b.APIServerEndpoint, fldPath.Child("apiServerEndpoint"))...)
return allErrs
}
@ -488,7 +488,7 @@ func ValidateSocketPath(socket string, fldPath *field.Path) field.ErrorList {
if !filepath.IsAbs(u.Path) {
return append(allErrs, field.Invalid(fldPath, socket, fmt.Sprintf("path is not absolute: %s", socket)))
}
} else if u.Scheme != kubeadmapiv1beta1.DefaultUrlScheme {
} else if u.Scheme != kubeadmapiv1beta2.DefaultUrlScheme {
return append(allErrs, field.Invalid(fldPath, socket, fmt.Sprintf("URL scheme %s is not supported", u.Scheme)))
}

View File

@ -26,7 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
utilpointer "k8s.io/utils/pointer"
)
@ -113,7 +113,7 @@ func TestValidateNodeRegistrationOptions(t *testing.T) {
{"invalid-node?name", "/some/path", true}, // Unsupported characters
{"valid-nodename", "/some/path", false}, // supported
{"valid-nodename-with-numbers01234", "/some/path/with/numbers/01234/", false}, // supported, with numbers as well
{"valid-nodename", kubeadmapiv1beta1.DefaultUrlScheme + "://" + "/some/path", false}, // supported, with socket url
{"valid-nodename", kubeadmapiv1beta2.DefaultUrlScheme + "://" + "/some/path", false}, // supported, with socket url
{"valid-nodename", "bla:///some/path", true}, // unsupported url scheme
{"valid-nodename", ":::", true}, // unparseable url
}

View File

@ -22,7 +22,7 @@ go_library(
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/scheme:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library",
"//cmd/kubeadm/app/cmd/alpha:go_default_library",
"//cmd/kubeadm/app/cmd/options:go_default_library",
@ -85,7 +85,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library",
"//cmd/kubeadm/app/cmd/options:go_default_library",
"//cmd/kubeadm/app/componentconfigs:go_default_library",

View File

@ -13,7 +13,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm/scheme:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library",
"//cmd/kubeadm/app/cmd/options:go_default_library",
"//cmd/kubeadm/app/cmd/phases:go_default_library",

View File

@ -21,9 +21,10 @@ import (
"github.com/spf13/cobra"
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/renewal"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
@ -73,7 +74,7 @@ func newCmdCertsRenewal() *cobra.Command {
type renewConfig struct {
cfgPath string
kubeconfigPath string
cfg kubeadmapiv1beta1.InitConfiguration
cfg kubeadmapiv1beta2.InitConfiguration
useAPI bool
useCSR bool
csrPath string
@ -145,11 +146,30 @@ func generateRenewalFunction(cert *certsphase.KubeadmCert, caCert *certsphase.Ku
return
}
renewer, err := getRenewer(cfg, caCert.BaseName)
kubeadmutil.CheckErr(err)
var externalCA bool
switch caCert.BaseName {
case kubeadmconstants.CACertAndKeyBaseName:
// Check if an external CA is provided by the user (when the CA Cert is present but the CA Key is not)
externalCA, _ = certsphase.UsingExternalCA(&internalcfg.ClusterConfiguration)
case kubeadmconstants.FrontProxyCACertAndKeyBaseName:
// Check if an external Front-Proxy CA is provided by the user (when the Front-Proxy CA Cert is present but the Front-Proxy CA Key is not)
externalCA, _ = certsphase.UsingExternalFrontProxyCA(&internalcfg.ClusterConfiguration)
default:
externalCA = false
}
err = renewal.RenewExistingCert(internalcfg.CertificatesDir, cert.BaseName, renewer)
kubeadmutil.CheckErr(err)
if !externalCA {
renewer, err := getRenewer(cfg, caCert.BaseName)
kubeadmutil.CheckErr(err)
err = renewal.RenewExistingCert(internalcfg.CertificatesDir, cert.BaseName, renewer)
kubeadmutil.CheckErr(err)
fmt.Printf("Certificate %s renewed\n", cert.Name)
return
}
fmt.Printf("Detected external %s, certificate %s can't be renewed\n", cert.CAName, cert.Name)
}
}

View File

@ -209,11 +209,13 @@ func TestRunRenewCommands(t *testing.T) {
t.Errorf("couldn't verify renewed cert: %v", err)
}
pubKey, ok := newCert.PublicKey.(*rsa.PublicKey)
if !ok {
switch pubKey := newCert.PublicKey.(type) {
case *rsa.PublicKey:
if pubKey.N.Cmp(newKey.(*rsa.PrivateKey).N) != 0 {
t.Error("private key does not match public key")
}
default:
t.Errorf("unknown public key type %T", newCert.PublicKey)
} else if pubKey.N.Cmp(newKey.N) != 0 {
t.Error("private key does not match public key")
}
}

View File

@ -22,7 +22,7 @@ import (
"github.com/pkg/errors"
"github.com/spf13/cobra"
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
@ -61,7 +61,7 @@ func newCmdKubeConfigUtility(out io.Writer) *cobra.Command {
// newCmdUserKubeConfig returns sub commands for kubeconfig phase
func newCmdUserKubeConfig(out io.Writer) *cobra.Command {
cfg := &kubeadmapiv1beta1.InitConfiguration{}
cfg := &kubeadmapiv1beta2.InitConfiguration{}
// Default values for the cobra help text
kubeadmscheme.Scheme.Default(cfg)

View File

@ -26,7 +26,7 @@ import (
"github.com/spf13/cobra"
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases"
@ -75,7 +75,7 @@ func NewCmdSelfhosting(in io.Reader) *cobra.Command {
// getSelfhostingSubCommand returns sub commands for Self-hosting phase
func getSelfhostingSubCommand(in io.Reader) *cobra.Command {
cfg := &kubeadmapiv1beta1.InitConfiguration{}
cfg := &kubeadmapiv1beta2.InitConfiguration{}
// Default values for the cobra help text
kubeadmscheme.Scheme.Default(cfg)

View File

@ -32,7 +32,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
phaseutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases"
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
@ -50,8 +50,8 @@ import (
var (
// placeholderToken is only set statically to make kubeadm not randomize the token on every run
placeholderToken = kubeadmapiv1beta1.BootstrapToken{
Token: &kubeadmapiv1beta1.BootstrapTokenString{
placeholderToken = kubeadmapiv1beta2.BootstrapToken{
Token: &kubeadmapiv1beta2.BootstrapTokenString{
ID: "abcdef",
Secret: "0123456789abcdef",
},
@ -176,14 +176,14 @@ func getSupportedComponentConfigAPIObjects() []string {
}
func getDefaultedInitConfig() (*kubeadmapi.InitConfiguration, error) {
return configutil.DefaultedInitConfiguration(&kubeadmapiv1beta1.InitConfiguration{
return configutil.DefaultedInitConfiguration(&kubeadmapiv1beta2.InitConfiguration{
// TODO: Probably move to getDefaultedClusterConfig?
LocalAPIEndpoint: kubeadmapiv1beta1.APIEndpoint{AdvertiseAddress: "1.2.3.4"},
ClusterConfiguration: kubeadmapiv1beta1.ClusterConfiguration{
LocalAPIEndpoint: kubeadmapiv1beta2.APIEndpoint{AdvertiseAddress: "1.2.3.4"},
ClusterConfiguration: kubeadmapiv1beta2.ClusterConfiguration{
KubernetesVersion: fmt.Sprintf("v1.%d.0", constants.MinimumControlPlaneVersion.Minor()+1),
},
BootstrapTokens: []kubeadmapiv1beta1.BootstrapToken{placeholderToken},
NodeRegistration: kubeadmapiv1beta1.NodeRegistrationOptions{
BootstrapTokens: []kubeadmapiv1beta2.BootstrapToken{placeholderToken},
NodeRegistration: kubeadmapiv1beta2.NodeRegistrationOptions{
CRISocket: constants.DefaultDockerCRISocket, // avoid CRI detection
},
})
@ -199,15 +199,15 @@ func getDefaultInitConfigBytes() ([]byte, error) {
}
func getDefaultNodeConfigBytes() ([]byte, error) {
internalcfg, err := configutil.DefaultedJoinConfiguration(&kubeadmapiv1beta1.JoinConfiguration{
Discovery: kubeadmapiv1beta1.Discovery{
BootstrapToken: &kubeadmapiv1beta1.BootstrapTokenDiscovery{
internalcfg, err := configutil.DefaultedJoinConfiguration(&kubeadmapiv1beta2.JoinConfiguration{
Discovery: kubeadmapiv1beta2.Discovery{
BootstrapToken: &kubeadmapiv1beta2.BootstrapTokenDiscovery{
Token: placeholderToken.Token.String(),
APIServerEndpoint: "kube-apiserver:6443",
UnsafeSkipCAVerification: true, // TODO: UnsafeSkipCAVerification: true needs to be set for validation to pass, but shouldn't be recommended as the default
},
},
NodeRegistration: kubeadmapiv1beta1.NodeRegistrationOptions{
NodeRegistration: kubeadmapiv1beta2.NodeRegistrationOptions{
CRISocket: constants.DefaultDockerCRISocket, // avoid CRI detection
},
})
@ -237,7 +237,7 @@ func NewCmdConfigMigrate(out io.Writer) *cobra.Command {
In other words, the output of this command is what kubeadm actually would read internally if you
submitted this file to "kubeadm init"
`), kubeadmapiv1beta1.SchemeGroupVersion, kubeadmapiv1beta1.SchemeGroupVersion),
`), kubeadmapiv1beta2.SchemeGroupVersion, kubeadmapiv1beta2.SchemeGroupVersion),
Run: func(cmd *cobra.Command, args []string) {
if len(oldCfgPath) == 0 {
kubeadmutil.CheckErr(errors.New("The --old-config flag is mandatory"))
@ -336,7 +336,7 @@ func NewCmdConfigUploadFromFile(out io.Writer, kubeConfigFile *string) *cobra.Co
// NewCmdConfigUploadFromFlags returns cobra.Command for "kubeadm config upload from-flags" command
func NewCmdConfigUploadFromFlags(out io.Writer, kubeConfigFile *string) *cobra.Command {
cfg := &kubeadmapiv1beta1.InitConfiguration{}
cfg := &kubeadmapiv1beta2.InitConfiguration{}
kubeadmscheme.Scheme.Default(cfg)
var featureGatesString string
@ -407,7 +407,7 @@ func NewCmdConfigImages(out io.Writer) *cobra.Command {
// NewCmdConfigImagesPull returns the `kubeadm config images pull` command
func NewCmdConfigImagesPull() *cobra.Command {
externalcfg := &kubeadmapiv1beta1.InitConfiguration{}
externalcfg := &kubeadmapiv1beta2.InitConfiguration{}
kubeadmscheme.Scheme.Default(externalcfg)
var cfgPath, featureGatesString string
var err error
@ -459,7 +459,7 @@ func PullControlPlaneImages(runtime utilruntime.ContainerRuntime, cfg *kubeadmap
// NewCmdConfigImagesList returns the "kubeadm config images list" command
func NewCmdConfigImagesList(out io.Writer, mockK8sVersion *string) *cobra.Command {
externalcfg := &kubeadmapiv1beta1.InitConfiguration{}
externalcfg := &kubeadmapiv1beta2.InitConfiguration{}
kubeadmscheme.Scheme.Default(externalcfg)
var cfgPath, featureGatesString string
var err error
@ -486,7 +486,7 @@ func NewCmdConfigImagesList(out io.Writer, mockK8sVersion *string) *cobra.Comman
}
// NewImagesList returns the underlying struct for the "kubeadm config images list" command
func NewImagesList(cfgPath string, cfg *kubeadmapiv1beta1.InitConfiguration) (*ImagesList, error) {
func NewImagesList(cfgPath string, cfg *kubeadmapiv1beta2.InitConfiguration) (*ImagesList, error) {
initcfg, err := configutil.LoadOrDefaultInitConfiguration(cfgPath, cfg)
if err != nil {
return nil, errors.Wrap(err, "could not convert cfg to an internal cfg")
@ -513,7 +513,7 @@ func (i *ImagesList) Run(out io.Writer) error {
}
// AddImagesCommonConfigFlags adds the flags that configure kubeadm (and affect the images kubeadm will use)
func AddImagesCommonConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1beta1.InitConfiguration, cfgPath *string, featureGatesString *string) {
func AddImagesCommonConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1beta2.InitConfiguration, cfgPath *string, featureGatesString *string) {
options.AddKubernetesVersionFlag(flagSet, &cfg.ClusterConfiguration.KubernetesVersion)
options.AddFeatureGatesStringFlag(flagSet, featureGatesString)
options.AddImageMetaFlags(flagSet, &cfg.ImageRepository)

View File

@ -30,7 +30,7 @@ import (
"github.com/lithammer/dedent"
"github.com/spf13/cobra"
kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
"k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
@ -74,7 +74,7 @@ func TestImagesListRunWithCustomConfigPath(t *testing.T) {
constants.CurrentKubernetesVersion.String(),
},
configContents: []byte(dedent.Dedent(fmt.Sprintf(`
apiVersion: kubeadm.k8s.io/v1beta1
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: %s
`, constants.CurrentKubernetesVersion))),
@ -86,7 +86,7 @@ func TestImagesListRunWithCustomConfigPath(t *testing.T) {
"coredns",
},
configContents: []byte(dedent.Dedent(fmt.Sprintf(`
apiVersion: kubeadm.k8s.io/v1beta1
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: %s
`, constants.MinimumControlPlaneVersion))),
@ -106,8 +106,8 @@ func TestImagesListRunWithCustomConfigPath(t *testing.T) {
t.Fatalf("Failed writing a config file: %v", err)
}
i, err := NewImagesList(configFilePath, &kubeadmapiv1beta1.InitConfiguration{
ClusterConfiguration: kubeadmapiv1beta1.ClusterConfiguration{
i, err := NewImagesList(configFilePath, &kubeadmapiv1beta2.InitConfiguration{
ClusterConfiguration: kubeadmapiv1beta2.ClusterConfiguration{
KubernetesVersion: dummyKubernetesVersion,
},
})
@ -135,33 +135,33 @@ func TestImagesListRunWithCustomConfigPath(t *testing.T) {
func TestConfigImagesListRunWithoutPath(t *testing.T) {
testcases := []struct {
name string
cfg kubeadmapiv1beta1.InitConfiguration
cfg kubeadmapiv1beta2.InitConfiguration
expectedImages int
}{
{
name: "empty config",
expectedImages: defaultNumberOfImages,
cfg: kubeadmapiv1beta1.InitConfiguration{
ClusterConfiguration: kubeadmapiv1beta1.ClusterConfiguration{
cfg: kubeadmapiv1beta2.InitConfiguration{
ClusterConfiguration: kubeadmapiv1beta2.ClusterConfiguration{
KubernetesVersion: dummyKubernetesVersion,
},
NodeRegistration: kubeadmapiv1beta1.NodeRegistrationOptions{
NodeRegistration: kubeadmapiv1beta2.NodeRegistrationOptions{
CRISocket: constants.DefaultDockerCRISocket,
},
},
},
{
name: "external etcd configuration",
cfg: kubeadmapiv1beta1.InitConfiguration{
ClusterConfiguration: kubeadmapiv1beta1.ClusterConfiguration{
Etcd: kubeadmapiv1beta1.Etcd{
External: &kubeadmapiv1beta1.ExternalEtcd{
cfg: kubeadmapiv1beta2.InitConfiguration{
ClusterConfiguration: kubeadmapiv1beta2.ClusterConfiguration{
Etcd: kubeadmapiv1beta2.Etcd{
External: &kubeadmapiv1beta2.ExternalEtcd{
Endpoints: []string{"https://some.etcd.com:2379"},
},
},
KubernetesVersion: dummyKubernetesVersion,
},
NodeRegistration: kubeadmapiv1beta1.NodeRegistrationOptions{
NodeRegistration: kubeadmapiv1beta2.NodeRegistrationOptions{
CRISocket: constants.DefaultDockerCRISocket,
},
},
@ -169,11 +169,11 @@ func TestConfigImagesListRunWithoutPath(t *testing.T) {
},
{
name: "coredns enabled",
cfg: kubeadmapiv1beta1.InitConfiguration{
ClusterConfiguration: kubeadmapiv1beta1.ClusterConfiguration{
cfg: kubeadmapiv1beta2.InitConfiguration{
ClusterConfiguration: kubeadmapiv1beta2.ClusterConfiguration{
KubernetesVersion: dummyKubernetesVersion,
},
NodeRegistration: kubeadmapiv1beta1.NodeRegistrationOptions{
NodeRegistration: kubeadmapiv1beta2.NodeRegistrationOptions{
CRISocket: constants.DefaultDockerCRISocket,
},
},
@ -181,14 +181,14 @@ func TestConfigImagesListRunWithoutPath(t *testing.T) {
},
{
name: "kube-dns enabled",
cfg: kubeadmapiv1beta1.InitConfiguration{
ClusterConfiguration: kubeadmapiv1beta1.ClusterConfiguration{
cfg: kubeadmapiv1beta2.InitConfiguration{
ClusterConfiguration: kubeadmapiv1beta2.ClusterConfiguration{
KubernetesVersion: dummyKubernetesVersion,
DNS: kubeadmapiv1beta1.DNS{
Type: kubeadmapiv1beta1.KubeDNS,
DNS: kubeadmapiv1beta2.DNS{
Type: kubeadmapiv1beta2.KubeDNS,
},
},
NodeRegistration: kubeadmapiv1beta1.NodeRegistrationOptions{
NodeRegistration: kubeadmapiv1beta2.NodeRegistrationOptions{
CRISocket: constants.DefaultDockerCRISocket,
},
},

View File

@ -31,7 +31,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
phases "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/init"
@ -95,7 +95,7 @@ type initOptions struct {
featureGatesString string
ignorePreflightErrors []string
bto *options.BootstrapTokenOptions
externalcfg *kubeadmapiv1beta1.InitConfiguration
externalcfg *kubeadmapiv1beta2.InitConfiguration
uploadCerts bool
certificateKey string
skipCertificateKeyPrint bool
@ -196,7 +196,7 @@ func NewCmdInit(out io.Writer, initOptions *initOptions) *cobra.Command {
}
// AddInitConfigFlags adds init flags bound to the config to the specified flagset
func AddInitConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1beta1.InitConfiguration, featureGatesString *string) {
func AddInitConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1beta2.InitConfiguration, featureGatesString *string) {
flagSet.StringVar(
&cfg.LocalAPIEndpoint.AdvertiseAddress, options.APIServerAdvertiseAddress, cfg.LocalAPIEndpoint.AdvertiseAddress,
"The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.",
@ -269,7 +269,7 @@ func AddInitOtherFlags(flagSet *flag.FlagSet, initOptions *initOptions) {
// newInitOptions returns a struct ready for being used for creating cmd init flags.
func newInitOptions() *initOptions {
// initialize the public kubeadm config API by applying defaults
externalcfg := &kubeadmapiv1beta1.InitConfiguration{}
externalcfg := &kubeadmapiv1beta2.InitConfiguration{}
kubeadmscheme.Scheme.Default(externalcfg)
// Create the options object for the bootstrap token-related flags, and override the default value for .Description

View File

@ -29,7 +29,7 @@ import (
const (
testInitConfig = `---
apiVersion: kubeadm.k8s.io/v1beta1
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: "1.2.3.4"
@ -39,7 +39,7 @@ nodeRegistration:
criSocket: /run/containerd/containerd.sock
name: someName
---
apiVersion: kubeadm.k8s.io/v1beta1
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
controlPlaneEndpoint: "3.4.5.6"
`

View File

@ -33,7 +33,7 @@ import (
"k8s.io/klog"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
phases "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/join"
@ -127,7 +127,7 @@ type joinOptions struct {
token string
controlPlane bool
ignorePreflightErrors []string
externalcfg *kubeadmapiv1beta1.JoinConfiguration
externalcfg *kubeadmapiv1beta2.JoinConfiguration
certificateKey string
}
@ -217,7 +217,7 @@ func NewCmdJoin(out io.Writer, joinOptions *joinOptions) *cobra.Command {
}
// addJoinConfigFlags adds join flags bound to the config to the specified flagset
func addJoinConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1beta1.JoinConfiguration) {
func addJoinConfigFlags(flagSet *flag.FlagSet, cfg *kubeadmapiv1beta2.JoinConfiguration) {
flagSet.StringVar(
&cfg.NodeRegistration.Name, options.NodeName, cfg.NodeRegistration.Name,
`Specify the node name.`,
@ -283,13 +283,13 @@ func addJoinOtherFlags(flagSet *flag.FlagSet, joinOptions *joinOptions) {
// newJoinOptions returns a struct ready for being used for creating cmd join flags.
func newJoinOptions() *joinOptions {
// initialize the public kubeadm config API by applying defaults
externalcfg := &kubeadmapiv1beta1.JoinConfiguration{}
externalcfg := &kubeadmapiv1beta2.JoinConfiguration{}
// Add optional config objects to host flags.
// un-set objects will be cleaned up afterwards (into newJoinData func)
externalcfg.Discovery.File = &kubeadmapiv1beta1.FileDiscovery{}
externalcfg.Discovery.BootstrapToken = &kubeadmapiv1beta1.BootstrapTokenDiscovery{}
externalcfg.ControlPlane = &kubeadmapiv1beta1.JoinControlPlane{}
externalcfg.Discovery.File = &kubeadmapiv1beta2.FileDiscovery{}
externalcfg.Discovery.BootstrapToken = &kubeadmapiv1beta2.BootstrapTokenDiscovery{}
externalcfg.ControlPlane = &kubeadmapiv1beta2.JoinControlPlane{}
// Apply defaults
kubeadmscheme.Scheme.Default(externalcfg)
@ -377,7 +377,7 @@ func newJoinData(cmd *cobra.Command, args []string, opt *joinOptions, out io.Wri
return nil, errors.Errorf("File %s does not exists. Please use 'kubeadm join phase control-plane-prepare' subcommands to generate it.", adminKubeConfigPath)
}
klog.V(1).Infof("[preflight] found discovery flags missing for this command. using FileDiscovery: %s", adminKubeConfigPath)
opt.externalcfg.Discovery.File = &kubeadmapiv1beta1.FileDiscovery{KubeConfigPath: adminKubeConfigPath}
opt.externalcfg.Discovery.File = &kubeadmapiv1beta2.FileDiscovery{KubeConfigPath: adminKubeConfigPath}
opt.externalcfg.Discovery.BootstrapToken = nil //NB. this could be removed when we get better control on args (e.g. phases without discovery should have NoArgs )
}

View File

@ -26,7 +26,7 @@ import (
)
const (
testJoinConfig = `apiVersion: kubeadm.k8s.io/v1beta1
testJoinConfig = `apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
discovery:
bootstrapToken:

View File

@ -12,7 +12,7 @@ go_library(
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options",
visibility = ["//visibility:public"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/features:go_default_library",
"//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library",

View File

@ -23,14 +23,14 @@ import (
"github.com/spf13/pflag"
bootstrapapi "k8s.io/cluster-bootstrap/token/api"
kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
// NewBootstrapTokenOptions creates a new BootstrapTokenOptions object with the default values
func NewBootstrapTokenOptions() *BootstrapTokenOptions {
bto := &BootstrapTokenOptions{&kubeadmapiv1beta1.BootstrapToken{}, ""}
kubeadmapiv1beta1.SetDefaults_BootstrapToken(bto.BootstrapToken)
bto := &BootstrapTokenOptions{&kubeadmapiv1beta2.BootstrapToken{}, ""}
kubeadmapiv1beta2.SetDefaults_BootstrapToken(bto.BootstrapToken)
return bto
}
@ -38,7 +38,7 @@ func NewBootstrapTokenOptions() *BootstrapTokenOptions {
// and applying the parsed flags to an InitConfiguration object later at runtime
// TODO: In the future, we might want to group the flags in a better way than adding them all individually like this
type BootstrapTokenOptions struct {
*kubeadmapiv1beta1.BootstrapToken
*kubeadmapiv1beta2.BootstrapToken
TokenStr string
}
@ -89,16 +89,16 @@ func (bto *BootstrapTokenOptions) AddDescriptionFlag(fs *pflag.FlagSet) {
// ApplyTo applies the values set internally in the BootstrapTokenOptions object to an InitConfiguration object at runtime
// If --token was specified in the CLI (as a string), it's parsed and validated before it's added to the BootstrapToken object.
func (bto *BootstrapTokenOptions) ApplyTo(cfg *kubeadmapiv1beta1.InitConfiguration) error {
func (bto *BootstrapTokenOptions) ApplyTo(cfg *kubeadmapiv1beta2.InitConfiguration) error {
if len(bto.TokenStr) > 0 {
var err error
bto.Token, err = kubeadmapiv1beta1.NewBootstrapTokenString(bto.TokenStr)
bto.Token, err = kubeadmapiv1beta2.NewBootstrapTokenString(bto.TokenStr)
if err != nil {
return err
}
}
// Set the token specified by the flags as the first and only token to create in case --config is not specified
cfg.BootstrapTokens = []kubeadmapiv1beta1.BootstrapToken{*bto.BootstrapToken}
cfg.BootstrapTokens = []kubeadmapiv1beta2.BootstrapToken{*bto.BootstrapToken}
return nil
}
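
For reference, a small sketch of the token parsing that ApplyTo relies on; the token literal is just the placeholder value used elsewhere in this diff, and the main wrapper is an illustrative assumption.

package main

import (
	"fmt"

	kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
)

func main() {
	// NewBootstrapTokenString validates the "<id>.<secret>" form accepted by
	// --token and splits it into its ID and Secret parts.
	tok, err := kubeadmapiv1beta2.NewBootstrapTokenString("abcdef.0123456789abcdef")
	if err != nil {
		panic(err)
	}
	fmt.Println(tok.ID, tok.Secret) // abcdef 0123456789abcdef
}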

View File

@ -6,7 +6,7 @@ go_library(
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases",
visibility = ["//visibility:public"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library",
"//pkg/version:go_default_library",
],
)
@ -16,7 +16,7 @@ go_test(
srcs = ["util_test.go"],
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm/v1beta1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library",
"//pkg/version:go_default_library",
],
)

Some files were not shown because too many files have changed in this diff.