Merge branch 'master' into fix_shellcheck_failure_make-rules

This commit is contained in:
Kohei Toyoda 2019-05-17 08:43:08 +09:00 committed by GitHub
commit 62d5eb524b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
735 changed files with 23006 additions and 10752 deletions

1
.gitattributes vendored
View File

@ -7,5 +7,4 @@ test/test_owners.csv merge=union
**/generated.proto
**/types_swagger_doc_generated.go linguist-generated=true
api/openapi-spec/*.json linguist-generated=true
staging/**/go.mod linguist-generated=true
staging/**/go.sum linguist-generated=true

View File

@ -1,16 +1,23 @@
<!-- BEGIN MUNGE: GENERATED_TOC -->
- [v1.14.1](#v1141)
- [Downloads for v1.14.1](#downloads-for-v1141)
- [v1.14.2](#v1142)
- [Downloads for v1.14.2](#downloads-for-v1142)
- [Client Binaries](#client-binaries)
- [Server Binaries](#server-binaries)
- [Node Binaries](#node-binaries)
- [Changelog since v1.14.0](#changelog-since-v1140)
- [Changelog since v1.14.1](#changelog-since-v1141)
- [Other notable changes](#other-notable-changes)
- [v1.14.0](#v1140)
- [Downloads for v1.14.0](#downloads-for-v1140)
- [v1.14.1](#v1141)
- [Downloads for v1.14.1](#downloads-for-v1141)
- [Client Binaries](#client-binaries-1)
- [Server Binaries](#server-binaries-1)
- [Node Binaries](#node-binaries-1)
- [Changelog since v1.14.0](#changelog-since-v1140)
- [Other notable changes](#other-notable-changes-1)
- [v1.14.0](#v1140)
- [Downloads for v1.14.0](#downloads-for-v1140)
- [Client Binaries](#client-binaries-2)
- [Server Binaries](#server-binaries-2)
- [Node Binaries](#node-binaries-2)
- [Kubernetes v1.14 Release Notes](#kubernetes-v114-release-notes)
- [1.14 What's New](#114-whats-new)
- [Known Issues](#known-issues)
@ -42,57 +49,140 @@
- [External Dependencies](#external-dependencies)
- [v1.14.0-rc.1](#v1140-rc1)
- [Downloads for v1.14.0-rc.1](#downloads-for-v1140-rc1)
- [Client Binaries](#client-binaries-2)
- [Server Binaries](#server-binaries-2)
- [Node Binaries](#node-binaries-2)
- [Changelog since v1.14.0-beta.2](#changelog-since-v1140-beta2)
- [Action Required](#action-required)
- [Other notable changes](#other-notable-changes-1)
- [v1.14.0-beta.2](#v1140-beta2)
- [Downloads for v1.14.0-beta.2](#downloads-for-v1140-beta2)
- [Client Binaries](#client-binaries-3)
- [Server Binaries](#server-binaries-3)
- [Node Binaries](#node-binaries-3)
- [Changelog since v1.14.0-beta.1](#changelog-since-v1140-beta1)
- [Action Required](#action-required-1)
- [Changelog since v1.14.0-beta.2](#changelog-since-v1140-beta2)
- [Action Required](#action-required)
- [Other notable changes](#other-notable-changes-2)
- [v1.14.0-beta.1](#v1140-beta1)
- [Downloads for v1.14.0-beta.1](#downloads-for-v1140-beta1)
- [v1.14.0-beta.2](#v1140-beta2)
- [Downloads for v1.14.0-beta.2](#downloads-for-v1140-beta2)
- [Client Binaries](#client-binaries-4)
- [Server Binaries](#server-binaries-4)
- [Node Binaries](#node-binaries-4)
- [Changelog since v1.14.0-alpha.3](#changelog-since-v1140-alpha3)
- [Action Required](#action-required-2)
- [Changelog since v1.14.0-beta.1](#changelog-since-v1140-beta1)
- [Action Required](#action-required-1)
- [Other notable changes](#other-notable-changes-3)
- [v1.14.0-alpha.3](#v1140-alpha3)
- [Downloads for v1.14.0-alpha.3](#downloads-for-v1140-alpha3)
- [v1.14.0-beta.1](#v1140-beta1)
- [Downloads for v1.14.0-beta.1](#downloads-for-v1140-beta1)
- [Client Binaries](#client-binaries-5)
- [Server Binaries](#server-binaries-5)
- [Node Binaries](#node-binaries-5)
- [Changelog since v1.14.0-alpha.2](#changelog-since-v1140-alpha2)
- [Action Required](#action-required-3)
- [Changelog since v1.14.0-alpha.3](#changelog-since-v1140-alpha3)
- [Action Required](#action-required-2)
- [Other notable changes](#other-notable-changes-4)
- [v1.14.0-alpha.2](#v1140-alpha2)
- [Downloads for v1.14.0-alpha.2](#downloads-for-v1140-alpha2)
- [v1.14.0-alpha.3](#v1140-alpha3)
- [Downloads for v1.14.0-alpha.3](#downloads-for-v1140-alpha3)
- [Client Binaries](#client-binaries-6)
- [Server Binaries](#server-binaries-6)
- [Node Binaries](#node-binaries-6)
- [Changelog since v1.14.0-alpha.1](#changelog-since-v1140-alpha1)
- [Action Required](#action-required-4)
- [Changelog since v1.14.0-alpha.2](#changelog-since-v1140-alpha2)
- [Action Required](#action-required-3)
- [Other notable changes](#other-notable-changes-5)
- [v1.14.0-alpha.1](#v1140-alpha1)
- [Downloads for v1.14.0-alpha.1](#downloads-for-v1140-alpha1)
- [v1.14.0-alpha.2](#v1140-alpha2)
- [Downloads for v1.14.0-alpha.2](#downloads-for-v1140-alpha2)
- [Client Binaries](#client-binaries-7)
- [Server Binaries](#server-binaries-7)
- [Node Binaries](#node-binaries-7)
- [Changelog since v1.14.0-alpha.1](#changelog-since-v1140-alpha1)
- [Action Required](#action-required-4)
- [Other notable changes](#other-notable-changes-6)
- [v1.14.0-alpha.1](#v1140-alpha1)
- [Downloads for v1.14.0-alpha.1](#downloads-for-v1140-alpha1)
- [Client Binaries](#client-binaries-8)
- [Server Binaries](#server-binaries-8)
- [Node Binaries](#node-binaries-8)
- [Changelog since v1.13.0](#changelog-since-v1130)
- [Action Required](#action-required-5)
- [Other notable changes](#other-notable-changes-6)
- [Other notable changes](#other-notable-changes-7)
<!-- END MUNGE: GENERATED_TOC -->
<!-- NEW RELEASE NOTES ENTRY -->
# v1.14.2
[Documentation](https://docs.k8s.io)
## Downloads for v1.14.2
filename | sha512 hash
-------- | -----------
[kubernetes.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes.tar.gz) | `ef1228ef7cdc3a53e9a5003acb1616aff48eba53db147af82c5e318c174f14db410bb55c030acd67d7f7694b085185ca5f9ac1d3fb9bb6ec853196571e86ad2e`
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-src.tar.gz) | `1721ea726dd19f06bade3e9751379764ffb16289b8902164d78a000eb22da15f11358b208f3996df09cd805f98daa540e49f156c1b7aabee6a06df13de8386ca`
### Client Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-darwin-386.tar.gz) | `f707f3293173cbb47dc8537b19d7da443e40d9c2b3945e8e0559513d227d98a97058b5ee3762fbf93e79b98bceadb23fc985bfbff33c8f4970966383d5032df1`
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-darwin-amd64.tar.gz) | `dcd61588fc0b27d6539f937106a88f8ebb3f19e9a41d37a79804a2594e12860247883374d7594b52a248915820be98b0dd7f756e581f5512cf731f9992bc3950`
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-386.tar.gz) | `90ad262988898cc25c2d84fdf1d62d3cdf8f16a9b7598d477a1b516b7e87e19196a4e501388e68fccc30916ac617977f6e22e4ec13fa2046bda47d386b45a0e6`
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-amd64.tar.gz) | `a4394293cecdc177db7d3ef29f9d9efb7f922d193b00d83fa17c847e2aa1cd1c38eff1f4233843fededf15d99f7c434bf701d84b93a3cb834a4699cbddf02385`
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-arm.tar.gz) | `265599b200f6de8d2c01ac36a33a0fca9faf36fb68e3e3dd5dad9166b9e6605db2aadd4199a05b5b9e20d065a8e59e7d0d130e5038dc01b37ed9705a8550d677`
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-arm64.tar.gz) | `31799018b7840cafac0fa4f8cc474396feaab71340eb7f38a122109fdcf759afc6066e67c5a26fe234232ab9a180d7312e81b3911c153f2e949415236a7b1709`
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-ppc64le.tar.gz) | `670bbe7c3142ccfa99a1eebc6d94798a8c3720165301ef615812aea64e512e03db4a9e2d80bfa073083b87c1a123a1a8e0c72fe2be26e2dfe8a499a3237deb32`
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-linux-s390x.tar.gz) | `58d161e747ec0924f3a937bd4e6856be9bad9227ca2564f2b59cdc9bfd063d78cb9c6381441aac21d3d809a1edee059697cbef5aabd344bb3fb58d4a56641415`
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-windows-386.tar.gz) | `696caeb9611137bce1988c8bf7a1e326f92dbb6f9eb31f82cc2d9cf262888b220c3abed5edb8807c58d37b659a80e46f79ecb9d8ea67627cf6a7e6b9ffa3e5c6`
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-client-windows-amd64.tar.gz) | `156ccc2102a6f92fe1078feaed835913b34eac94bbd0846726eb43fa60f0beb724355e3a3be4de87630f27f67effdd88a5014aa197ba8695bf36da2b70ee1c14`
### Server Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-amd64.tar.gz) | `f7d9687eb49ea71f0d8b1ccfac33ed05cd341d7cfacb0711fce4a722801769deb05f72f19ade10b6dc29409f0c9136653c489653ca1f20b698c1310f8a43600f`
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-arm.tar.gz) | `5c2247e4cab886cbca59ef47ea32d9ab8bb5f47495f844337dadce2362b76ebedc8a912f34131f9ec2e15bcb9023d75efb561ce7e51ce5fc7d0cb6f058a96840`
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-arm64.tar.gz) | `a341bb15e659d4000fe29b88371cc1c02df4715786901b870546c04cd943f5cad56bd4f014062c4ef2d601f107038bb4024c029f62b8b37456bbcf4d14cfc5d0`
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-ppc64le.tar.gz) | `d179c809da68cc4530910dd1a7c3749598bd40f5b7a773b2b3a9b9d0b0e25c5a0fa8f2caa8f1874b7168d2acb708f0d5014ca4f4721252ce414e36734485e32b`
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-server-linux-s390x.tar.gz) | `fdc8ffccd1c5a2e225f19b52eabceae5e8fac5e599235797b96d37223df10d45f70218dcbf5027a00db0129929fe179cd16b1f42ae2a6e7a4d020a642cd03981`
### Node Binaries
filename | sha512 hash
-------- | -----------
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-amd64.tar.gz) | `12c6139a4b497220f239f6c5e9a9b2e864d6dc298495ef4243b780fcf6c9c3aab53c88fa33d8527ed45d79de707cbce733e0c34c06b10fe2a07b4c3daafc0f50`
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-arm.tar.gz) | `53e14c9dd53562747dcfdfff7738bccdd369a2bd6f550e1ce181aa219e48c0fe92f786c4ed8d4f62fada48018917d573e4e63c0168bf205b707309ef78bac9b5`
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-arm64.tar.gz) | `5917436bdafab57f6564d6e32819b28f32d373bdb22ae53a46f7c7510283ffa87199d08db31862f8db286d5e96a37e299f8a31f0fd630bfd94698ba58b16e9af`
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-ppc64le.tar.gz) | `12a8ca3c87f165ef4eb493adcd3038d5689c592b411ebbbc97741b1de67a40f91fed7c83d0bf97bd59719c8d08e686c49e6d6dd9c6ef24b80010eb0777614187`
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-linux-s390x.tar.gz) | `1919f8b370199261803ec856e558ad75100cf6db8f5619be5710f528a46a6c58692d659bb11525e351fd46673765348050ea6f1a7427fd458386f807040b67eb`
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.14.2/kubernetes-node-windows-amd64.tar.gz) | `86057b6ca519a6b454a4b898c7a12f12a2bb25c8be85e53fd2c9b1e4191e334611ca87e679b5832acdd37c05486972eb9c0b4c4bcbf4b688239d9482a9590745`
## Changelog since v1.14.1
### Other notable changes
* Update to use go 1.12.4 ([#76576](https://github.com/kubernetes/kubernetes/pull/76576), [@cblecker](https://github.com/cblecker))
* Update to use go 1.12.5 ([#77528](https://github.com/kubernetes/kubernetes/pull/77528), [@cblecker](https://github.com/cblecker))
* Check if container memory stats are available before accessing it ([#77656](https://github.com/kubernetes/kubernetes/pull/77656), [@yastij](https://github.com/yastij))
* Bump addon-manager to v9.0.1 ([#77282](https://github.com/kubernetes/kubernetes/pull/77282), [@MrHohn](https://github.com/MrHohn))
* - Rebase image on debian-base:v1.0.0
* If a pod has a running instance, the stats of its previously terminated instances will not show up in the kubelet summary stats any more for CRI runtimes like containerd and cri-o. ([#77426](https://github.com/kubernetes/kubernetes/pull/77426), [@Random-Liu](https://github.com/Random-Liu))
* This keeps the behavior consistent with Docker integration, and fixes an issue that some container Prometheus metrics don't work when there are summary stats for multiple instances of the same pod.
* Add name validation for dynamic client methods in client-go ([#75072](https://github.com/kubernetes/kubernetes/pull/75072), [@lblackstone](https://github.com/lblackstone))
* Fix issue in Portworx volume driver causing controller manager to crash ([#76341](https://github.com/kubernetes/kubernetes/pull/76341), [@harsh-px](https://github.com/harsh-px))
* Fixes segmentation fault issue with Protobuf library when log entries are deeply nested. ([#77224](https://github.com/kubernetes/kubernetes/pull/77224), [@qingling128](https://github.com/qingling128))
* Update Cluster Autoscaler to 1.14.2 ([#77064](https://github.com/kubernetes/kubernetes/pull/77064), [@losipiuk](https://github.com/losipiuk))
* - https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.14.2
* - https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.14.1
* Fixes an error with stuck informers when an etcd watch receives update or delete events with missing data ([#76675](https://github.com/kubernetes/kubernetes/pull/76675), [@ryanmcnamara](https://github.com/ryanmcnamara))
* [fluentd-gcp addon] Bump fluentd-gcp-scaler to v0.5.2 to pick up security fixes. ([#76762](https://github.com/kubernetes/kubernetes/pull/76762), [@serathius](https://github.com/serathius))
* specify azure file share name in azure file plugin ([#76988](https://github.com/kubernetes/kubernetes/pull/76988), [@andyzhangx](https://github.com/andyzhangx))
* Windows nodes on GCE use a known-working 1809 image rather than the latest 1809 image. ([#76722](https://github.com/kubernetes/kubernetes/pull/76722), [@pjh](https://github.com/pjh))
* kube-proxy: os exit when CleanupAndExit is set to true ([#76732](https://github.com/kubernetes/kubernetes/pull/76732), [@JieJhih](https://github.com/JieJhih))
* Clean links handling in cp's tar code ([#76788](https://github.com/kubernetes/kubernetes/pull/76788), [@soltysh](https://github.com/soltysh))
    * Adds a new "storage_operation_status_count" metric for kube-controller-manager and kubelet to count success and error statuses. ([#75750](https://github.com/kubernetes/kubernetes/pull/75750), [@msau42](https://github.com/msau42))
* kubeadm: Fix a bug where if couple of CRIs are installed a user override of the CRI during join (via kubeadm join --cri-socket ...) is ignored and kubeadm bails out with an error ([#76505](https://github.com/kubernetes/kubernetes/pull/76505), [@rosti](https://github.com/rosti))
* fix detach azure disk back off issue which has too big lock in failure retry condition ([#76573](https://github.com/kubernetes/kubernetes/pull/76573), [@andyzhangx](https://github.com/andyzhangx))
* Ensure the backend pools are set correctly for Azure SLB with multiple backend pools (e.g. outbound rules) ([#76691](https://github.com/kubernetes/kubernetes/pull/76691), [@feiskyer](https://github.com/feiskyer))
* fix azure disk list corruption issue ([#77187](https://github.com/kubernetes/kubernetes/pull/77187), [@andyzhangx](https://github.com/andyzhangx))
* [IPVS] Introduces flag ipvs-strict-arp to configure stricter ARP sysctls, defaulting to false to preserve existing behaviors. This was enabled by default in 1.13.0, which impacted a few CNI plugins. ([#75295](https://github.com/kubernetes/kubernetes/pull/75295), [@lbernail](https://github.com/lbernail))
* [metrics-server addon] Restore connecting to nodes via IP addresses ([#76819](https://github.com/kubernetes/kubernetes/pull/76819), [@serathius](https://github.com/serathius))
* Fixes a NPD bug on GCI, so that it disables glog writing to files for log-counter ([#76211](https://github.com/kubernetes/kubernetes/pull/76211), [@wangzhen127](https://github.com/wangzhen127))
* Fixes bug in DaemonSetController causing it to stop processing some DaemonSets for 5 minutes after node removal. ([#76060](https://github.com/kubernetes/kubernetes/pull/76060), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski))
# v1.14.1
[Documentation](https://docs.k8s.io)

35
Godeps/LICENSES generated
View File

@ -9400,6 +9400,41 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
================================================================================
= vendor/github.com/google/go-cmp licensed under: =
Copyright (c) 2017 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/github.com/google/go-cmp/LICENSE 4ac66f7dea41d8d116cb7fb28aeff2ab
================================================================================
================================================================================
= vendor/github.com/google/gofuzz licensed under: =

View File

@ -226,6 +226,7 @@ aliases:
- thockin
- rramkumar1
- cmluciano
- m1093782566
sig-apps-approvers:
- kow3ns
- janetkuo

View File

@ -30,6 +30,9 @@ API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiexten
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSON,Raw
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,Ref
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,Schema
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,XEmbeddedResource
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,XIntOrString
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaProps,XPreserveUnknownFields
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaPropsOrArray,JSONSchemas
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaPropsOrArray,Schema
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1,JSONSchemaPropsOrBool,Allows

View File

@ -6055,6 +6055,10 @@
"io.k8s.api.core.v1.CSIPersistentVolumeSource": {
"description": "Represents storage that is managed by an external CSI volume driver (Beta feature)",
"properties": {
"controllerExpandSecretRef": {
"$ref": "#/definitions/io.k8s.api.core.v1.SecretReference",
"description": "ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed."
},
"controllerPublishSecretRef": {
"$ref": "#/definitions/io.k8s.api.core.v1.SecretReference",
"description": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed."
@ -6443,7 +6447,7 @@
"type": "string"
},
"optional": {
"description": "Specify whether the ConfigMap or it's key must be defined",
"description": "Specify whether the ConfigMap or its key must be defined",
"type": "boolean"
}
},
@ -6533,7 +6537,7 @@
"type": "string"
},
"optional": {
"description": "Specify whether the ConfigMap or it's keys must be defined",
"description": "Specify whether the ConfigMap or its keys must be defined",
"type": "boolean"
}
},
@ -6559,7 +6563,7 @@
"type": "string"
},
"optional": {
"description": "Specify whether the ConfigMap or it's keys must be defined",
"description": "Specify whether the ConfigMap or its keys must be defined",
"type": "boolean"
}
},
@ -10315,7 +10319,7 @@
"type": "string"
},
"optional": {
"description": "Specify whether the Secret or it's key must be defined",
"description": "Specify whether the Secret or its key must be defined",
"type": "boolean"
}
},
@ -10410,7 +10414,7 @@
"type": "array"
},
"optional": {
"description": "Specify whether the Secret or it's keys must be defined",
"description": "Specify whether the Secret or its keys must be defined",
"type": "boolean"
},
"secretName": {
@ -16808,6 +16812,18 @@
},
"uniqueItems": {
"type": "boolean"
},
"x-kubernetes-embedded-resource": {
"description": "x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata).",
"type": "boolean"
},
"x-kubernetes-int-or-string": {
"description": "x-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns:\n\n1) anyOf:\n - type: integer\n - type: string\n2) allOf:\n - anyOf:\n - type: integer\n - type: string\n - ... zero or more",
"type": "boolean"
},
"x-kubernetes-preserve-unknown-fields": {
"description": "x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden.",
"type": "boolean"
}
},
"type": "object"
@ -18390,7 +18406,7 @@
},
"info": {
"title": "Kubernetes",
"version": "v1.15.0"
"version": "v1.16.0"
},
"paths": {
"/api/": {

View File

@ -15,7 +15,7 @@
# This file creates a standard build environment for building cross
# platform go binary for the architecture kubernetes cares about.
FROM golang:1.12.4
FROM golang:1.12.5
ENV GOARM 7
ENV KUBE_DYNAMIC_CROSSPLATFORMS \

View File

@ -1 +1 @@
v1.12.4-1
v1.12.5-1

View File

@ -78,10 +78,11 @@ define ALL_HELP_INFO
# make
# make all
# make all WHAT=cmd/kubelet GOFLAGS=-v
# make all GOGCFLAGS="-N -l"
#   # Note: Use the -N -l options to disable compiler optimizations and inlining.
# Using these build options allows you to subsequently use source
# debugging tools like delve.
# make all GOLDFLAGS=""
# Note: Specify GOLDFLAGS as an empty string for building unstripped binaries, which allows
# you to use code debugging tools like delve. When GOLDFLAGS is unspecified, it defaults
# to "-s -w" which strips debug information. Other flags that can be used for GOLDFLAGS
# are documented at https://golang.org/cmd/link/
endef
.PHONY: all
ifeq ($(PRINT_HELP),y)

View File

@ -1,13 +1,15 @@
workspace(name = "io_k8s_kubernetes")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
load("//build:workspace_mirror.bzl", "mirror")
http_archive(
name = "bazel_toolchains",
sha256 = "f5acacb61693e00c993dbe3357cb4eb71eb49c6ed1e8b11215cef8738c7674cb",
strip_prefix = "bazel-toolchains-997c10a",
sha256 = "3a6ffe6dd91ee975f5d5bc5c50b34f58e3881dfac59a7b7aba3323bd8f8571a8",
strip_prefix = "bazel-toolchains-92dd8a7",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/997c10a.tar.gz",
"https://github.com/bazelbuild/bazel-toolchains/archive/997c10a.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/92dd8a7.tar.gz",
"https://github.com/bazelbuild/bazel-toolchains/archive/92dd8a7.tar.gz",
],
)
@ -42,8 +44,8 @@ http_archive(
http_archive(
name = "io_bazel_rules_go",
sha256 = "91b79f4758fd16f2c6426279ce00c1d2d8577d61c519db39675ed84657e1a95e",
urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.17.4/rules_go-0.17.4.tar.gz"),
sha256 = "87a089eabf919de29eb4711daa52ffbc4b22b2c045949c20503357a3cadf1037",
urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/0.17.5/rules_go-0.17.5.tar.gz"),
)
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
@ -51,7 +53,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.12.4",
go_version = "1.12.5",
)
http_archive(

View File

@ -21,7 +21,7 @@ spec:
spec:
priorityClassName: system-cluster-critical
containers:
- image: k8s.gcr.io/cpvpa-amd64:v0.6.0
- image: k8s.gcr.io/cpvpa-amd64:v0.7.1
name: autoscaler
command:
- /cpvpa

View File

@ -24,7 +24,7 @@ spec:
supplementalGroups: [ 65534 ]
fsGroup: 65534
containers:
- image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
- image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.6.0
name: autoscaler
command:
- /cluster-proportional-autoscaler

View File

@ -21,7 +21,7 @@ spec:
spec:
priorityClassName: system-cluster-critical
containers:
- image: k8s.gcr.io/cpvpa-amd64:v0.6.0
- image: k8s.gcr.io/cpvpa-amd64:v0.7.1
name: autoscaler
command:
- /cpvpa

View File

@ -2,13 +2,14 @@
approvers:
- bryk
- floreks
- jeefy
- maciaszczykm
reviewers:
- cheld
- cupofcat
- danielromlein
- floreks
- ianlewis
- konryd
- maciaszczykm
- mhenc
- rf232
- rf232

View File

@ -85,7 +85,7 @@ spec:
fsGroup: 65534
containers:
- name: autoscaler
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.6.0
resources:
requests:
cpu: "20m"

View File

@ -120,7 +120,7 @@ spec:
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
memory: __PILLAR__DNS__MEMORY__LIMIT__
requests:
cpu: 100m
memory: 70Mi

View File

@ -120,7 +120,7 @@ spec:
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
memory: {{ pillar['dns_memory_limit'] }}
requests:
cpu: 100m
memory: 70Mi

View File

@ -120,7 +120,7 @@ spec:
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
memory: $DNS_MEMORY_LIMIT
requests:
cpu: 100m
memory: 70Mi

View File

@ -1,4 +1,5 @@
s/__PILLAR__DNS__SERVER__/{{ pillar['dns_server'] }}/g
s/__PILLAR__DNS__DOMAIN__/{{ pillar['dns_domain'] }}/g
s/__PILLAR__CLUSTER_CIDR__/{{ pillar['service_cluster_ip_range'] }}/g
s/__PILLAR__DNS__MEMORY__LIMIT__/{{ pillar['dns_memory_limit'] }}/g
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g

View File

@ -1,4 +1,5 @@
s/__PILLAR__DNS__SERVER__/$DNS_SERVER_IP/g
s/__PILLAR__DNS__DOMAIN__/$DNS_DOMAIN/g
s/__PILLAR__CLUSTER_CIDR__/$SERVICE_CLUSTER_IP_RANGE/g
s/__PILLAR__DNS__MEMORY__LIMIT__/$DNS_MEMORY_LIMIT/g
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g

View File

@ -106,7 +106,7 @@ spec:
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
memory: __PILLAR__DNS__MEMORY__LIMIT__
requests:
cpu: 100m
memory: 70Mi

View File

@ -106,7 +106,7 @@ spec:
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
memory: {{ pillar['dns_memory_limit'] }}
requests:
cpu: 100m
memory: 70Mi

View File

@ -106,7 +106,7 @@ spec:
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
memory: $DNS_MEMORY_LIMIT
requests:
cpu: 100m
memory: 70Mi

View File

@ -1,4 +1,5 @@
s/__PILLAR__DNS__SERVER__/{{ pillar['dns_server'] }}/g
s/__PILLAR__DNS__DOMAIN__/{{ pillar['dns_domain'] }}/g
s/__PILLAR__CLUSTER_CIDR__/{{ pillar['service_cluster_ip_range'] }}/g
s/__PILLAR__DNS__MEMORY__LIMIT__/{{ pillar['dns_memory_limit'] }}/g
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g

View File

@ -1,4 +1,5 @@
s/__PILLAR__DNS__SERVER__/$DNS_SERVER_IP/g
s/__PILLAR__DNS__DOMAIN__/$DNS_DOMAIN/g
s/__PILLAR__CLUSTER_CIDR__/$SERVICE_CLUSTER_IP_RANGE/g
s/__PILLAR__DNS__MEMORY__LIMIT__/$DNS_MEMORY_LIMIT/g
s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g

View File

@ -17,7 +17,7 @@ COPY elasticsearch_logging_discovery.go go.mod go.sum /
RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build -a -ldflags "-w" -o /elasticsearch_logging_discovery /elasticsearch_logging_discovery.go
FROM docker.elastic.co/elasticsearch/elasticsearch-oss:6.6.1
FROM docker.elastic.co/elasticsearch/elasticsearch-oss:6.7.2
VOLUME ["/data"]
EXPOSE 9200 9300

View File

@ -16,7 +16,7 @@
PREFIX = gcr.io/fluentd-elasticsearch
IMAGE = elasticsearch
TAG = v6.6.1
TAG = v6.7.2
build:
gcloud builds submit --tag ${PREFIX}/${IMAGE}:${TAG}

View File

@ -59,12 +59,12 @@ spec:
selector:
matchLabels:
k8s-app: elasticsearch-logging
version: v6.6.1
version: v6.7.2
template:
metadata:
labels:
k8s-app: elasticsearch-logging
version: v6.6.1
version: v6.7.2
spec:
serviceAccountName: elasticsearch-logging
containers:

View File

@ -45,22 +45,22 @@ roleRef:
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd-es-v2.5.1
name: fluentd-es-v2.5.2
namespace: kube-system
labels:
k8s-app: fluentd-es
version: v2.5.1
version: v2.5.2
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: fluentd-es
version: v2.5.1
version: v2.5.2
template:
metadata:
labels:
k8s-app: fluentd-es
version: v2.5.1
version: v2.5.2
# This annotation ensures that fluentd does not get evicted if the node
# supports critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
@ -72,7 +72,7 @@ spec:
serviceAccountName: fluentd-es
containers:
- name: fluentd-es
image: gcr.io/fluentd-elasticsearch/fluentd:v2.5.1
image: gcr.io/fluentd-elasticsearch/fluentd:v2.5.2
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q

View File

@ -1,12 +1,12 @@
source 'https://rubygems.org'
gem 'activesupport', '~>5.2.2'
gem 'fluentd', '<=1.4.1'
gem 'fluentd', '<=1.4.2'
gem 'fluent-plugin-concat', '~>2.3.0'
gem 'fluent-plugin-detect-exceptions', '~>0.0.12'
gem 'fluent-plugin-elasticsearch', '~>3.3.3'
gem 'fluent-plugin-elasticsearch', '~>3.4.3'
gem 'fluent-plugin-kubernetes_metadata_filter', '~>2.1.6'
gem 'fluent-plugin-multi-format-parser', '~>1.0.0'
gem 'fluent-plugin-prometheus', '~>1.3.0'
gem 'fluent-plugin-systemd', '~>1.0.2'
gem 'oj', '~>3.7.9'
gem 'oj', '~>3.7.12'

View File

@ -16,7 +16,7 @@
PREFIX = gcr.io/fluentd-elasticsearch
IMAGE = fluentd
TAG = v2.5.1
TAG = v2.5.2
build:
gcloud builds submit --tag $(PREFIX)/$(IMAGE):$(TAG)

View File

@ -29,11 +29,11 @@ subjects:
apiVersion: apps/v1
kind: Deployment
metadata:
name: event-exporter-v0.2.4
name: event-exporter-v0.2.5
namespace: kube-system
labels:
k8s-app: event-exporter
version: v0.2.4
version: v0.2.5
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
@ -41,17 +41,17 @@ spec:
selector:
matchLabels:
k8s-app: event-exporter
version: v0.2.4
version: v0.2.5
template:
metadata:
labels:
k8s-app: event-exporter
version: v0.2.4
version: v0.2.5
spec:
serviceAccountName: event-exporter-sa
containers:
- name: event-exporter
image: k8s.gcr.io/event-exporter:v0.2.4
image: k8s.gcr.io/event-exporter:v0.2.5
command:
- /event-exporter
- -sink-opts=-stackdriver-resource-model={{ exporter_sd_resource_model }}

View File

@ -48,10 +48,10 @@ spec:
- effect: NoExecute
operator: Exists
containers:
- image: k8s.gcr.io/gke-node-termination-handler@sha256:e08ca863a547754fa7b75064bdad04f04cbef86c7b0a181ecc7304e747623181
- image: k8s.gcr.io/gke-node-termination-handler@sha256:aca12d17b222dfed755e28a44d92721e477915fb73211d0a0f8925a1fa847cca
name: node-termination-handler
command: ["./node-termination-handler"]
args: ["--logtostderr", "--exclude-pods=$(POD_NAME):$(POD_NAMESPACE)", "-v=10", "--kubeconfig=/var/lib/kubelet/kubeconfig", "--annotation=cloud.google.com/impending-node-termination"]
args: ["--logtostderr", "--exclude-pods=$(POD_NAME):$(POD_NAMESPACE)", "-v=10", "--kubeconfig=/var/lib/kubelet/kubeconfig", "--taint=cloud.google.com/impending-node-termination::NoSchedule"]
securityContext:
capabilities:
# Necessary to reboot node

View File

@ -254,6 +254,7 @@ CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}"
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
DNS_MEMORY_LIMIT="${KUBE_DNS_MEMORY_LIMIT:-170Mi}"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"

View File

@ -175,7 +175,7 @@ ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.3.10-0) if you need
# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.3.10-1) if you need
# non-default version.
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}"
ETCD_DOCKER_REPOSITORY="${TEST_ETCD_DOCKER_REPOSITORY:-}"
@ -285,6 +285,7 @@ ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
LOCAL_DNS_IP="${KUBE_LOCAL_DNS_IP:-169.254.20.10}"
DNS_DOMAIN="cluster.local"
DNS_MEMORY_LIMIT="${KUBE_DNS_MEMORY_LIMIT:-170Mi}"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"

View File

@ -49,6 +49,7 @@ readonly APISERVER_SERVER_KEY_PATH=/foo/bar
readonly APISERVER_CLIENT_CERT_PATH=/foo/bar
readonly CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true},"
readonly CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}},"
readonly INSECURE_PORT_MAPPING="{ \"name\": \"local\", \"containerPort\": 8080, \"hostPort\": 8080},"
readonly DOCKER_REGISTRY="k8s.gcr.io"
readonly ENABLE_LEGACY_ABAC=false
readonly ETC_MANIFESTS=${KUBE_HOME}/etc/kubernetes/manifests
@ -93,12 +94,11 @@ func (c *kubeAPIServerManifestTestCase) invokeTest(e kubeAPIServerEnv, kubeEnv s
func TestEncryptionProviderFlag(t *testing.T) {
var (
// command": [
// "/usr/local/bin/kube-apiserver " - Index 0,
// "--flag1=val1", - Index 1,
// "--flag2=val2", - Index 2,
// ...
// "--flagN=valN", - Index N,
// command": [
// "/bin/sh", - Index 0
// "-c", - Index 1
// "exec /usr/local/bin/kube-apiserver " - Index 2
execArgsIndex = 2
encryptionConfigFlag = "--encryption-provider-config"
)
@ -132,15 +132,10 @@ func TestEncryptionProviderFlag(t *testing.T) {
c.invokeTest(e, deployHelperEnv)
var flagIsInArg bool
var flag, execArgs string
for _, execArgs = range c.pod.Spec.Containers[0].Args[1:] {
if strings.Contains(execArgs, encryptionConfigFlag) {
flagIsInArg = true
flag = fmt.Sprintf("%s=%s", encryptionConfigFlag, e.EncryptionProviderConfigPath)
break
}
}
execArgs := c.pod.Spec.Containers[0].Command[execArgsIndex]
flagIsInArg := strings.Contains(execArgs, encryptionConfigFlag)
flag := fmt.Sprintf("%s=%s", encryptionConfigFlag, e.EncryptionProviderConfigPath)
switch {
case tc.wantFlag && !flagIsInArg:
t.Fatalf("Got %q,\n want flags to contain %q", execArgs, flag)

View File

@ -25,24 +25,6 @@ set -o errexit
set -o nounset
set -o pipefail
function convert-manifest-params {
  # A helper function to convert the manifest args from a single string to a
  # list of flag arguments.
  # Old format:
  #   command=["/bin/sh", "-c", "exec KUBE_EXEC_BINARY --param1=val1 --param2-val2"].
  # New format:
  #   command=["KUBE_EXEC_BINARY"]  # No shell dependencies.
  #   args=["--param1=val1", "--param2-val2"]
  #
  # $1: space-separated flag string, e.g. "--v=2 --port=8080".
  # Output (stdout): one quoted JSON array element per flag, joined by literal
  # "\n" escape sequences (echo does not interpret them), trailing comma
  # dropped. Prints nothing for empty input.
  IFS=' ' read -ra FLAGS <<< "$1"
  local params=""
  for flag in "${FLAGS[@]}"; do
    params+="\n\"$flag\","
  done
  # Quote the expansion and use -n rather than '! -z' (ShellCheck SC2086 /
  # SC2236). This also guards the negative-offset substring expansion below:
  # with empty input, unquoted '[ ! -z ]' was true and "${params::-1}" failed
  # with "substring expression < 0".
  if [ -n "$params" ]; then
    echo "${params::-1}" # drop trailing comma
  fi
}
function setup-os-params {
# Reset core_pattern. On GCI, the default core_pattern pipes the core dumps to
# /sbin/crash_reporter which is more restrictive in saving crash dumps. So for
@ -1498,6 +1480,7 @@ function start-etcd-servers {
# DOCKER_REGISTRY
# FLEXVOLUME_HOSTPATH_MOUNT
# FLEXVOLUME_HOSTPATH_VOLUME
# INSECURE_PORT_MAPPING
function compute-master-manifest-variables {
CLOUD_CONFIG_OPT=""
CLOUD_CONFIG_VOLUME=""
@ -1518,6 +1501,11 @@ function compute-master-manifest-variables {
FLEXVOLUME_HOSTPATH_MOUNT="{ \"name\": \"flexvolumedir\", \"mountPath\": \"${VOLUME_PLUGIN_DIR}\", \"readOnly\": true},"
FLEXVOLUME_HOSTPATH_VOLUME="{ \"name\": \"flexvolumedir\", \"hostPath\": {\"path\": \"${VOLUME_PLUGIN_DIR}\"}},"
fi
INSECURE_PORT_MAPPING=""
if [[ "${ENABLE_APISERVER_INSECURE_PORT:-false}" == "true" ]]; then
INSECURE_PORT_MAPPING="{ \"name\": \"local\", \"containerPort\": 8080, \"hostPort\": 8080},"
fi
}
# A helper function that bind mounts kubelet dirs for running mount in a chroot
@ -1542,6 +1530,7 @@ function prepare-mounter-rootfs {
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
# DOCKER_REGISTRY
# INSECURE_PORT_MAPPING
function start-kube-apiserver {
echo "Start kubernetes api-server"
prepare-log-file "${KUBE_API_SERVER_LOG_PATH:-/var/log/kube-apiserver.log}"
@ -1842,10 +1831,6 @@ function start-kube-apiserver {
# params is passed by reference, so no "$"
setup-etcd-encryption "${src_file}" params
params+=" --log-file=${KUBE_API_SERVER_LOG_PATH:-/var/log/kube-apiserver.log}"
params+=" --logtostderr=false"
params+=" --log-file-max-size=0"
params="$(convert-manifest-params "${params}")"
# Evaluate variables.
local -r kube_apiserver_docker_tag="${KUBE_API_SERVER_DOCKER_TAG:-$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag)}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
@ -1858,6 +1843,7 @@ function start-kube-apiserver {
sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
sed -i -e "s@{{liveness_probe_initial_delay}}@${KUBE_APISERVER_LIVENESS_PROBE_INITIAL_DELAY_SEC:-15}@g" "${src_file}"
sed -i -e "s@{{secure_port}}@443@g" "${src_file}"
sed -i -e "s@{{insecure_port_mapping}}@${INSECURE_PORT_MAPPING}@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
sed -i -e "s@{{webhook_authn_config_mount}}@${webhook_authn_config_mount}@g" "${src_file}"
@ -2026,8 +2012,7 @@ function apply-encryption-config() {
function start-kube-controller-manager {
echo "Start kubernetes controller-manager"
create-kubeconfig "kube-controller-manager" ${KUBE_CONTROLLER_MANAGER_TOKEN}
local LOG_PATH=/var/log/kube-controller-manager.log
prepare-log-file "${LOG_PATH}"
prepare-log-file /var/log/kube-controller-manager.log
# Calculate variables and assemble the command line.
local params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"} ${CONTROLLER_MANAGER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
params+=" --use-service-account-credentials"
@ -2055,7 +2040,7 @@ function start-kube-controller-manager {
params+=" --concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}"
fi
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then
params+=" --allocate-node-cidrs"
params+=" --allocate-node-cidrs=true"
elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then
params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
fi
@ -2086,14 +2071,9 @@ function start-kube-controller-manager {
params+=" --pv-recycler-pod-template-filepath-hostpath=$PV_RECYCLER_OVERRIDE_TEMPLATE"
fi
if [[ -n "${RUN_CONTROLLERS:-}" ]]; then
# Trim the `RUN_CONTROLLERS` value. This field is quoted which is
# incompatible with the `convert-manifest-params` format.
params+=" --controllers=${RUN_CONTROLLERS//\'}"
params+=" --controllers=${RUN_CONTROLLERS}"
fi
params+=" --log-file=${LOG_PATH}"
params+=" --logtostderr=false"
params+=" --log-file-max-size=0"
params="$(convert-manifest-params "${params}")"
local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag)
local container_env=""
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
@ -2128,8 +2108,7 @@ function start-kube-controller-manager {
function start-kube-scheduler {
echo "Start kubernetes scheduler"
create-kubeconfig "kube-scheduler" ${KUBE_SCHEDULER_TOKEN}
local LOG_PATH=/var/log/kube-scheduler.log
prepare-log-file "${LOG_PATH}"
prepare-log-file /var/log/kube-scheduler.log
# Calculate variables and set them in the manifest.
params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}"
@ -2145,11 +2124,6 @@ function start-kube-scheduler {
params+=" --use-legacy-policy-config"
params+=" --policy-config-file=/etc/srv/kubernetes/kube-scheduler/policy-config"
fi
params+=" --log-file=${LOG_PATH}"
params+=" --logtostderr=false"
params+=" --log-file-max-size=0"
params="$(convert-manifest-params "${params}")"
local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag")
# Remove salt comments and replace variables with values.
@ -2402,6 +2376,7 @@ function setup-coredns-manifest {
sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${coredns_file}"
sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${coredns_file}"
sed -i -e "s@{{ *pillar\['service_cluster_ip_range'\] *}}@${SERVICE_CLUSTER_IP_RANGE}@g" "${coredns_file}"
sed -i -e "s@{{ *pillar\['dns_memory_limit'\] *}}@${DNS_MEMORY_LIMIT}@g" "${coredns_file}"
if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
@ -2453,6 +2428,7 @@ EOF
# Replace the salt configurations with variable values.
sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${kubedns_file}"
sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${kubedns_file}"
sed -i -e "s@{{ *pillar\['dns_memory_limit'\] *}}@${DNS_MEMORY_LIMIT}@g" "${kubedns_file}"
if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"

View File

@ -0,0 +1,10 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- tallclair
- MrHohn
approvers:
- tallclair
- MrHohn
labels:
- sig/gcp

View File

@ -14,4 +14,4 @@ spec:
dnsPolicy: Default
containers:
- name: etcd-empty-dir-cleanup
image: k8s.gcr.io/etcd-empty-dir-cleanup:3.3.10.0
image: k8s.gcr.io/etcd-empty-dir-cleanup:3.3.10.1

View File

@ -14,7 +14,7 @@
"containers":[
{
"name": "etcd-container",
"image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.3.10-0') }}",
"image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.3.10-1') }}",
"resources": {
"requests": {
"cpu": {{ cpulimit }}

View File

@ -25,12 +25,10 @@
}
},
"command": [
"/usr/local/bin/kube-apiserver"
],
"args": [
"--allow-privileged={{pillar['allow_privileged']}}",
{{params}}
],
"/bin/sh",
"-c",
"exec /usr/local/bin/kube-apiserver {{params}} --allow-privileged={{pillar['allow_privileged']}} 1>>/var/log/kube-apiserver.log 2>&1"
],
{{container_env}}
"livenessProbe": {
"httpGet": {
@ -53,13 +51,11 @@
"timeoutSeconds": 15
},
"ports":[
{{insecure_port_mapping}}
{ "name": "https",
"containerPort": {{secure_port}},
"hostPort": {{secure_port}}},{
"name": "local",
"containerPort": 8080,
"hostPort": 8080}
],
"containerPort": {{secure_port}},
"hostPort": {{secure_port}}}
],
"volumeMounts": [
{{kms_socket_mount}}
{{encryption_provider_mount}}

View File

@ -25,11 +25,10 @@
}
},
"command": [
"/usr/local/bin/kube-controller-manager"
],
"args": [
{{params}}
],
"/bin/sh",
"-c",
"exec /usr/local/bin/kube-controller-manager {{params}} 1>>/var/log/kube-controller-manager.log 2>&1"
],
{{container_env}}
"livenessProbe": {
"httpGet": {

View File

@ -25,11 +25,10 @@
}
},
"command": [
"/usr/local/bin/kube-scheduler"
],
"args": [
{{params}}
],
"/bin/sh",
"-c",
"exec /usr/local/bin/kube-scheduler {{params}} 1>>/var/log/kube-scheduler.log 2>&1"
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",

View File

@ -26,7 +26,7 @@ if [[ "${KUBERNETES_PROVIDER:-gce}" != "gce" ]]; then
exit 1
fi
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/hack/lib/util.sh"
source "${KUBE_ROOT}/cluster/kube-util.sh"
@ -35,8 +35,9 @@ source "${KUBE_ROOT}/cluster/kube-util.sh"
# Assumed vars:
# PROJECT
function get-k8s-node-routes-count() {
local k8s_node_routes_count=$(gcloud compute routes list \
--project=${PROJECT} --filter='description=k8s-node-route' \
local k8s_node_routes_count
k8s_node_routes_count=$(gcloud compute routes list \
--project="${PROJECT}" --filter='description=k8s-node-route' \
--format='value(name)' | wc -l)
echo -n "${k8s_node_routes_count}"
}
@ -50,11 +51,12 @@ function get-k8s-node-routes-count() {
# Vars set:
# IP_ALIAS_SUBNETWORK
function detect-k8s-subnetwork() {
local subnetwork_url=$(gcloud compute instances describe \
${KUBE_MASTER} --project=${PROJECT} --zone=${ZONE} \
local subnetwork_url
subnetwork_url=$(gcloud compute instances describe \
"${KUBE_MASTER}" --project="${PROJECT}" --zone="${ZONE}" \
--format='value(networkInterfaces[0].subnetwork)')
if [[ -n ${subnetwork_url} ]]; then
IP_ALIAS_SUBNETWORK=$(echo ${subnetwork_url##*/})
IP_ALIAS_SUBNETWORK=${subnetwork_url##*/}
fi
}
@ -69,21 +71,24 @@ function detect-k8s-subnetwork() {
function set-allow-subnet-cidr-routes-overlap() {
local allow_subnet_cidr_routes_overlap
allow_subnet_cidr_routes_overlap=$(gcloud compute networks subnets \
describe ${IP_ALIAS_SUBNETWORK} --project=${PROJECT} --region=${REGION} \
describe "${IP_ALIAS_SUBNETWORK}" --project="${PROJECT}" --region="${REGION}" \
--format='value(allowSubnetCidrRoutesOverlap)')
local allow_overlap=$1
if [ ${allow_subnet_cidr_routes_overlap,,} = ${allow_overlap} ]; then
if [ "${allow_subnet_cidr_routes_overlap,,}" = "${allow_overlap}" ]; then
echo "Subnet ${IP_ALIAS_SUBNETWORK}'s allowSubnetCidrRoutesOverlap is already set as $1"
return
fi
echo "Setting subnet \"${IP_ALIAS_SUBNETWORK}\" allowSubnetCidrRoutesOverlap to $1"
local fingerprint=$(gcloud compute networks subnets describe \
${IP_ALIAS_SUBNETWORK} --project=${PROJECT} --region=${REGION} \
local fingerprint
fingerprint=$(gcloud compute networks subnets describe \
"${IP_ALIAS_SUBNETWORK}" --project="${PROJECT}" --region="${REGION}" \
--format='value(fingerprint)')
local access_token=$(gcloud auth print-access-token)
local access_token
access_token=$(gcloud auth print-access-token)
local request="{\"allowSubnetCidrRoutesOverlap\":$1, \"fingerprint\":\"${fingerprint}\"}"
local subnetwork_url="${GCE_API_ENDPOINT}projects/${PROJECT}/regions/${REGION}/subnetworks/${IP_ALIAS_SUBNETWORK}"
local subnetwork_url
subnetwork_url="${GCE_API_ENDPOINT}projects/${PROJECT}/regions/${REGION}/subnetworks/${IP_ALIAS_SUBNETWORK}"
until curl -s --header "Content-Type: application/json" --header "Authorization: Bearer ${access_token}" \
-X PATCH -d "${request}" "${subnetwork_url}" --output /dev/null; do
printf "."
@ -100,7 +105,8 @@ function set-allow-subnet-cidr-routes-overlap() {
# CLUSTER_IP_RANGE
# SERVICE_CLUSTER_IP_RANGE
function add-k8s-subnet-secondary-ranges() {
local secondary_ranges=$(gcloud compute networks subnets describe "${IP_ALIAS_SUBNETWORK}" \
local secondary_ranges
secondary_ranges=$(gcloud compute networks subnets describe "${IP_ALIAS_SUBNETWORK}" \
--project="${PROJECT}" --region="${REGION}" \
--format='value(secondaryIpRanges)')
if [[ "${secondary_ranges}" =~ "pods-default" && "${secondary_ranges}" =~ "services-default" ]]; then
@ -109,8 +115,8 @@ function add-k8s-subnet-secondary-ranges() {
fi
echo "Adding secondary ranges: pods-default (${CLUSTER_IP_RANGE}), services-default (${SERVICE_CLUSTER_IP_RANGE})"
until gcloud compute networks subnets update ${IP_ALIAS_SUBNETWORK} \
--project=${PROJECT} --region=${REGION} \
until gcloud compute networks subnets update "${IP_ALIAS_SUBNETWORK}" \
--project="${PROJECT}" --region="${REGION}" \
--add-secondary-ranges="pods-default=${CLUSTER_IP_RANGE},services-default=${SERVICE_CLUSTER_IP_RANGE}"; do
printf "."
sleep 1
@ -124,9 +130,12 @@ function add-k8s-subnet-secondary-ranges() {
function delete-k8s-node-routes() {
local -a routes
local -r batch=200
routes=( $(gcloud compute routes list \
--project=${PROJECT} --filter='description=k8s-node-route' \
--format='value(name)') )
routes=()
while IFS=$'\n' read -r route; do
routes+=( "${route}" )
done < <(gcloud compute routes list \
--project="${PROJECT}" --filter='description=k8s-node-route' \
--format='value(name)')
while (( "${#routes[@]}" > 0 )); do
echo Deleting k8s node routes "${routes[*]::${batch}}"
gcloud compute routes delete --project "${PROJECT}" --quiet "${routes[@]::${batch}}"
@ -145,7 +154,7 @@ fi
echo "Found ${k8s_node_routes_count} K8s node routes. Proceeding to upgrade them to IP aliases based connectivity..."
detect-k8s-subnetwork
if [ -z ${IP_ALIAS_SUBNETWORK} ]; then
if [ -z "${IP_ALIAS_SUBNETWORK}" ]; then
echo "No k8s cluster subnetwork found. Exiting..."
exit 1
fi
@ -161,11 +170,11 @@ export KUBE_GCE_ENABLE_IP_ALIASES=true
export SECONDARY_RANGE_NAME="pods-default"
export STORAGE_BACKEND="etcd3"
export STORAGE_MEDIA_TYPE="application/vnd.kubernetes.protobuf"
export ETCD_IMAGE=3.3.10-0
export ETCD_IMAGE=3.3.10-1
export ETCD_VERSION=3.3.10
# Upgrade master with updated kube envs
${KUBE_ROOT}/cluster/gce/upgrade.sh -M -l
"${KUBE_ROOT}/cluster/gce/upgrade.sh" -M -l
delete-k8s-node-routes
set-allow-subnet-cidr-routes-overlap false

View File

@ -1123,6 +1123,7 @@ ENABLE_NODELOCAL_DNS: $(yaml-quote ${ENABLE_NODELOCAL_DNS:-false})
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
LOCAL_DNS_IP: $(yaml-quote ${LOCAL_DNS_IP:-})
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
DNS_MEMORY_LIMIT: $(yaml-quote ${DNS_MEMORY_LIMIT:-})
ENABLE_DNS_HORIZONTAL_AUTOSCALER: $(yaml-quote ${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false})
KUBE_PROXY_DAEMONSET: $(yaml-quote ${KUBE_PROXY_DAEMONSET:-false})
KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})

View File

@ -20,7 +20,7 @@
ARCH:=amd64
GOLANG_VERSION?=1.8.3
REGISTRY?=staging-k8s.gcr.io
TAG?=0.1.2
TAG?=0.1.3
IMAGE:=$(REGISTRY)/etcd-version-monitor:$(TAG)
CURRENT_DIR:=$(pwd)
TEMP_DIR:=$(shell mktemp -d)

View File

@ -7,7 +7,7 @@ spec:
hostNetwork: true
containers:
- name: etcd-version-monitor
image: k8s.gcr.io/etcd-version-monitor:0.1.2
image: k8s.gcr.io/etcd-version-monitor:0.1.3
command:
- /etcd-version-monitor
- --logtostderr

View File

@ -34,7 +34,7 @@ LATEST_ETCD_VERSION?=3.3.10
# REVISION provides a version number fo this image and all it's bundled
# artifacts. It should start at zero for each LATEST_ETCD_VERSION and increment
# for each revision of this image at that etcd version.
REVISION?=0
REVISION?=1
# IMAGE_TAG Uniquely identifies k8s.gcr.io/etcd docker image with a tag of the form "<etcd-version>-<revision>".
IMAGE_TAG=$(LATEST_ETCD_VERSION)-$(REVISION)

View File

@ -104,7 +104,7 @@ while true; do
# which are important for line counting.
# Use trick from https://unix.stackexchange.com/a/383411 to avoid
# newline truncation.
node=$(kubectl_retry get nodes --chunk-size=0 --no-headers; ret=$?; echo .; exit "$ret") && res="$?" || res="$?"
node=$(kubectl_retry get nodes --no-headers; ret=$?; echo .; exit "$ret") && res="$?" || res="$?"
node="${node%.}"
if [ "${res}" -ne "0" ]; then
if [[ "${attempt}" -gt "${last_run:-$MAX_ATTEMPTS}" ]]; then

View File

@ -103,9 +103,7 @@ func (o *GenericControllerManagerConfigurationOptions) Validate(allControllers [
if controller == "*" {
continue
}
if strings.HasPrefix(controller, "-") {
controller = controller[1:]
}
controller = strings.TrimPrefix(controller, "-")
if !allControllersSet.Has(controller) {
errs = append(errs, fmt.Errorf("%q is not in the list of known controllers", controller))
}

View File

@ -31,7 +31,6 @@ go_library(
"//cmd/kube-scheduler/app:go_default_library",
"//cmd/kubeadm/app/cmd:go_default_library",
"//cmd/kubelet/app:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",
"//vendor/github.com/spf13/cobra/doc:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",

View File

@ -22,7 +22,6 @@ import (
"github.com/spf13/cobra/doc"
"github.com/spf13/pflag"
"k8s.io/apiserver/pkg/server"
ccmapp "k8s.io/kubernetes/cmd/cloud-controller-manager/app"
"k8s.io/kubernetes/cmd/genutils"
apiservapp "k8s.io/kubernetes/cmd/kube-apiserver/app"
@ -54,7 +53,7 @@ func main() {
switch module {
case "kube-apiserver":
// generate docs for kube-apiserver
apiserver := apiservapp.NewAPIServerCommand(server.SetupSignalHandler())
apiserver := apiservapp.NewAPIServerCommand()
doc.GenMarkdownTree(apiserver, outDir)
case "kube-controller-manager":
// generate docs for kube-controller-manager
@ -74,7 +73,7 @@ func main() {
doc.GenMarkdownTree(scheduler, outDir)
case "kubelet":
// generate docs for kubelet
kubelet := kubeletapp.NewKubeletCommand(server.SetupSignalHandler())
kubelet := kubeletapp.NewKubeletCommand()
doc.GenMarkdownTree(kubelet, outDir)
case "kubeadm":
// resets global flags created by kubelet or other commands e.g.

View File

@ -25,7 +25,6 @@ go_library(
"//cmd/kubeadm/app/cmd:go_default_library",
"//cmd/kubelet/app:go_default_library",
"//pkg/kubectl/cmd:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//vendor/github.com/cpuguy83/go-md2man/md2man:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",

View File

@ -26,7 +26,6 @@ import (
mangen "github.com/cpuguy83/go-md2man/md2man"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/apiserver/pkg/server"
ccmapp "k8s.io/kubernetes/cmd/cloud-controller-manager/app"
"k8s.io/kubernetes/cmd/genutils"
apiservapp "k8s.io/kubernetes/cmd/kube-apiserver/app"
@ -63,7 +62,7 @@ func main() {
switch module {
case "kube-apiserver":
// generate manpage for kube-apiserver
apiserver := apiservapp.NewAPIServerCommand(server.SetupSignalHandler())
apiserver := apiservapp.NewAPIServerCommand()
genMarkdown(apiserver, "", outDir)
for _, c := range apiserver.Commands() {
genMarkdown(c, "kube-apiserver", outDir)
@ -98,7 +97,7 @@ func main() {
}
case "kubelet":
// generate manpage for kubelet
kubelet := kubeletapp.NewKubeletCommand(server.SetupSignalHandler())
kubelet := kubeletapp.NewKubeletCommand()
genMarkdown(kubelet, "", outDir)
for _, c := range kubelet.Commands() {
genMarkdown(c, "kubelet", outDir)

View File

@ -27,7 +27,6 @@ go_library(
"//pkg/client/metrics/prometheus:go_default_library",
"//pkg/kubectl/cmd:go_default_library",
"//pkg/version/prometheus:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//staging/src/k8s.io/component-base/cli/flag:go_default_library",
"//staging/src/k8s.io/component-base/logs:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",

View File

@ -32,7 +32,6 @@ import (
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/apiserver/pkg/server"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
cloudcontrollermanager "k8s.io/kubernetes/cmd/cloud-controller-manager/app"
@ -49,7 +48,7 @@ import (
func main() {
rand.Seed(time.Now().UnixNano())
hyperkubeCommand, allCommandFns := NewHyperKubeCommand(server.SetupSignalHandler())
hyperkubeCommand, allCommandFns := NewHyperKubeCommand()
// TODO: once we switch everything over to Cobra commands, we can go back to calling
// cliflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
@ -84,15 +83,15 @@ func commandFor(basename string, defaultCommand *cobra.Command, commands []func(
}
// NewHyperKubeCommand is the entry point for hyperkube
func NewHyperKubeCommand(stopCh <-chan struct{}) (*cobra.Command, []func() *cobra.Command) {
func NewHyperKubeCommand() (*cobra.Command, []func() *cobra.Command) {
// these have to be functions since the command is polymorphic. Cobra wants you to be top level
// command to get executed
apiserver := func() *cobra.Command { return kubeapiserver.NewAPIServerCommand(stopCh) }
apiserver := func() *cobra.Command { return kubeapiserver.NewAPIServerCommand() }
controller := func() *cobra.Command { return kubecontrollermanager.NewControllerManagerCommand() }
proxy := func() *cobra.Command { return kubeproxy.NewProxyCommand() }
scheduler := func() *cobra.Command { return kubescheduler.NewSchedulerCommand() }
kubectlCmd := func() *cobra.Command { return kubectl.NewDefaultKubectlCommand() }
kubelet := func() *cobra.Command { return kubelet.NewKubeletCommand(stopCh) }
kubelet := func() *cobra.Command { return kubelet.NewKubeletCommand() }
cloudController := func() *cobra.Command { return cloudcontrollermanager.NewCloudControllerManagerCommand() }
commandFns := []func() *cobra.Command{

View File

@ -22,7 +22,6 @@ go_library(
"//cmd/kube-apiserver/app:go_default_library",
"//pkg/util/prometheusclientgo:go_default_library",
"//pkg/version/prometheus:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//staging/src/k8s.io/component-base/logs:go_default_library",
],
)

View File

@ -24,7 +24,6 @@ import (
"os"
"time"
"k8s.io/apiserver/pkg/server"
"k8s.io/component-base/logs"
"k8s.io/kubernetes/cmd/kube-apiserver/app"
_ "k8s.io/kubernetes/pkg/util/prometheusclientgo" // load all the prometheus client-go plugins
@ -34,7 +33,7 @@ import (
func main() {
rand.Seed(time.Now().UnixNano())
command := app.NewAPIServerCommand(server.SetupSignalHandler())
command := app.NewAPIServerCommand()
// TODO: once we switch everything over to Cobra commands, we can go back to calling
// utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the

View File

@ -88,7 +88,7 @@ const etcdRetryLimit = 60
const etcdRetryInterval = 1 * time.Second
// NewAPIServerCommand creates a *cobra.Command object with default parameters
func NewAPIServerCommand(stopCh <-chan struct{}) *cobra.Command {
func NewAPIServerCommand() *cobra.Command {
s := options.NewServerRunOptions()
cmd := &cobra.Command{
Use: "kube-apiserver",
@ -111,7 +111,7 @@ cluster's shared state through which all other components interact.`,
return utilerrors.NewAggregate(errs)
}
return Run(completedOptions, stopCh)
return Run(completedOptions, genericapiserver.SetupSignalHandler())
},
}

View File

@ -72,16 +72,19 @@ import (
)
const (
// Jitter used when starting controller managers
// ControllerStartJitter is the Jitter used when starting controller managers
ControllerStartJitter = 1.0
// ConfigzName is the name used for register kube-controller manager /configz, same with GroupName.
ConfigzName = "kubecontrollermanager.config.k8s.io"
)
// ControllerLoopMode is the kube-controller-manager's mode of running controller loops that are cloud provider dependent
type ControllerLoopMode int
const (
// IncludeCloudLoops means the kube-controller-manager include the controller loops that are cloud provider dependent
IncludeCloudLoops ControllerLoopMode = iota
// ExternalLoops means the kube-controller-manager exclude the controller loops that are cloud provider dependent
ExternalLoops
)
@ -284,6 +287,7 @@ func Run(c *config.CompletedConfig, stopCh <-chan struct{}) error {
panic("unreachable")
}
// ControllerContext defines the context object for controller
type ControllerContext struct {
// ClientBuilder will provide a client for this controller to use
ClientBuilder controller.ControllerClientBuilder
@ -328,6 +332,7 @@ type ControllerContext struct {
ResyncPeriod func() time.Duration
}
// IsControllerEnabled checks if the context's controllers enabled or not
func (c ControllerContext) IsControllerEnabled(name string) bool {
return genericcontrollermanager.IsControllerEnabled(name, ControllersDisabledByDefault, c.ComponentConfig.Generic.Controllers)
}
@ -337,6 +342,7 @@ func (c ControllerContext) IsControllerEnabled(name string) bool {
// The bool indicates whether the controller was enabled.
type InitFunc func(ctx ControllerContext) (debuggingHandler http.Handler, enabled bool, err error)
// KnownControllers returns all known controllers's name
func KnownControllers() []string {
ret := sets.StringKeySet(NewControllerInitializers(IncludeCloudLoops))
@ -351,6 +357,7 @@ func KnownControllers() []string {
return ret.List()
}
// ControllersDisabledByDefault is the set of controllers which is disabled by default
var ControllersDisabledByDefault = sets.NewString(
"bootstrapsigner",
"tokencleaner",
@ -405,8 +412,9 @@ func NewControllerInitializers(loopMode ControllerLoopMode) map[string]InitFunc
return controllers
}
// GetAvailableResources gets the map which contains all available resources of the apiserver
// TODO: In general, any controller checking this needs to be dynamic so
// users don't have to restart their controller manager if they change the apiserver.
// users don't have to restart their controller manager if they change the apiserver.
// Until we get there, the structure here needs to be exposed for the construction of a proper ControllerContext.
func GetAvailableResources(clientBuilder controller.ControllerClientBuilder) (map[schema.GroupVersionResource]bool, error) {
client := clientBuilder.ClientOrDie("controller-discovery")
@ -484,6 +492,7 @@ func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clien
return ctx, nil
}
// StartControllers starts a set of controllers with a specified ControllerContext
func StartControllers(ctx ControllerContext, startSATokenController InitFunc, controllers map[string]InitFunc, unsecuredMux *mux.PathRecorderMux) error {
// Always start the SA token controller first using a full-power client, since it needs to mint tokens for the rest
// If this fails, just return here and fail since other controllers won't be able to get credentials.

View File

@ -14,12 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app imports the API groups that the client will support
// TODO: Remove this file when namespace controller and garbage collector
// stops using legacyscheme.Registry.RESTMapper()
package app
// These imports are the API groups the client will support.
import (
// These imports are the API groups the client will support.
_ "k8s.io/kubernetes/pkg/apis/apps/install"
_ "k8s.io/kubernetes/pkg/apis/authentication/install"
_ "k8s.io/kubernetes/pkg/apis/authorization/install"

View File

@ -27,6 +27,7 @@ import (
// Cloud providers
cloudprovider "k8s.io/cloud-provider"
// ensure the cloud providers are installed
_ "k8s.io/kubernetes/pkg/cloudprovider/providers"
// Volume plugins
"k8s.io/kubernetes/pkg/volume"

View File

@ -163,7 +163,6 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/proxy/apis/config:go_default_library",
"//pkg/util/configz:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@ -329,11 +329,9 @@ func (o *Options) runLoop() error {
}()
for {
select {
case err := <-o.errCh:
if err != nil {
return err
}
err := <-o.errCh
if err != nil {
return err
}
}
}

View File

@ -237,7 +237,7 @@ func newProxyServer(
}, nil
}
func getProxyMode(proxyMode string, iptver iptables.IPTablesVersioner, khandle ipvs.KernelHandler, ipsetver ipvs.IPSetVersioner, kcompat iptables.KernelCompatTester) string {
func getProxyMode(proxyMode string, iptver iptables.Versioner, khandle ipvs.KernelHandler, ipsetver ipvs.IPSetVersioner, kcompat iptables.KernelCompatTester) string {
switch proxyMode {
case proxyModeUserspace:
return proxyModeUserspace
@ -250,7 +250,7 @@ func getProxyMode(proxyMode string, iptver iptables.IPTablesVersioner, khandle i
return tryIPTablesProxy(iptver, kcompat)
}
func tryIPVSProxy(iptver iptables.IPTablesVersioner, khandle ipvs.KernelHandler, ipsetver ipvs.IPSetVersioner, kcompat iptables.KernelCompatTester) string {
func tryIPVSProxy(iptver iptables.Versioner, khandle ipvs.KernelHandler, ipsetver ipvs.IPSetVersioner, kcompat iptables.KernelCompatTester) string {
// guaranteed false on error, error only necessary for debugging
// IPVS Proxier relies on ip_vs_* kernel modules and ipset
useIPVSProxy, err := ipvs.CanUseIPVSProxier(khandle, ipsetver)
@ -267,7 +267,7 @@ func tryIPVSProxy(iptver iptables.IPTablesVersioner, khandle ipvs.KernelHandler,
return tryIPTablesProxy(iptver, kcompat)
}
func tryIPTablesProxy(iptver iptables.IPTablesVersioner, kcompat iptables.KernelCompatTester) string {
func tryIPTablesProxy(iptver iptables.Versioner, kcompat iptables.KernelCompatTester) string {
// guaranteed false on error, error only necessary for debugging
useIPTablesProxy, err := iptables.CanUseIPTablesProxier(iptver, kcompat)
if err != nil {

View File

@ -29,8 +29,6 @@ import (
func Test_getProxyMode(t *testing.T) {
var cases = []struct {
flag string
annotationKey string
annotationVal string
iptablesVersion string
ipsetVersion string
kmods []string

View File

@ -33,20 +33,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/diff"
componentbaseconfig "k8s.io/component-base/config"
api "k8s.io/kubernetes/pkg/apis/core"
kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config"
"k8s.io/kubernetes/pkg/util/configz"
utilpointer "k8s.io/utils/pointer"
)
// fakeNodeInterface is a test double for the node getter: it holds a single
// fixed node that Get always returns, regardless of the requested hostname.
type fakeNodeInterface struct {
	node api.Node // the node returned by every Get call
}
// Get implements the node getter used in tests. The hostname and options
// arguments are ignored; a pointer to the stored fake node is always returned
// with a nil error.
func (f *fakeNodeInterface) Get(hostname string, options metav1.GetOptions) (*api.Node, error) {
	node := &f.node
	return node, nil
}
type fakeIPTablesVersioner struct {
version string // what to return
err error // what to return

View File

@ -146,7 +146,7 @@ func ValidateDiscoveryBootstrapToken(b *kubeadm.BootstrapTokenDiscovery, fldPath
}
if len(b.CACertHashes) == 0 && !b.UnsafeSkipCAVerification {
allErrs = append(allErrs, field.Invalid(fldPath, "", "using token-based discovery without caCertHashes can be unsafe. Set unsafeSkipCAVerification to continue"))
allErrs = append(allErrs, field.Invalid(fldPath, "", "using token-based discovery without caCertHashes can be unsafe. Set unsafeSkipCAVerification as true in your kubeadm config file or pass --discovery-token-unsafe-skip-ca-verification flag to continue"))
}
allErrs = append(allErrs, ValidateToken(b.Token, fldPath.Child(kubeadmcmdoptions.TokenStr))...)

View File

@ -86,7 +86,6 @@ go_test(
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library",
"//cmd/kubeadm/app/cmd/options:go_default_library",
"//cmd/kubeadm/app/componentconfigs:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",

View File

@ -12,6 +12,7 @@ go_library(
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/cmd/alpha",
visibility = ["//visibility:public"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/scheme:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1beta2:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library",
@ -20,7 +21,6 @@ go_library(
"//cmd/kubeadm/app/cmd/util:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/features:go_default_library",
"//cmd/kubeadm/app/phases/certs:go_default_library",
"//cmd/kubeadm/app/phases/certs/renewal:go_default_library",
"//cmd/kubeadm/app/phases/kubeconfig:go_default_library",
"//cmd/kubeadm/app/phases/kubelet:go_default_library",

View File

@ -19,14 +19,16 @@ package alpha
import (
"fmt"
"github.com/pkg/errors"
"github.com/spf13/cobra"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/renewal"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
@ -36,14 +38,16 @@ import (
var (
genericCertRenewLongDesc = normalizer.LongDesc(`
Renew the %[1]s, and save them into %[2]s.cert and %[2]s.key files.
Renew the %s.
Extra attributes such as SANs will be based on the existing certificates, there is no need to resupply them.
`)
genericCertRenewEmbeddedLongDesc = normalizer.LongDesc(`
Renew the certificate embedded in the kubeconfig file %s.
Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will
be based on the existing file/certificates, there is no need to resupply them.
Kubeconfig attributes and certificate extra attributes such as SANs will be based on the existing kubeconfig/certificates, there is no need to resupply them.
Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as alternative
it is possible to use K8s certificate API for certificate renewal, or as a last option, to generate a CSR request.
After renewal, in order to make changes effective, is is required to restart control-plane components and
eventually re-distribute the renewed certificate in case the file is used elsewhere.
`)
allLongDesc = normalizer.LongDesc(`
@ -78,17 +82,17 @@ func newCmdCertsRenewal() *cobra.Command {
return cmd
}
type renewConfig struct {
type renewFlags struct {
cfgPath string
kubeconfigPath string
cfg kubeadmapiv1beta2.InitConfiguration
useAPI bool
useCSR bool
csrOnly bool
csrPath string
}
func getRenewSubCommands(kdir string) []*cobra.Command {
cfg := &renewConfig{
flags := &renewFlags{
cfg: kubeadmapiv1beta2.InitConfiguration{
ClusterConfiguration: kubeadmapiv1beta2.ClusterConfiguration{
// Setting kubernetes version to a default value in order to allow a not necessary internet lookup
@ -97,45 +101,28 @@ func getRenewSubCommands(kdir string) []*cobra.Command {
},
}
// Default values for the cobra help text
kubeadmscheme.Scheme.Default(&cfg.cfg)
kubeadmscheme.Scheme.Default(&flags.cfg)
certTree, err := certsphase.GetDefaultCertList().AsMap().CertTree()
// Get a renewal manager for a generic Cluster configuration, that is used only for getting
// the list of certificates for building subcommands
rm, err := renewal.NewManager(&kubeadmapi.ClusterConfiguration{}, "")
kubeadmutil.CheckErr(err)
cmdList := []*cobra.Command{}
funcList := []func(){}
for caCert, certs := range certTree {
// Don't offer to renew CAs; would cause serious consequences
for _, cert := range certs {
// get the cobra.Command skeleton for this command
cmd := generateCertRenewalCommand(cert, cfg)
// get the implementation of renewing this certificate
renewalFunc := func(cert *certsphase.KubeadmCert, caCert *certsphase.KubeadmCert) func() {
return func() { renewCert(cert, caCert, cfg) }
}(cert, caCert)
// install the implementation into the command
cmd.Run = func(*cobra.Command, []string) { renewalFunc() }
cmdList = append(cmdList, cmd)
// Collect renewal functions for `renew all`
funcList = append(funcList, renewalFunc)
}
}
kubeconfigs := []string{
kubeadmconstants.AdminKubeConfigFileName,
kubeadmconstants.ControllerManagerKubeConfigFileName,
kubeadmconstants.SchedulerKubeConfigFileName,
//NB. we are excluding KubeletKubeConfig from renewal because management of this certificate is delegated to kubelet
}
for _, k := range kubeconfigs {
for _, handler := range rm.Certificates() {
// get the cobra.Command skeleton for this command
cmd := generateEmbeddedCertRenewalCommand(k, cfg)
cmd := &cobra.Command{
Use: handler.Name,
Short: fmt.Sprintf("Renew the %s", handler.LongName),
Long: fmt.Sprintf(genericCertRenewLongDesc, handler.LongName),
}
addFlags(cmd, flags)
// get the implementation of renewing this certificate
renewalFunc := func(kdir, k string) func() {
return func() { renewEmbeddedCert(kdir, k, cfg) }
}(kdir, k)
renewalFunc := func(handler *renewal.CertificateRenewHandler) func() {
return func() { renewCert(flags, kdir, handler) }
}(handler)
// install the implementation into the command
cmd.Run = func(*cobra.Command, []string) { renewalFunc() }
cmdList = append(cmdList, cmd)
@ -153,134 +140,60 @@ func getRenewSubCommands(kdir string) []*cobra.Command {
}
},
}
addFlags(allCmd, cfg)
addFlags(allCmd, flags)
cmdList = append(cmdList, allCmd)
return cmdList
}
func addFlags(cmd *cobra.Command, cfg *renewConfig) {
options.AddConfigFlag(cmd.Flags(), &cfg.cfgPath)
options.AddCertificateDirFlag(cmd.Flags(), &cfg.cfg.CertificatesDir)
options.AddKubeConfigFlag(cmd.Flags(), &cfg.kubeconfigPath)
options.AddCSRFlag(cmd.Flags(), &cfg.useCSR)
options.AddCSRDirFlag(cmd.Flags(), &cfg.csrPath)
cmd.Flags().BoolVar(&cfg.useAPI, "use-api", cfg.useAPI, "Use the Kubernetes certificate API to renew certificates")
func addFlags(cmd *cobra.Command, flags *renewFlags) {
options.AddConfigFlag(cmd.Flags(), &flags.cfgPath)
options.AddCertificateDirFlag(cmd.Flags(), &flags.cfg.CertificatesDir)
options.AddKubeConfigFlag(cmd.Flags(), &flags.kubeconfigPath)
options.AddCSRFlag(cmd.Flags(), &flags.csrOnly)
options.AddCSRDirFlag(cmd.Flags(), &flags.csrPath)
cmd.Flags().BoolVar(&flags.useAPI, "use-api", flags.useAPI, "Use the Kubernetes certificate API to renew certificates")
}
func renewCert(cert *certsphase.KubeadmCert, caCert *certsphase.KubeadmCert, cfg *renewConfig) {
internalcfg, err := configutil.LoadOrDefaultInitConfiguration(cfg.cfgPath, &cfg.cfg)
func renewCert(flags *renewFlags, kdir string, handler *renewal.CertificateRenewHandler) {
internalcfg, err := configutil.LoadOrDefaultInitConfiguration(flags.cfgPath, &flags.cfg)
kubeadmutil.CheckErr(err)
// if the renewal operation is set to generate only CSR request
if cfg.useCSR {
// trigger CSR generation in the csrPath, or if this one is missing, in the CertificateDir
path := cfg.csrPath
if path == "" {
path = cfg.cfg.CertificatesDir
// Get a renewal manager for the given cluster configuration
rm, err := renewal.NewManager(&internalcfg.ClusterConfiguration, kdir)
kubeadmutil.CheckErr(err)
// if the renewal operation is set to generate CSR request only
if flags.csrOnly {
// checks a path for storing CSR request is given
if flags.csrPath == "" {
kubeadmutil.CheckErr(errors.New("please provide a path where CSR request should be stored"))
}
err := certsphase.CreateCSR(cert, internalcfg, path)
err := rm.CreateRenewCSR(handler.Name, flags.csrPath)
kubeadmutil.CheckErr(err)
return
}
// otherwise, the renewal operation has to actually renew a certificate
var externalCA bool
switch caCert.BaseName {
case kubeadmconstants.CACertAndKeyBaseName:
// Check if an external CA is provided by the user (when the CA Cert is present but the CA Key is not)
externalCA, _ = certsphase.UsingExternalCA(&internalcfg.ClusterConfiguration)
case kubeadmconstants.FrontProxyCACertAndKeyBaseName:
// Check if an external Front-Proxy CA is provided by the user (when the Front-Proxy CA Cert is present but the Front-Proxy CA Key is not)
externalCA, _ = certsphase.UsingExternalFrontProxyCA(&internalcfg.ClusterConfiguration)
default:
externalCA = false
}
if !externalCA {
renewer, err := getRenewer(cfg, caCert.BaseName)
kubeadmutil.CheckErr(err)
err = renewal.RenewExistingCert(internalcfg.CertificatesDir, cert.BaseName, renewer)
kubeadmutil.CheckErr(err)
fmt.Printf("Certificate %s renewed\n", cert.Name)
return
}
fmt.Printf("Detected external %s, certificate %s can't be renewed\n", cert.CAName, cert.Name)
}
func renewEmbeddedCert(kdir, k string, cfg *renewConfig) {
internalcfg, err := configutil.LoadOrDefaultInitConfiguration(cfg.cfgPath, &cfg.cfg)
kubeadmutil.CheckErr(err)
// if the renewal operation is set to generate only CSR request
if cfg.useCSR {
// trigger CSR generation in the csrPath, or if this one is missing, in the CertificateDir
path := cfg.csrPath
if path == "" {
path = cfg.cfg.CertificatesDir
}
err := certsphase.CreateCSR(nil, internalcfg, path)
kubeadmutil.CheckErr(err)
return
}
// otherwise, the renewal operation has to actually renew a certificate
// Check if an external CA is provided by the user (when the CA Cert is present but the CA Key is not)
externalCA, _ := certsphase.UsingExternalCA(&internalcfg.ClusterConfiguration)
if !externalCA {
renewer, err := getRenewer(cfg, certsphase.KubeadmCertRootCA.BaseName)
kubeadmutil.CheckErr(err)
err = renewal.RenewEmbeddedClientCert(kdir, k, renewer)
kubeadmutil.CheckErr(err)
fmt.Printf("Certificate embedded in %s renewed\n", k)
return
}
fmt.Printf("Detected external CA, certificate embedded in %s can't be renewed\n", k)
}
func generateCertRenewalCommand(cert *certsphase.KubeadmCert, cfg *renewConfig) *cobra.Command {
cmd := &cobra.Command{
Use: cert.Name,
Short: fmt.Sprintf("Renew the %s", cert.LongName),
Long: fmt.Sprintf(genericCertRenewLongDesc, cert.LongName, cert.BaseName),
}
addFlags(cmd, cfg)
return cmd
}
func generateEmbeddedCertRenewalCommand(k string, cfg *renewConfig) *cobra.Command {
cmd := &cobra.Command{
Use: k,
Short: fmt.Sprintf("Renew the certificate embedded in %s", k),
Long: fmt.Sprintf(genericCertRenewEmbeddedLongDesc, k),
}
addFlags(cmd, cfg)
return cmd
}
func getRenewer(cfg *renewConfig, caCertBaseName string) (renewal.Interface, error) {
if cfg.useAPI {
kubeConfigPath := cmdutil.GetKubeConfigPath(cfg.kubeconfigPath)
// renew the certificate using the requested renew method
if flags.useAPI {
// renew using K8s certificate API
kubeConfigPath := cmdutil.GetKubeConfigPath(flags.kubeconfigPath)
client, err := kubeconfigutil.ClientSetFromFile(kubeConfigPath)
if err != nil {
return nil, err
kubeadmutil.CheckErr(err)
err = rm.RenewUsingCSRAPI(handler.Name, client)
kubeadmutil.CheckErr(err)
} else {
// renew using local certificate authorities.
// this operation can't complete in case the certificate key is not provided (external CA)
renewed, err := rm.RenewUsingLocalCA(handler.Name)
kubeadmutil.CheckErr(err)
if !renewed {
fmt.Printf("Detected external %s, %s can't be renewed\n", handler.CABaseName, handler.LongName)
return
}
return renewal.NewCertsAPIRenawal(client), nil
}
caCert, caKey, err := certsphase.LoadCertificateAuthority(cfg.cfg.CertificatesDir, caCertBaseName)
if err != nil {
return nil, err
}
return renewal.NewFileRenewal(caCert, caKey), nil
fmt.Printf("%s renewed\n", handler.LongName)
}

View File

@ -55,6 +55,10 @@ func TestCommandsGenerated(t *testing.T) {
"renew etcd-server",
"renew etcd-peer",
"renew etcd-healthcheck-client",
"renew admin.conf",
"renew scheduler.conf",
"renew controller-manager.conf",
}
renewCmd := newCmdCertsRenewal()
@ -79,19 +83,63 @@ func TestCommandsGenerated(t *testing.T) {
}
func TestRunRenewCommands(t *testing.T) {
tmpDir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpDir)
cfg := testutil.GetDefaultInternalConfig(t)
cfg.CertificatesDir = tmpDir
// Generate all the CA
CACerts := map[string]*x509.Certificate{}
CAKeys := map[string]crypto.Signer{}
for _, ca := range []*certsphase.KubeadmCert{
&certsphase.KubeadmCertRootCA,
&certsphase.KubeadmCertFrontProxyCA,
&certsphase.KubeadmCertEtcdCA,
} {
caCert, caKey, err := ca.CreateAsCA(cfg)
if err != nil {
t.Fatalf("couldn't write out CA %s: %v", ca.Name, err)
}
CACerts[ca.Name] = caCert
CAKeys[ca.Name] = caKey
}
// Generate all the signed certificates
for _, cert := range []*certsphase.KubeadmCert{
&certsphase.KubeadmCertAPIServer,
&certsphase.KubeadmCertKubeletClient,
&certsphase.KubeadmCertFrontProxyClient,
&certsphase.KubeadmCertEtcdAPIClient,
&certsphase.KubeadmCertEtcdServer,
&certsphase.KubeadmCertEtcdPeer,
&certsphase.KubeadmCertEtcdHealthcheck,
} {
caCert := CACerts[cert.CAName]
caKey := CAKeys[cert.CAName]
if err := cert.CreateFromCA(cfg, caCert, caKey); err != nil {
t.Fatalf("couldn't write certificate %s: %v", cert.Name, err)
}
}
// Generate all the kubeconfig files with embedded certs
for _, kubeConfig := range []string{
kubeadmconstants.AdminKubeConfigFileName,
kubeadmconstants.SchedulerKubeConfigFileName,
kubeadmconstants.ControllerManagerKubeConfigFileName,
} {
if err := kubeconfigphase.CreateKubeConfigFile(kubeConfig, tmpDir, cfg); err != nil {
t.Fatalf("couldn't create kubeconfig %q: %v", kubeConfig, err)
}
}
tests := []struct {
command string
CAs []*certsphase.KubeadmCert
Certs []*certsphase.KubeadmCert
KubeconfigFiles []string
}{
{
command: "all",
CAs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertRootCA,
&certsphase.KubeadmCertFrontProxyCA,
&certsphase.KubeadmCertEtcdCA,
},
Certs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertAPIServer,
&certsphase.KubeadmCertKubeletClient,
@ -109,90 +157,60 @@ func TestRunRenewCommands(t *testing.T) {
},
{
command: "apiserver",
CAs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertRootCA,
},
Certs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertAPIServer,
},
},
{
command: "apiserver-kubelet-client",
CAs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertRootCA,
},
Certs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertKubeletClient,
},
},
{
command: "apiserver-etcd-client",
CAs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertEtcdCA,
},
Certs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertEtcdAPIClient,
},
},
{
command: "front-proxy-client",
CAs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertFrontProxyCA,
},
Certs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertFrontProxyClient,
},
},
{
command: "etcd-server",
CAs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertEtcdCA,
},
Certs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertEtcdServer,
},
},
{
command: "etcd-peer",
CAs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertEtcdCA,
},
Certs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertEtcdPeer,
},
},
{
command: "etcd-healthcheck-client",
CAs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertEtcdCA,
},
Certs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertEtcdHealthcheck,
},
},
{
command: "admin.conf",
CAs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertRootCA,
},
KubeconfigFiles: []string{
kubeadmconstants.AdminKubeConfigFileName,
},
},
{
command: "scheduler.conf",
CAs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertRootCA,
},
KubeconfigFiles: []string{
kubeadmconstants.SchedulerKubeConfigFileName,
},
},
{
command: "controller-manager.conf",
CAs: []*certsphase.KubeadmCert{
&certsphase.KubeadmCertRootCA,
},
KubeconfigFiles: []string{
kubeadmconstants.ControllerManagerKubeConfigFileName,
},
@ -201,74 +219,43 @@ func TestRunRenewCommands(t *testing.T) {
for _, test := range tests {
t.Run(test.command, func(t *testing.T) {
tmpDir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpDir)
cfg := testutil.GetDefaultInternalConfig(t)
cfg.CertificatesDir = tmpDir
// Generate all the CA
CACerts := map[string]*x509.Certificate{}
CAKeys := map[string]crypto.Signer{}
for _, ca := range test.CAs {
caCert, caKey, err := ca.CreateAsCA(cfg)
if err != nil {
t.Fatalf("couldn't write out CA %s: %v", ca.Name, err)
}
CACerts[ca.Name] = caCert
CAKeys[ca.Name] = caKey
}
// Generate all the signed certificates (and store creation time)
createTime := map[string]time.Time{}
// Get file ModTime before renew
ModTime := map[string]time.Time{}
for _, cert := range test.Certs {
caCert := CACerts[cert.CAName]
caKey := CAKeys[cert.CAName]
if err := cert.CreateFromCA(cfg, caCert, caKey); err != nil {
t.Fatalf("couldn't write certificate %s: %v", cert.Name, err)
}
file, err := os.Stat(filepath.Join(tmpDir, fmt.Sprintf("%s.crt", cert.BaseName)))
if err != nil {
t.Fatalf("couldn't get certificate %s: %v", cert.Name, err)
}
createTime[cert.Name] = file.ModTime()
ModTime[cert.Name] = file.ModTime()
}
// Generate all the kubeconfig files with embedded certs(and store creation time)
for _, kubeConfig := range test.KubeconfigFiles {
if err := kubeconfigphase.CreateKubeConfigFile(kubeConfig, tmpDir, cfg); err != nil {
t.Fatalf("couldn't create kubeconfig %q: %v", kubeConfig, err)
}
file, err := os.Stat(filepath.Join(tmpDir, kubeConfig))
if err != nil {
t.Fatalf("couldn't get kubeconfig %s: %v", kubeConfig, err)
}
createTime[kubeConfig] = file.ModTime()
ModTime[kubeConfig] = file.ModTime()
}
// exec renew
renewCmds := getRenewSubCommands(tmpDir)
cmdtestutil.RunSubCommand(t, renewCmds, test.command, fmt.Sprintf("--cert-dir=%s", tmpDir))
// read renewed certificates and check the file is modified
// check the file is modified
for _, cert := range test.Certs {
file, err := os.Stat(filepath.Join(tmpDir, fmt.Sprintf("%s.crt", cert.BaseName)))
if err != nil {
t.Fatalf("couldn't get certificate %s: %v", cert.Name, err)
}
if createTime[cert.Name] == file.ModTime() {
if ModTime[cert.Name] == file.ModTime() {
t.Errorf("certificate %s was not renewed as expected", cert.Name)
}
}
// read renewed kubeconfig files and check the file is modified
for _, kubeConfig := range test.KubeconfigFiles {
file, err := os.Stat(filepath.Join(tmpDir, kubeConfig))
if err != nil {
t.Fatalf("couldn't get kubeconfig %s: %v", kubeConfig, err)
}
if createTime[kubeConfig] == file.ModTime() {
if ModTime[kubeConfig] == file.ModTime() {
t.Errorf("kubeconfig %s was not renewed as expected", kubeConfig)
}
}
@ -281,10 +268,22 @@ func TestRenewUsingCSR(t *testing.T) {
defer os.RemoveAll(tmpDir)
cert := &certs.KubeadmCertEtcdServer
renewCmds := getRenewSubCommands(tmpDir)
cmdtestutil.RunSubCommand(t, renewCmds, cert.Name, "--csr-only", "--csr-dir="+tmpDir)
cfg := testutil.GetDefaultInternalConfig(t)
cfg.CertificatesDir = tmpDir
if _, _, err := pkiutil.TryLoadCSRAndKeyFromDisk(tmpDir, cert.BaseName); err != nil {
t.Fatalf("couldn't load certificate %q: %v", cert.BaseName, err)
caCert, caKey, err := certsphase.KubeadmCertEtcdCA.CreateAsCA(cfg)
if err != nil {
t.Fatalf("couldn't write out CA %s: %v", certsphase.KubeadmCertEtcdCA.Name, err)
}
if err := cert.CreateFromCA(cfg, caCert, caKey); err != nil {
t.Fatalf("couldn't write certificate %s: %v", cert.Name, err)
}
renewCmds := getRenewSubCommands(tmpDir)
cmdtestutil.RunSubCommand(t, renewCmds, cert.Name, "--csr-only", "--csr-dir="+tmpDir, fmt.Sprintf("--cert-dir=%s", tmpDir))
if _, _, err := pkiutil.TryLoadCSRAndKeyFromDisk(tmpDir, cert.Name); err != nil {
t.Fatalf("couldn't load certificate %q: %v", cert.Name, err)
}
}

View File

@ -84,7 +84,7 @@ func NewKubeadmCommand(in io.Reader, out, err io.Writer) *cobra.Command {
cmds.AddCommand(NewCmdConfig(out))
cmds.AddCommand(NewCmdInit(out, nil))
cmds.AddCommand(NewCmdJoin(out, nil))
cmds.AddCommand(NewCmdReset(in, out))
cmds.AddCommand(NewCmdReset(in, out, nil))
cmds.AddCommand(NewCmdVersion(out))
cmds.AddCommand(NewCmdToken(out, err))
cmds.AddCommand(upgrade.NewCmdUpgrade(out))

View File

@ -334,7 +334,7 @@ func newInitData(cmd *cobra.Command, args []string, options *initOptions, out io
// if dry running creates a temporary folder for saving kubeadm generated files
dryRunDir := ""
if options.dryRun {
if dryRunDir, err = kubeadmconstants.CreateTempDirForKubeadm("kubeadm-init-dryrun"); err != nil {
if dryRunDir, err = kubeadmconstants.CreateTempDirForKubeadm("", "kubeadm-init-dryrun"); err != nil {
return nil, errors.Wrap(err, "couldn't create a temporary directory")
}
}

View File

@ -127,4 +127,7 @@ const (
// SkipCertificateKeyPrint flag instruct kubeadm to skip printing certificate key used to encrypt certs by 'kubeadm init'.
SkipCertificateKeyPrint = "skip-certificate-key-print"
// ForceReset flag instruct kubeadm to reset the node without prompting for confirmation
ForceReset = "force"
)

View File

@ -37,6 +37,7 @@ go_library(
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//vendor/github.com/lithammer/dedent:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],

View File

@ -20,6 +20,8 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
@ -58,6 +60,7 @@ func NewControlPlaneJoinPhase() workflow.Phase {
Short: "Join a machine as a control plane instance",
InheritFlags: getControlPlaneJoinPhaseFlags("all"),
RunAllSiblings: true,
ArgsValidator: cobra.NoArgs,
},
newEtcdLocalSubphase(),
newUpdateStatusSubphase(),
@ -68,10 +71,11 @@ func NewControlPlaneJoinPhase() workflow.Phase {
func newEtcdLocalSubphase() workflow.Phase {
return workflow.Phase{
Name: "etcd",
Short: "Add a new local etcd member",
Run: runEtcdPhase,
InheritFlags: getControlPlaneJoinPhaseFlags("etcd"),
Name: "etcd",
Short: "Add a new local etcd member",
Run: runEtcdPhase,
InheritFlags: getControlPlaneJoinPhaseFlags("etcd"),
ArgsValidator: cobra.NoArgs,
}
}
@ -83,17 +87,19 @@ func newUpdateStatusSubphase() workflow.Phase {
kubeadmconstants.ClusterStatusConfigMapKey,
kubeadmconstants.KubeadmConfigConfigMap,
),
Run: runUpdateStatusPhase,
InheritFlags: getControlPlaneJoinPhaseFlags("update-status"),
Run: runUpdateStatusPhase,
InheritFlags: getControlPlaneJoinPhaseFlags("update-status"),
ArgsValidator: cobra.NoArgs,
}
}
func newMarkControlPlaneSubphase() workflow.Phase {
return workflow.Phase{
Name: "mark-control-plane",
Short: "Mark a node as a control-plane",
Run: runMarkControlPlanePhase,
InheritFlags: getControlPlaneJoinPhaseFlags("mark-control-plane"),
Name: "mark-control-plane",
Short: "Mark a node as a control-plane",
Run: runMarkControlPlanePhase,
InheritFlags: getControlPlaneJoinPhaseFlags("mark-control-plane"),
ArgsValidator: cobra.NoArgs,
}
}

View File

@ -20,6 +20,7 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/spf13/cobra"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
@ -157,10 +158,11 @@ func newControlPlanePrepareKubeconfigSubphase() workflow.Phase {
func newControlPlanePrepareControlPlaneSubphase() workflow.Phase {
return workflow.Phase{
Name: "control-plane",
Short: "Generate the manifests for the new control plane components",
Run: runControlPlanePrepareControlPlaneSubphase, //NB. eventually in future we would like to break down this in sub phases for each component
InheritFlags: getControlPlanePreparePhaseFlags("control-plane"),
Name: "control-plane",
Short: "Generate the manifests for the new control plane components",
Run: runControlPlanePrepareControlPlaneSubphase, //NB. eventually in future we would like to break down this in sub phases for each component
InheritFlags: getControlPlanePreparePhaseFlags("control-plane"),
ArgsValidator: cobra.NoArgs,
}
}

View File

@ -16,7 +16,10 @@ limitations under the License.
package workflow
import "github.com/spf13/pflag"
import (
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// Phase provides an implementation of a workflow phase that allows
// creation of new phases by simply instantiating a variable of this type.
@ -71,6 +74,10 @@ type Phase struct {
// Nb. if two or more phases have the same local flags, please consider using local flags in the parent command
// or additional flags defined in the phase runner.
LocalFlags *pflag.FlagSet
// ArgsValidator defines the positional arg function to be used for validating args for this phase
// If not set a phase will adopt the args of the top level command.
ArgsValidator cobra.PositionalArgs
}
// AppendPhase adds the given phase to the nested, ordered sequence of phases.

View File

@ -372,6 +372,12 @@ func (e *Runner) BindToCommand(cmd *cobra.Command) {
// if this phase has children (not a leaf) it doesn't accept any args
if len(p.Phases) > 0 {
phaseCmd.Args = cobra.NoArgs
} else {
if p.ArgsValidator == nil {
phaseCmd.Args = cmd.Args
} else {
phaseCmd.Args = p.ArgsValidator
}
}
// adds the command to parent

View File

@ -297,6 +297,134 @@ func phaseBuilder5(name string, flags *pflag.FlagSet) Phase {
}
}
// argTest pairs a cobra positional-args validator with argument sets that are
// expected to pass and to fail that validator, so tests can verify both that
// the right validator was installed and that it behaves as expected.
type argTest struct {
	args cobra.PositionalArgs // the validator expected on the generated command
	pass []string             // arguments the validator must accept
	fail []string             // arguments the validator must reject
}
// phaseBuilder6 builds a test Phase with the given name, positional-args
// validator, and optional child phases; the short description is generated
// from the name.
func phaseBuilder6(name string, args cobra.PositionalArgs, phases ...Phase) Phase {
	p := Phase{
		Name:          name,
		Short:         fmt.Sprintf("long description for %s ...", name),
		ArgsValidator: args,
	}
	p.Phases = phases
	return p
}
// customArgs is a custom cobra.PositionalArgs function used in tests: it
// accepts any number of arguments as long as every one of them is the literal
// string "qux", and reports the first argument that is not.
func customArgs(cmd *cobra.Command, args []string) error {
	for _, arg := range args {
		if arg == "qux" {
			continue
		}
		return fmt.Errorf("arg %s does not equal qux", arg)
	}
	return nil
}
// TestBindToCommandArgRequirements verifies that BindToCommand installs the
// correct cobra positional-args validator on each generated phase subcommand:
// a leaf phase without its own ArgsValidator inherits the parent command's
// validator, while a phase that sets ArgsValidator gets exactly that function.
func TestBindToCommandArgRequirements(t *testing.T) {

	// because cobra.ExactArgs(1) == cobra.ExactArgs(3), it is needed
	// to run test argument sets that both pass and fail to ensure the correct function was set.
	var usecases = []struct {
		name      string
		runner    Runner
		testCases map[string]argTest
		cmd       *cobra.Command
	}{
		{
			name: "leaf command, no defined args, follow parent",
			runner: Runner{
				Phases: []Phase{phaseBuilder("foo")},
			},
			testCases: map[string]argTest{
				"phase foo": {
					pass: []string{"one", "two", "three"},
					fail: []string{"one", "two"},
					args: cobra.ExactArgs(3),
				},
			},
			cmd: &cobra.Command{
				Use:  "init",
				Args: cobra.ExactArgs(3),
			},
		},
		{
			name: "container cmd expect none, custom arg check for leaf",
			runner: Runner{
				Phases: []Phase{phaseBuilder6("foo", cobra.NoArgs,
					phaseBuilder6("bar", cobra.ExactArgs(1)),
					phaseBuilder6("baz", customArgs),
				)},
			},
			testCases: map[string]argTest{
				"phase foo": {
					pass: []string{},
					fail: []string{"one"},
					args: cobra.NoArgs,
				},
				"phase foo bar": {
					pass: []string{"one"},
					fail: []string{"one", "two"},
					args: cobra.ExactArgs(1),
				},
				"phase foo baz": {
					pass: []string{"qux"},
					fail: []string{"one"},
					args: customArgs,
				},
			},
			cmd: &cobra.Command{
				Use:  "init",
				Args: cobra.NoArgs,
			},
		},
	}

	for _, rt := range usecases {
		t.Run(rt.name, func(t *testing.T) {
			rt.runner.BindToCommand(rt.cmd)

			// Checks that cmd gets a new phase subcommand
			phaseCmd := getCmd(rt.cmd, "phase")
			if phaseCmd == nil {
				t.Error("cmd didn't have phase subcommand")
				return
			}

			for c, args := range rt.testCases {
				cCmd := getCmd(rt.cmd, c)
				if cCmd == nil {
					t.Errorf("cmd didn't have %s subcommand", c)
					continue
				}

				// Ensure the installed validator is the expected function
				// (compared by function pointer, since cobra validators
				// built from the same factory are otherwise indistinguishable).
				if reflect.ValueOf(cCmd.Args).Pointer() != reflect.ValueOf(args.args).Pointer() {
					t.Error("the function pointers were not equal")
				}

				// Test passing argument set
				err := cCmd.Args(cCmd, args.pass)
				if err != nil {
					t.Errorf("command %s should validate the args: %v\n %v", cCmd.Name(), args.pass, err)
				}

				// Test failing argument set.
				// Bug fix: the original message printed args.pass (and a nil
				// err) even though this branch exercises args.fail.
				err = cCmd.Args(cCmd, args.fail)
				if err == nil {
					t.Errorf("command %s should fail to validate the args: %v", cCmd.Name(), args.fail)
				}
			}
		})
	}
}
func TestBindToCommand(t *testing.T) {
var dummy string

View File

@ -28,6 +28,7 @@ import (
"github.com/lithammer/dedent"
"github.com/pkg/errors"
"github.com/spf13/cobra"
flag "github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
@ -35,6 +36,7 @@ import (
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow"
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd"
@ -48,99 +50,197 @@ import (
utilsexec "k8s.io/utils/exec"
)
// NewCmdReset returns the "kubeadm reset" command
func NewCmdReset(in io.Reader, out io.Writer) *cobra.Command {
var certsDir string
// resetOptions defines all the options exposed via flags by kubeadm reset.
type resetOptions struct {
	// certificatesDir is the certificates directory that reset cleans
	// (see the --cert-dir flag help text).
	certificatesDir string
	// criSocketPath is the path to the CRI socket; when left empty it is
	// auto-detected at data-initialization time.
	criSocketPath string
	// forceReset, when true, skips the interactive "[y/N]" confirmation prompt.
	forceReset bool
	// ignorePreflightErrors lists preflight error names (or "all") to ignore;
	// validated via validation.ValidateIgnorePreflightErrors.
	ignorePreflightErrors []string
	// kubeconfigPath is the kubeconfig file used to build a client set for
	// fetching the kubeadm-config ConfigMap from the cluster.
	kubeconfigPath string
}
// resetData defines all the runtime information used when running the kubeadm reset workflow;
// this data is shared across all the phases that are included in the workflow.
type resetData struct {
	// certificatesDir is the certificates directory to clean.
	certificatesDir string
	// client accesses the cluster; presumably nil when no kubeconfig could be
	// loaded — confirm against newResetData's error path.
	client clientset.Interface
	// criSocketPath is the (possibly auto-detected) CRI socket path.
	criSocketPath string
	// forceReset skips the interactive confirmation prompt when true.
	forceReset bool
	// ignorePreflightErrors is the validated set of preflight errors to ignore.
	ignorePreflightErrors sets.String
	// inputReader supplies user input for the confirmation prompt.
	inputReader io.Reader
	// outputWriter receives command output.
	outputWriter io.Writer
	// cfg is the InitConfiguration fetched from the cluster; may be nil when
	// the kubeadm-config ConfigMap could not be retrieved.
	cfg *kubeadmapi.InitConfiguration
}
// newResetOptions returns a resetOptions struct populated with the defaults
// for the kubeadm reset command flags: the default certificates directory,
// no forced reset, and the admin kubeconfig path.
func newResetOptions() *resetOptions {
	opts := &resetOptions{}
	opts.certificatesDir = kubeadmapiv1beta2.DefaultCertificatesDir
	opts.forceReset = false
	opts.kubeconfigPath = kubeadmconstants.GetAdminKubeConfigPath()
	return opts
}
// newResetData returns a new resetData struct to be used for the execution of the kubeadm reset workflow.
func newResetData(cmd *cobra.Command, options *resetOptions, in io.Reader, out io.Writer) (*resetData, error) {
var cfg *kubeadmapi.InitConfiguration
ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(options.ignorePreflightErrors)
if err != nil {
return nil, err
}
client, err := getClientset(options.kubeconfigPath, false)
if err == nil {
klog.V(1).Infof("[reset] Loaded client set from kubeconfig file: %s", options.kubeconfigPath)
cfg, err = configutil.FetchInitConfigurationFromCluster(client, out, "reset", false)
if err != nil {
klog.Warningf("[reset] Unable to fetch the kubeadm-config ConfigMap from cluster: %v", err)
}
} else {
klog.V(1).Infof("[reset] Could not obtain a client set from the kubeconfig file: %s", options.kubeconfigPath)
}
var criSocketPath string
var ignorePreflightErrors []string
var forceReset bool
var client clientset.Interface
kubeConfigFile := kubeadmconstants.GetAdminKubeConfigPath()
if options.criSocketPath == "" {
criSocketPath, err = resetDetectCRISocket(cfg)
if err != nil {
return nil, err
}
klog.V(1).Infof("[reset] Detected and using CRI socket: %s", criSocketPath)
}
return &resetData{
certificatesDir: options.certificatesDir,
client: client,
criSocketPath: criSocketPath,
forceReset: options.forceReset,
ignorePreflightErrors: ignorePreflightErrorsSet,
inputReader: in,
outputWriter: out,
cfg: cfg,
}, nil
}
// AddResetFlags adds the flags of the kubeadm reset command to the given
// flag set, binding each flag to the corresponding field of resetOptions.
func AddResetFlags(flagSet *flag.FlagSet, resetOptions *resetOptions) {
	flagSet.StringVar(
		&resetOptions.certificatesDir, options.CertificatesDir, resetOptions.certificatesDir,
		`The path to the directory where the certificates are stored. If specified, clean this directory.`,
	)
	flagSet.BoolVarP(
		// Use the value already present in resetOptions as the flag default
		// instead of a hard-coded false, consistently with the StringVar above,
		// so defaults set by newResetOptions (or by a caller-supplied
		// resetOptions) are honored.
		&resetOptions.forceReset, options.ForceReset, "f", resetOptions.forceReset,
		"Reset the node without prompting for confirmation.",
	)

	options.AddKubeConfigFlag(flagSet, &resetOptions.kubeconfigPath)
	options.AddIgnorePreflightErrorsFlag(flagSet, &resetOptions.ignorePreflightErrors)
	cmdutil.AddCRISocketFlag(flagSet, &resetOptions.criSocketPath)
}
// NewCmdReset returns the "kubeadm reset" command
func NewCmdReset(in io.Reader, out io.Writer, resetOptions *resetOptions) *cobra.Command {
if resetOptions == nil {
resetOptions = newResetOptions()
}
resetRunner := workflow.NewRunner()
cmd := &cobra.Command{
Use: "reset",
Short: "Run this to revert any changes made to this host by 'kubeadm init' or 'kubeadm join'",
Run: func(cmd *cobra.Command, args []string) {
ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(ignorePreflightErrors)
c, err := resetRunner.InitData(args)
kubeadmutil.CheckErr(err)
var cfg *kubeadmapi.InitConfiguration
client, err = getClientset(kubeConfigFile, false)
if err == nil {
klog.V(1).Infof("[reset] Loaded client set from kubeconfig file: %s", kubeConfigFile)
cfg, err = configutil.FetchInitConfigurationFromCluster(client, os.Stdout, "reset", false)
if err != nil {
klog.Warningf("[reset] Unable to fetch the kubeadm-config ConfigMap from cluster: %v", err)
}
} else {
klog.V(1).Infof("[reset] Could not obtain a client set from the kubeconfig file: %s", kubeConfigFile)
}
if criSocketPath == "" {
criSocketPath, err = resetDetectCRISocket(cfg)
kubeadmutil.CheckErr(err)
klog.V(1).Infof("[reset] Detected and using CRI socket: %s", criSocketPath)
}
r, err := NewReset(in, ignorePreflightErrorsSet, forceReset, certsDir, criSocketPath)
err = resetRunner.Run(args)
kubeadmutil.CheckErr(err)
kubeadmutil.CheckErr(r.Run(out, client, cfg))
// TODO: remove this once we have all phases in place.
// the method joinData.Run() itself should be removed too.
data := c.(*resetData)
kubeadmutil.CheckErr(data.Run())
},
}
options.AddIgnorePreflightErrorsFlag(cmd.PersistentFlags(), &ignorePreflightErrors)
options.AddKubeConfigFlag(cmd.PersistentFlags(), &kubeConfigFile)
AddResetFlags(cmd.Flags(), resetOptions)
cmd.PersistentFlags().StringVar(
&certsDir, "cert-dir", kubeadmapiv1beta2.DefaultCertificatesDir,
"The path to the directory where the certificates are stored. If specified, clean this directory.",
)
// initialize the workflow runner with the list of phases
// TODO: append phases here
cmdutil.AddCRISocketFlag(cmd.PersistentFlags(), &criSocketPath)
// sets the data builder function, that will be used by the runner
// both when running the entire workflow or single phases
resetRunner.SetDataInitializer(func(cmd *cobra.Command, args []string) (workflow.RunData, error) {
return newResetData(cmd, resetOptions, in, out)
})
cmd.PersistentFlags().BoolVarP(
&forceReset, "force", "f", false,
"Reset the node without prompting for confirmation.",
)
// binds the Runner to kubeadm init command by altering
// command help, adding --skip-phases flag and by adding phases subcommands
resetRunner.BindToCommand(cmd)
return cmd
}
// Reset defines struct used for kubeadm reset command
type Reset struct {
certsDir string
criSocketPath string
// Cfg returns the InitConfiguration fetched from the cluster
// (may be nil when it could not be retrieved).
func (r *resetData) Cfg() *kubeadmapi.InitConfiguration {
	return r.cfg
}
// NewReset instantiate Reset struct
func NewReset(in io.Reader, ignorePreflightErrors sets.String, forceReset bool, certsDir, criSocketPath string) (*Reset, error) {
if !forceReset {
// CertificatesDir returns the directory holding the certificates that
// reset should clean.
func (r *resetData) CertificatesDir() string {
	return r.certificatesDir
}
// Client returns the client set used for accessing the cluster
// (may be nil when no kubeconfig could be loaded).
func (r *resetData) Client() clientset.Interface {
	return r.client
}
// ForceReset returns the forceReset flag; when true the interactive
// confirmation prompt is skipped.
func (r *resetData) ForceReset() bool {
	return r.forceReset
}
// InputReader returns the io.Reader used to read user input
// (e.g. the reset confirmation answer).
func (r *resetData) InputReader() io.Reader {
	return r.inputReader
}
// IgnorePreflightErrors returns the set of preflight error names to ignore
// while running the reset preflight checks.
func (r *resetData) IgnorePreflightErrors() sets.String {
	return r.ignorePreflightErrors
}
func (r *resetData) preflight() error {
if !r.ForceReset() {
fmt.Println("[reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.")
fmt.Print("[reset] Are you sure you want to proceed? [y/N]: ")
s := bufio.NewScanner(in)
s := bufio.NewScanner(r.InputReader())
s.Scan()
if err := s.Err(); err != nil {
return nil, err
return err
}
if strings.ToLower(s.Text()) != "y" {
return nil, errors.New("Aborted reset operation")
return errors.New("Aborted reset operation")
}
}
fmt.Println("[preflight] Running pre-flight checks")
if err := preflight.RunRootCheckOnly(ignorePreflightErrors); err != nil {
return nil, err
if err := preflight.RunRootCheckOnly(r.IgnorePreflightErrors()); err != nil {
return err
}
return &Reset{
certsDir: certsDir,
criSocketPath: criSocketPath,
}, nil
return nil
}
// Run reverts any changes made to this host by "kubeadm init" or "kubeadm join".
func (r *Reset) Run(out io.Writer, client clientset.Interface, cfg *kubeadmapi.InitConfiguration) error {
func (r *resetData) Run() error {
var dirsToClean []string
cfg := r.Cfg()
certsDir := r.CertificatesDir()
client := r.Client()
err := r.preflight()
if err != nil {
return err
}
// Reset the ClusterStatus for a given control-plane node.
if isControlPlane() && cfg != nil {
@ -203,10 +303,10 @@ func (r *Reset) Run(out io.Writer, client clientset.Interface, cfg *kubeadmapi.I
// Remove contents from the config and pki directories
klog.V(1).Infoln("[reset] Removing contents from the config and pki directories")
if r.certsDir != kubeadmapiv1beta2.DefaultCertificatesDir {
klog.Warningf("[reset] WARNING: Cleaning a non-default certificates directory: %q\n", r.certsDir)
if certsDir != kubeadmapiv1beta2.DefaultCertificatesDir {
klog.Warningf("[reset] WARNING: Cleaning a non-default certificates directory: %q\n", certsDir)
}
resetConfigDir(kubeadmconstants.KubernetesDir, r.certsDir)
resetConfigDir(kubeadmconstants.KubernetesDir, certsDir)
// Output help text instructing user how to remove iptables rules
msg := dedent.Dedent(`

View File

@ -17,7 +17,6 @@ limitations under the License.
package cmd
import (
"io"
"io/ioutil"
"os"
"path/filepath"
@ -26,8 +25,6 @@ import (
"github.com/lithammer/dedent"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
@ -85,21 +82,6 @@ func assertDirEmpty(t *testing.T, path string) {
}
}
func TestNewReset(t *testing.T) {
var in io.Reader
certsDir := kubeadmapiv1beta2.DefaultCertificatesDir
criSocketPath := kubeadmconstants.DefaultDockerCRISocket
forceReset := true
ignorePreflightErrors := []string{"all"}
ignorePreflightErrorsSet, _ := validation.ValidateIgnorePreflightErrors(ignorePreflightErrors)
NewReset(in, ignorePreflightErrorsSet, forceReset, certsDir, criSocketPath)
ignorePreflightErrors = []string{}
ignorePreflightErrorsSet, _ = validation.ValidateIgnorePreflightErrors(ignorePreflightErrors)
NewReset(in, ignorePreflightErrorsSet, forceReset, certsDir, criSocketPath)
}
func TestConfigDirCleaner(t *testing.T) {
tests := map[string]struct {
resetDir string

View File

@ -42,6 +42,7 @@ import (
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
phaseutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases"
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
tokenphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
@ -215,6 +216,12 @@ func RunCreateToken(out io.Writer, client clientset.Interface, cfgPath string, c
// This call returns the ready-to-use configuration based on the configuration file that might or might not exist and the default cfg populated by flags
klog.V(1).Infoln("[token] loading configurations")
// In fact, we don't do any CRI ops at all.
// This is just to force skipping the CRI detection.
// Ref: https://github.com/kubernetes/kubeadm/issues/1559
cfg.NodeRegistration.CRISocket = kubeadmconstants.DefaultDockerCRISocket
internalcfg, err := configutil.LoadOrDefaultInitConfiguration(cfgPath, cfg)
if err != nil {
return err

View File

@ -58,7 +58,6 @@ go_test(
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/phases/upgrade:go_default_library",
],
)

View File

@ -238,14 +238,14 @@ func PerformControlPlaneUpgrade(flags *applyFlags, client clientset.Interface, w
}
// GetPathManagerForUpgrade returns a path manager properly configured for the given InitConfiguration.
func GetPathManagerForUpgrade(internalcfg *kubeadmapi.InitConfiguration, etcdUpgrade bool) (upgrade.StaticPodPathManager, error) {
func GetPathManagerForUpgrade(kubernetesDir string, internalcfg *kubeadmapi.InitConfiguration, etcdUpgrade bool) (upgrade.StaticPodPathManager, error) {
isHAEtcd := etcdutil.CheckConfigurationIsHA(&internalcfg.Etcd)
return upgrade.NewKubeStaticPodPathManagerUsingTempDirs(constants.GetStaticPodDirectory(), true, etcdUpgrade && !isHAEtcd)
return upgrade.NewKubeStaticPodPathManagerUsingTempDirs(kubernetesDir, true, etcdUpgrade && !isHAEtcd)
}
// PerformStaticPodUpgrade performs the upgrade of the control plane components for a static pod hosted cluster
func PerformStaticPodUpgrade(client clientset.Interface, waiter apiclient.Waiter, internalcfg *kubeadmapi.InitConfiguration, etcdUpgrade, renewCerts bool) error {
pathManager, err := GetPathManagerForUpgrade(internalcfg, etcdUpgrade)
pathManager, err := GetPathManagerForUpgrade(constants.KubernetesDir, internalcfg, etcdUpgrade)
if err != nil {
return err
}
@ -257,7 +257,7 @@ func PerformStaticPodUpgrade(client clientset.Interface, waiter apiclient.Waiter
// DryRunStaticPodUpgrade fakes an upgrade of the control plane
func DryRunStaticPodUpgrade(internalcfg *kubeadmapi.InitConfiguration) error {
dryRunManifestDir, err := constants.CreateTempDirForKubeadm("kubeadm-upgrade-dryrun")
dryRunManifestDir, err := constants.CreateTempDirForKubeadm("", "kubeadm-upgrade-dryrun")
if err != nil {
return err
}

View File

@ -22,7 +22,6 @@ import (
"testing"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
func TestSessionIsInteractive(t *testing.T) {
@ -114,14 +113,11 @@ func TestGetPathManagerForUpgrade(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error making temporary directory: %v", err)
}
oldK8sDir := constants.KubernetesDir
constants.KubernetesDir = tmpdir
defer func() {
constants.KubernetesDir = oldK8sDir
os.RemoveAll(tmpdir)
}()
pathmgr, err := GetPathManagerForUpgrade(test.cfg, test.etcdUpgrade)
pathmgr, err := GetPathManagerForUpgrade(tmpdir, test.cfg, test.etcdUpgrade)
if err != nil {
t.Fatalf("unexpected error creating path manager: %v", err)
}

View File

@ -33,11 +33,9 @@ import (
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
)
// KubernetesDir is the directory Kubernetes owns for storing various configuration files
// This semi-constant MUST NOT be modified during runtime. It's a variable solely for use in unit testing.
var KubernetesDir = "/etc/kubernetes"
const (
// KubernetesDir is the directory Kubernetes owns for storing various configuration files
KubernetesDir = "/etc/kubernetes"
// ManifestsSubDirName defines directory name to store manifests
ManifestsSubDirName = "manifests"
// TempDirForKubeadm defines temporary directory for kubeadm
@ -448,8 +446,12 @@ func AddSelfHostedPrefix(componentName string) string {
}
// CreateTempDirForKubeadm is a function that creates a temporary directory under /etc/kubernetes/tmp (not using /tmp as that would potentially be dangerous)
func CreateTempDirForKubeadm(dirName string) (string, error) {
func CreateTempDirForKubeadm(kubernetesDir, dirName string) (string, error) {
tempDir := path.Join(KubernetesDir, TempDirForKubeadm)
if len(kubernetesDir) != 0 {
tempDir = path.Join(kubernetesDir, TempDirForKubeadm)
}
// creates target folder if not already exists
if err := os.MkdirAll(tempDir, 0700); err != nil {
return "", errors.Wrapf(err, "failed to create directory %q", tempDir)
@ -463,8 +465,12 @@ func CreateTempDirForKubeadm(dirName string) (string, error) {
}
// CreateTimestampDirForKubeadm is a function that creates a temporary directory under /etc/kubernetes/tmp formatted with the current date
func CreateTimestampDirForKubeadm(dirName string) (string, error) {
func CreateTimestampDirForKubeadm(kubernetesDir, dirName string) (string, error) {
tempDir := path.Join(KubernetesDir, TempDirForKubeadm)
if len(kubernetesDir) != 0 {
tempDir = path.Join(kubernetesDir, TempDirForKubeadm)
}
// creates target folder if not already exists
if err := os.MkdirAll(tempDir, 0700); err != nil {
return "", errors.Wrapf(err, "failed to create directory %q", tempDir)

View File

@ -260,7 +260,7 @@ var (
// KubeadmCertKubeletClient is the definition of the cert used by the API server to access the kubelet.
KubeadmCertKubeletClient = KubeadmCert{
Name: "apiserver-kubelet-client",
LongName: "Client certificate for the API server to connect to kubelet",
LongName: "certificate for the API server to connect to kubelet",
BaseName: kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName,
CAName: "ca",
config: certutil.Config{
@ -284,7 +284,7 @@ var (
KubeadmCertFrontProxyClient = KubeadmCert{
Name: "front-proxy-client",
BaseName: kubeadmconstants.FrontProxyClientCertAndKeyBaseName,
LongName: "client for the front proxy",
LongName: "certificate for the front proxy client",
CAName: "front-proxy-ca",
config: certutil.Config{
CommonName: kubeadmconstants.FrontProxyClientCertCommonName,
@ -322,7 +322,7 @@ var (
// KubeadmCertEtcdPeer is the definition of the cert used by etcd peers to access each other.
KubeadmCertEtcdPeer = KubeadmCert{
Name: "etcd-peer",
LongName: "credentials for etcd nodes to communicate with each other",
LongName: "certificate for etcd nodes to communicate with each other",
BaseName: kubeadmconstants.EtcdPeerCertAndKeyBaseName,
CAName: "etcd-ca",
config: certutil.Config{
@ -336,7 +336,7 @@ var (
// KubeadmCertEtcdHealthcheck is the definition of the cert used by Kubernetes to check the health of the etcd server.
KubeadmCertEtcdHealthcheck = KubeadmCert{
Name: "etcd-healthcheck-client",
LongName: "client certificate for liveness probes to healtcheck etcd",
LongName: "certificate for liveness probes to healtcheck etcd",
BaseName: kubeadmconstants.EtcdHealthcheckClientCertAndKeyBaseName,
CAName: "etcd-ca",
config: certutil.Config{
@ -348,7 +348,7 @@ var (
// KubeadmCertEtcdAPIClient is the definition of the cert used by the API server to access etcd.
KubeadmCertEtcdAPIClient = KubeadmCert{
Name: "apiserver-etcd-client",
LongName: "client apiserver uses to access etcd",
LongName: "certificate the apiserver uses to access etcd",
BaseName: kubeadmconstants.APIServerEtcdClientCertAndKeyBaseName,
CAName: "etcd-ca",
config: certutil.Config{

View File

@ -3,20 +3,24 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"certsapi.go",
"filerenewal.go",
"interface.go",
"renewal.go",
"apirenewer.go",
"filerenewer.go",
"manager.go",
"readwriter.go",
],
importpath = "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/renewal",
visibility = ["//visibility:public"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/phases/certs:go_default_library",
"//cmd/kubeadm/app/util/pkiutil:go_default_library",
"//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//staging/src/k8s.io/client-go/util/certificate/csr:go_default_library",
"//staging/src/k8s.io/client-go/util/keyutil:go_default_library",
@ -27,11 +31,14 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"filerenewal_test.go",
"renewal_test.go",
"apirenewer_test.go",
"filerenewer_test.go",
"manager_test.go",
"readwriter_test.go",
],
embed = [":go_default_library"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/util/certs:go_default_library",
"//cmd/kubeadm/app/util/kubeconfig:go_default_library",
"//cmd/kubeadm/app/util/pkiutil:go_default_library",

View File

@ -27,7 +27,7 @@ import (
certsapi "k8s.io/api/certificates/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
clientset "k8s.io/client-go/kubernetes"
certstype "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
certutil "k8s.io/client-go/util/cert"
csrutil "k8s.io/client-go/util/certificate/csr"
@ -38,20 +38,20 @@ const certAPIPrefixName = "kubeadm-cert"
var watchTimeout = 5 * time.Minute
// CertsAPIRenewal creates new certificates using the certs API
type CertsAPIRenewal struct {
// APIRenewer define a certificate renewer implementation that uses the K8s certificate API
type APIRenewer struct {
client certstype.CertificatesV1beta1Interface
}
// NewCertsAPIRenawal takes a Kubernetes interface and returns a renewal Interface.
func NewCertsAPIRenawal(client kubernetes.Interface) Interface {
return &CertsAPIRenewal{
// NewAPIRenewer a new certificate renewer implementation that uses the K8s certificate API
func NewAPIRenewer(client clientset.Interface) *APIRenewer {
return &APIRenewer{
client: client.CertificatesV1beta1(),
}
}
// Renew takes a certificate using the cert and key.
func (r *CertsAPIRenewal) Renew(cfg *certutil.Config) (*x509.Certificate, crypto.Signer, error) {
// Renew a certificate using the K8s certificate API
func (r *APIRenewer) Renew(cfg *certutil.Config) (*x509.Certificate, crypto.Signer, error) {
reqTmp := &x509.CertificateRequest{
Subject: pkix.Name{
CommonName: cfg.CommonName,

View File

@ -0,0 +1,119 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package renewal
import (
"crypto"
"crypto/x509"
"testing"
"time"
certsapi "k8s.io/api/certificates/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
fakecerts "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake"
k8stesting "k8s.io/client-go/testing"
certutil "k8s.io/client-go/util/cert"
pkiutil "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil"
)
// TestAPIRenewer verifies that APIRenewer.Renew submits a CSR to a (fake)
// certificates API, observes the request being approved and signed, and
// returns a certificate that verifies against the signing CA.
func TestAPIRenewer(t *testing.T) {
	caCertCfg := &certutil.Config{CommonName: "kubernetes"}
	caCert, caKey, err := pkiutil.NewCertificateAuthority(caCertCfg)
	if err != nil {
		t.Fatalf("couldn't create CA: %v", err)
	}

	// fake certificates API client driven by the reactors registered below
	client := &fakecerts.FakeCertificatesV1beta1{
		Fake: &k8stesting.Fake{},
	}
	certReq := getCertReq(t, caCert, caKey)
	certReqNoCert := certReq.DeepCopy()
	certReqNoCert.Status.Certificate = nil
	client.AddReactor("get", "certificatesigningrequests", defaultReactionFunc(certReq))
	// the watch first delivers the CSR without a signed certificate and then
	// with one, simulating the CSR being fulfilled while Renew is waiting
	watcher := watch.NewFakeWithChanSize(3, false)
	watcher.Add(certReqNoCert)
	watcher.Modify(certReqNoCert)
	watcher.Modify(certReq)
	client.AddWatchReactor("certificatesigningrequests", k8stesting.DefaultWatchReactor(watcher, nil))

	// override the timeout so tests are faster
	watchTimeout = time.Second

	certCfg := &certutil.Config{
		CommonName: "test-certs",
		AltNames: certutil.AltNames{
			DNSNames: []string{"test-domain.space"},
		},
		Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	}

	renewer := &APIRenewer{
		client: client,
	}

	cert, _, err := renewer.Renew(certCfg)
	if err != nil {
		t.Fatalf("unexpected error renewing cert: %v", err)
	}

	// the renewed certificate must chain to the CA and carry the requested
	// DNS name and usages
	pool := x509.NewCertPool()
	pool.AddCert(caCert)
	_, err = cert.Verify(x509.VerifyOptions{
		DNSName:   "test-domain.space",
		Roots:     pool,
		KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	})
	if err != nil {
		t.Errorf("couldn't verify new cert: %v", err)
	}
}
// defaultReactionFunc returns a fake-client reaction that answers every
// matching action with the given object and no error.
func defaultReactionFunc(obj runtime.Object) k8stesting.ReactionFunc {
	return func(act k8stesting.Action) (bool, runtime.Object, error) {
		return true, obj, nil
	}
}
// getCertReq builds a fake, already-approved CertificateSigningRequest whose
// status carries a certificate signed by the given CA — i.e. the shape the
// certificates API returns once a CSR has been approved and signed.
func getCertReq(t *testing.T, caCert *x509.Certificate, caKey crypto.Signer) *certsapi.CertificateSigningRequest {
	cert, _, err := pkiutil.NewCertAndKey(caCert, caKey, &certutil.Config{
		CommonName: "testcert",
		AltNames: certutil.AltNames{
			DNSNames: []string{"test-domain.space"},
		},
		Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	})
	if err != nil {
		t.Fatalf("couldn't generate cert: %v", err)
	}
	return &certsapi.CertificateSigningRequest{
		ObjectMeta: metav1.ObjectMeta{
			Name: "testcert",
		},
		Status: certsapi.CertificateSigningRequestStatus{
			// marked approved so a renewer treats the CSR as fulfilled
			Conditions: []certsapi.CertificateSigningRequestCondition{
				{
					Type: certsapi.CertificateApproved,
				},
			},
			Certificate: pkiutil.EncodeCertPEM(cert),
		},
	}
}

View File

@ -24,21 +24,21 @@ import (
"k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil"
)
// FileRenewal renews a certificate using local certs
type FileRenewal struct {
// FileRenewer define a certificate renewer implementation that uses given CA cert and key for generating new certficiates
type FileRenewer struct {
caCert *x509.Certificate
caKey crypto.Signer
}
// NewFileRenewal takes a certificate pair to construct the Interface.
func NewFileRenewal(caCert *x509.Certificate, caKey crypto.Signer) Interface {
return &FileRenewal{
// NewFileRenewer returns a new certificate renewer that uses given CA cert and key for generating new certficiates
func NewFileRenewer(caCert *x509.Certificate, caKey crypto.Signer) *FileRenewer {
return &FileRenewer{
caCert: caCert,
caKey: caKey,
}
}
// Renew takes a certificate using the cert and key
func (r *FileRenewal) Renew(cfg *certutil.Config) (*x509.Certificate, crypto.Signer, error) {
// Renew a certificate using a given CA cert and key
func (r *FileRenewer) Renew(cfg *certutil.Config) (*x509.Certificate, crypto.Signer, error) {
return pkiutil.NewCertAndKey(r.caCert, r.caKey, cfg)
}

View File

@ -21,18 +21,13 @@ import (
"testing"
certutil "k8s.io/client-go/util/cert"
"k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil"
)
func TestFileRenew(t *testing.T) {
caCertCfg := &certutil.Config{CommonName: "kubernetes"}
caCert, caKey, err := pkiutil.NewCertificateAuthority(caCertCfg)
if err != nil {
t.Fatalf("couldn't create CA: %v", err)
}
fr := NewFileRenewal(caCert, caKey)
func TestFileRenewer(t *testing.T) {
// creates a File renewer using a test Certificate authority
fr := NewFileRenewer(testCACert, testCAKey)
// renews a certificate
certCfg := &certutil.Config{
CommonName: "test-certs",
AltNames: certutil.AltNames{
@ -46,8 +41,9 @@ func TestFileRenew(t *testing.T) {
t.Fatalf("unexpected error renewing cert: %v", err)
}
// verify the renewed certificate
pool := x509.NewCertPool()
pool.AddCert(caCert)
pool.AddCert(testCACert)
_, err = cert.Verify(x509.VerifyOptions{
DNSName: "test-domain.space",

View File

@ -1,29 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package renewal
import (
"crypto"
"crypto/x509"
certutil "k8s.io/client-go/util/cert"
)
// Interface represents a standard way to renew a certificate.
type Interface interface {
Renew(*certutil.Config) (*x509.Certificate, crypto.Signer, error)
}

View File

@ -0,0 +1,288 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package renewal
import (
"crypto/x509"
"sort"
"github.com/pkg/errors"
clientset "k8s.io/client-go/kubernetes"
certutil "k8s.io/client-go/util/cert"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
"k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil"
)
// Manager can be used to coordinate certificate renewal and related processes,
// like CSR generation or checking certificate expiration
type Manager struct {
	// cfg holds the kubeadm ClusterConfiguration
	cfg *kubeadmapi.ClusterConfiguration

	// kubernetesDir holds the directory where kubeConfig files are stored
	kubernetesDir string

	// certificates contains the certificateRenewHandler controlled by this manager,
	// indexed by certificate Name (for PKI certificates) or by kubeConfig file name
	certificates map[string]*CertificateRenewHandler
}
// CertificateRenewHandler defines required info for renewing a certificate
type CertificateRenewHandler struct {
	// Name of the certificate to be used for UX.
	// This value can be used to trigger operations on this certificate
	Name string

	// LongName of the certificate to be used for UX
	LongName string

	// FileName defines the name (or the BaseName) of the certificate file
	FileName string

	// CABaseName defines the base name for the CA that should be used for certificate renewal
	CABaseName string

	// readwriter defines a CertificateReadWriter to be used for certificate renewal
	readwriter certificateReadWriter
}
// NewManager returns a new certificate renewal manager ready for handling the
// certificates in the cluster: one CertificateRenewHandler per signed PKI
// certificate plus one per renewable certificate embedded in a kubeConfig file.
func NewManager(cfg *kubeadmapi.ClusterConfiguration, kubernetesDir string) (*Manager, error) {
	rm := &Manager{
		cfg:           cfg,
		kubernetesDir: kubernetesDir,
		certificates:  map[string]*CertificateRenewHandler{},
	}

	// gets the list of certificates that are expected according to the current cluster configuration
	certListFunc := certsphase.GetDefaultCertList
	if cfg.Etcd.External != nil {
		certListFunc = certsphase.GetCertsWithoutEtcd
	}
	certTree, err := certListFunc().AsMap().CertTree()
	if err != nil {
		return nil, err
	}

	// create a CertificateRenewHandler for each signed certificate in the certificate tree;
	// NB. we are not offering support for renewing CAs; this would cause serious consequences
	for ca, certs := range certTree {
		for _, cert := range certs {
			// create a ReadWriter for certificates stored in the K8s local PKI
			pkiReadWriter := newPKICertificateReadWriter(rm.cfg.CertificatesDir, cert.BaseName)

			// adds the certificateRenewHandler.
			// PKI certificates are indexed by name, that is a well-known constant defined
			// in the certsphase package and that can be reused across all the kubeadm codebase
			rm.certificates[cert.Name] = &CertificateRenewHandler{
				Name:       cert.Name,
				LongName:   cert.LongName,
				FileName:   cert.BaseName,
				CABaseName: ca.BaseName, // NB. this is a path for etcd certs (they are stored in a subfolder)
				readwriter: pkiReadWriter,
			}
		}
	}

	// gets the list of certificates embedded in kubeConfig files that should be considered for renewal
	kubeConfigs := []struct {
		longName string
		fileName string
	}{
		{
			longName: "certificate embedded in the kubeconfig file for the admin to use and for kubeadm itself",
			fileName: kubeadmconstants.AdminKubeConfigFileName,
		},
		{
			longName: "certificate embedded in the kubeconfig file for the controller manager to use",
			fileName: kubeadmconstants.ControllerManagerKubeConfigFileName,
		},
		{
			longName: "certificate embedded in the kubeconfig file for the scheduler manager to use",
			fileName: kubeadmconstants.SchedulerKubeConfigFileName,
		},
		// NB. we are excluding KubeletKubeConfig from renewal because management of this certificate is delegated to kubelet
	}

	// create a CertificateRenewHandler for each kubeConfig file
	for _, kubeConfig := range kubeConfigs {
		// create a ReadWriter for certificates embedded in kubeConfig files
		kubeConfigReadWriter := newKubeconfigReadWriter(kubernetesDir, kubeConfig.fileName)

		// adds the certificateRenewHandler.
		// Certificates embedded in kubeConfig files are indexed by fileName, that is a well-known constant defined
		// in the kubeadm constants package and that can be reused across all the kubeadm codebase
		rm.certificates[kubeConfig.fileName] = &CertificateRenewHandler{
			Name:       kubeConfig.fileName, // we are using fileName as name, because there is nothing similar outside
			LongName:   kubeConfig.longName,
			FileName:   kubeConfig.fileName,
			CABaseName: kubeadmconstants.CACertAndKeyBaseName, // all certificates in kubeConfig files are signed by the Kubernetes CA
			readwriter: kubeConfigReadWriter,
		}
	}

	return rm, nil
}
// Certificates return the list of certificates controlled by this Manager
func (rm *Manager) Certificates() []*CertificateRenewHandler {
	handlers := make([]*CertificateRenewHandler, 0, len(rm.certificates))
	for _, handler := range rm.certificates {
		handlers = append(handlers, handler)
	}
	// Map iteration order is random; sort by name so callers get deterministic output.
	sort.Slice(handlers, func(a, b int) bool { return handlers[a].Name < handlers[b].Name })
	return handlers
}
// RenewUsingLocalCA executes certificate renewal using local certificate authorities for generating new certs.
// For PKI certificates, use the name defined in the certsphase package, while for certificates
// embedded in the kubeConfig files, use the kubeConfig file name defined in the kubeadm constants package.
// If you use the CertificateRenewHandler returned by Certificates func, handler.Name already contains the right value.
// It returns false (with a nil error) when the signing CA is external and renewal is therefore not possible.
func (rm *Manager) RenewUsingLocalCA(name string) (bool, error) {
	handler, found := rm.certificates[name]
	if !found {
		return false, errors.Errorf("%s is not a valid certificate for this cluster", name)
	}

	// Detect the external CA case (CA certificate provided without the CA key).
	externalCA := false
	switch handler.CABaseName {
	case kubeadmconstants.CACertAndKeyBaseName:
		externalCA, _ = certsphase.UsingExternalCA(rm.cfg)
	case kubeadmconstants.FrontProxyCACertAndKeyBaseName:
		externalCA, _ = certsphase.UsingExternalFrontProxyCA(rm.cfg)
	case kubeadmconstants.EtcdCACertAndKeyBaseName:
		externalCA = false
	default:
		return false, errors.Errorf("unknown certificate authority %s", handler.CABaseName)
	}

	// Without the CA key it is not possible to renew certificates, so return early.
	if externalCA {
		return false, nil
	}

	// Read the current certificate and derive the config for the renewed one.
	currentCert, err := handler.readwriter.Read()
	if err != nil {
		return false, err
	}
	certConfig := certToConfig(currentCert)

	// Load the signing CA certificate and key from disk.
	caCert, caKey, err := certsphase.LoadCertificateAuthority(rm.cfg.CertificatesDir, handler.CABaseName)
	if err != nil {
		return false, err
	}

	// Issue a new certificate with the same config and persist it over the old one.
	newCert, newKey, err := NewFileRenewer(caCert, caKey).Renew(certConfig)
	if err != nil {
		return false, errors.Wrapf(err, "failed to renew certificate %s", name)
	}
	if err := handler.readwriter.Write(newCert, newKey); err != nil {
		return false, err
	}
	return true, nil
}
// RenewUsingCSRAPI executes certificate renewal uses the K8s certificate API.
// For PKI certificates, use the name defined in the certsphase package, while for certificates
// embedded in the kubeConfig files, use the kubeConfig file name defined in the kubeadm constants package.
// If you use the CertificateRenewHandler returned by Certificates func, handler.Name already contains the right value.
func (rm *Manager) RenewUsingCSRAPI(name string, client clientset.Interface) error {
	handler, found := rm.certificates[name]
	if !found {
		return errors.Errorf("%s is not a valid certificate for this cluster", name)
	}

	// Read the current certificate and derive the config for the renewed one.
	currentCert, err := handler.readwriter.Read()
	if err != nil {
		return err
	}

	// Issue a new certificate with the same config through the K8s certificates API.
	newCert, newKey, err := NewAPIRenewer(client).Renew(certToConfig(currentCert))
	if err != nil {
		return errors.Wrapf(err, "failed to renew certificate %s", name)
	}

	// Persist the renewed certificate and key over the old ones.
	return handler.readwriter.Write(newCert, newKey)
}
// CreateRenewCSR generates CSR request for certificate renewal.
// For PKI certificates, use the name defined in the certsphase package, while for certificates
// embedded in the kubeConfig files, use the kubeConfig file name defined in the kubeadm constants package.
// If you use the CertificateRenewHandler returned by Certificates func, handler.Name already contains the right value.
// The private key is saved as <outdir>/<name>.key and the CSR as <outdir>/<name>.csr.
func (rm *Manager) CreateRenewCSR(name, outdir string) error {
	handler, ok := rm.certificates[name]
	if !ok {
		return errors.Errorf("%s is not a known certificate", name)
	}
	// reads the current certificate
	cert, err := handler.readwriter.Read()
	if err != nil {
		return err
	}
	// extracts the certificate config
	cfg := certToConfig(cert)
	// generates the CSR request and a new private key; this error must be checked
	// before using csr/key (the original code silently dropped it)
	csr, key, err := pkiutil.NewCSRAndKey(cfg)
	if err != nil {
		return errors.Wrapf(err, "failure while generating %s CSR and key", name)
	}
	// saves the new private key and the CSR request
	if err := pkiutil.WriteKey(outdir, name, key); err != nil {
		return errors.Wrapf(err, "failure while saving %s key", name)
	}
	if err := pkiutil.WriteCSR(outdir, name, csr); err != nil {
		return errors.Wrapf(err, "failure while saving %s CSR", name)
	}
	return nil
}
// certToConfig extracts from an existing x509 certificate the settings
// (identity, SANs and extended key usages) needed to issue a replacement
// certificate with the same properties.
func certToConfig(cert *x509.Certificate) *certutil.Config {
	altNames := certutil.AltNames{
		IPs:      cert.IPAddresses,
		DNSNames: cert.DNSNames,
	}
	return &certutil.Config{
		CommonName:   cert.Subject.CommonName,
		Organization: cert.Subject.Organization,
		AltNames:     altNames,
		Usages:       cert.ExtKeyUsage,
	}
}

View File

@ -0,0 +1,270 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package renewal
import (
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"net"
"os"
"path/filepath"
"testing"
"time"
certutil "k8s.io/client-go/util/cert"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
certtestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs"
"k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
)
var (
	// testCACertCfg is the configuration used to generate the shared test CA below.
	testCACertCfg = &certutil.Config{CommonName: "kubernetes"}

	// testCACert and testCAKey sign every certificate created by the tests in this file.
	// NB. the error is deliberately discarded here; a generation failure would surface
	// as nil cert/key failures in the tests that use them.
	testCACert, testCAKey, _ = pkiutil.NewCertificateAuthority(testCACertCfg)

	// testCertCfg is the leaf certificate configuration shared by the renewal tests.
	testCertCfg = &certutil.Config{
		CommonName:   "test-common-name",
		Organization: []string{"sig-cluster-lifecycle"},
		AltNames: certutil.AltNames{
			IPs:      []net.IP{net.ParseIP("10.100.0.1")},
			DNSNames: []string{"test-domain.space"},
		},
		Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	}
)
// TestNewManager verifies that the renewal manager registers the expected
// number of certificate handlers for local-etcd and external-etcd clusters.
func TestNewManager(t *testing.T) {
	cases := []struct {
		name                 string
		cfg                  *kubeadmapi.ClusterConfiguration
		expectedCertificates int
	}{
		{
			name:                 "cluster with local etcd",
			cfg:                  &kubeadmapi.ClusterConfiguration{},
			expectedCertificates: 10, //[admin apiserver apiserver-etcd-client apiserver-kubelet-client controller-manager etcd/healthcheck-client etcd/peer etcd/server front-proxy-client scheduler]
		},
		{
			name: "cluster with external etcd",
			cfg: &kubeadmapi.ClusterConfiguration{
				Etcd: kubeadmapi.Etcd{
					External: &kubeadmapi.ExternalEtcd{},
				},
			},
			expectedCertificates: 6, // [admin apiserver apiserver-kubelet-client controller-manager front-proxy-client scheduler]
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			rm, err := NewManager(tc.cfg, "")
			if err != nil {
				t.Fatalf("Failed to create the certificate renewal manager: %v", err)
			}
			if got := len(rm.Certificates()); got != tc.expectedCertificates {
				t.Errorf("Expected %d certificates, saw %d", tc.expectedCertificates, got)
			}
		})
	}
}
// TestRenewUsingLocalCA exercises certificate renewal through the local CA for
// both a PKI certificate file and a certificate embedded in a kubeconfig file,
// asserting that the renewed certificate is new but keeps the same identity.
func TestRenewUsingLocalCA(t *testing.T) {
	dir := testutil.SetupTempDir(t)
	defer os.RemoveAll(dir)

	// The renewal manager loads the signing CA from the certificates directory.
	if err := pkiutil.WriteCertAndKey(dir, "ca", testCACert, testCAKey); err != nil {
		t.Fatalf("couldn't write out CA certificate to %s", dir)
	}

	cfg := &kubeadmapi.ClusterConfiguration{
		CertificatesDir: dir,
	}
	rm, err := NewManager(cfg, dir)
	if err != nil {
		t.Fatalf("Failed to create the certificate renewal manager: %v", err)
	}

	tests := []struct {
		name           string
		certName       string
		createCertFunc func() *x509.Certificate
	}{
		{
			name:     "Certificate renewal for a PKI certificate",
			certName: "apiserver",
			createCertFunc: func() *x509.Certificate {
				return writeTestCertificate(t, dir, "apiserver", testCACert, testCAKey)
			},
		},
		{
			name:     "Certificate renewal for a certificate embedded in a kubeconfig file",
			certName: "admin.conf",
			createCertFunc: func() *x509.Certificate {
				return writeTestKubeconfig(t, dir, "admin.conf", testCACert, testCAKey)
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			cert := test.createCertFunc()

			// Certificate validity timestamps have one second granularity; sleep so the
			// renewed certificate gets a strictly later NotAfter than the original.
			time.Sleep(1 * time.Second)

			_, err := rm.RenewUsingLocalCA(test.certName)
			if err != nil {
				t.Fatalf("error renewing certificate: %v", err)
			}

			newCert, err := rm.certificates[test.certName].readwriter.Read()
			if err != nil {
				t.Fatalf("error reading renewed certificate: %v", err)
			}

			// The renewed certificate must be a brand new certificate...
			if newCert.SerialNumber.Cmp(cert.SerialNumber) == 0 {
				t.Fatal("expected new certificate, but renewed certificate has same serial number")
			}
			if !newCert.NotAfter.After(cert.NotAfter) {
				t.Fatalf("expected new certificate with updated expiration, but renewed certificate has same NotAfter value: saw %s, expected greater than %s", newCert.NotAfter, cert.NotAfter)
			}

			// ...with the same identity, usages and SANs, signed by the same CA.
			certtestutil.AssertCertificateIsSignedByCa(t, newCert, testCACert)
			certtestutil.AssertCertificateHasClientAuthUsage(t, newCert)
			certtestutil.AssertCertificateHasOrganizations(t, newCert, testCertCfg.Organization...)
			certtestutil.AssertCertificateHasCommonName(t, newCert, testCertCfg.CommonName)
			certtestutil.AssertCertificateHasDNSNames(t, newCert, testCertCfg.AltNames.DNSNames...)
			certtestutil.AssertCertificateHasIPAddresses(t, newCert, testCertCfg.AltNames.IPs...)
		})
	}
}
// TestCreateRenewCSR verifies that CreateRenewCSR writes both a <name>.key and
// a <name>.csr file for PKI certificates and for kubeconfig-embedded certificates.
func TestCreateRenewCSR(t *testing.T) {
	dir := testutil.SetupTempDir(t)
	defer os.RemoveAll(dir)

	outdir := filepath.Join(dir, "out")
	if err := os.MkdirAll(outdir, 0755); err != nil {
		t.Fatalf("couldn't create %s", outdir)
	}
	// The renewal manager loads the signing CA from the certificates directory.
	if err := pkiutil.WriteCertAndKey(dir, "ca", testCACert, testCAKey); err != nil {
		t.Fatalf("couldn't write out CA certificate to %s", dir)
	}

	cfg := &kubeadmapi.ClusterConfiguration{
		CertificatesDir: dir,
	}
	rm, err := NewManager(cfg, dir)
	if err != nil {
		t.Fatalf("Failed to create the certificate renewal manager: %v", err)
	}

	tests := []struct {
		name           string
		certName       string
		createCertFunc func() *x509.Certificate
	}{
		{
			name:     "Creation of a CSR request for renewal of a PKI certificate",
			certName: "apiserver",
			createCertFunc: func() *x509.Certificate {
				return writeTestCertificate(t, dir, "apiserver", testCACert, testCAKey)
			},
		},
		{
			name:     "Creation of a CSR request for renewal of a certificate embedded in a kubeconfig file",
			certName: "admin.conf",
			createCertFunc: func() *x509.Certificate {
				return writeTestKubeconfig(t, dir, "admin.conf", testCACert, testCAKey)
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// CreateRenewCSR reads the existing certificate, so it must be on disk first.
			// Unlike the renewal test, no sleep is needed: nothing compares timestamps here.
			test.createCertFunc()

			if err := rm.CreateRenewCSR(test.certName, outdir); err != nil {
				t.Fatalf("error creating renewal CSR: %v", err)
			}

			// Both the private key and the CSR must have been saved to outdir.
			for _, ext := range []string{"key", "csr"} {
				file := fmt.Sprintf("%s.%s", test.certName, ext)
				if _, err := os.Stat(filepath.Join(outdir, file)); os.IsNotExist(err) {
					t.Errorf("Expected file %s does not exist", file)
				}
			}
		})
	}
}
// TestCertToConfig verifies that certToConfig copies the identity, extended
// key usages and SANs of an x509 certificate into a certutil.Config.
func TestCertToConfig(t *testing.T) {
	want := &certutil.Config{
		CommonName:   "test-common-name",
		Organization: []string{"sig-cluster-lifecycle"},
		AltNames: certutil.AltNames{
			IPs:      []net.IP{net.ParseIP("10.100.0.1")},
			DNSNames: []string{"test-domain.space"},
		},
		Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	}

	got := certToConfig(&x509.Certificate{
		Subject: pkix.Name{
			CommonName:   "test-common-name",
			Organization: []string{"sig-cluster-lifecycle"},
		},
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		DNSNames:    []string{"test-domain.space"},
		IPAddresses: []net.IP{net.ParseIP("10.100.0.1")},
	})

	if got.CommonName != want.CommonName {
		t.Errorf("expected common name %q, got %q", want.CommonName, got.CommonName)
	}
	if len(got.Organization) != 1 || got.Organization[0] != want.Organization[0] {
		t.Errorf("expected organization %v, got %v", want.Organization, got.Organization)
	}
	if len(got.Usages) != 1 || got.Usages[0] != want.Usages[0] {
		t.Errorf("expected ext key usage %v, got %v", want.Usages, got.Usages)
	}
	if len(got.AltNames.IPs) != 1 || got.AltNames.IPs[0].String() != want.AltNames.IPs[0].String() {
		t.Errorf("expected SAN IPs %v, got %v", want.AltNames.IPs, got.AltNames.IPs)
	}
	if len(got.AltNames.DNSNames) != 1 || got.AltNames.DNSNames[0] != want.AltNames.DNSNames[0] {
		t.Errorf("expected SAN DNSNames %v, got %v", want.AltNames.DNSNames, got.AltNames.DNSNames)
	}
}

View File

@ -0,0 +1,173 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package renewal
import (
"crypto"
"crypto/x509"
"path/filepath"
"github.com/pkg/errors"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
pkiutil "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil"
)
// certificateReadWriter defines the behavior of a component that can
// read or write a certificate stored/embedded in a file.
type certificateReadWriter interface {
	// Read returns the certificate stored/embedded in a file.
	Read() (*x509.Certificate, error)

	// Write (update) replaces the stored/embedded certificate and key with the given ones.
	Write(*x509.Certificate, crypto.Signer) error
}
// pkiCertificateReadWriter defines a certificateReadWriter for certificate files
// in the K8s pki managed by kubeadm
type pkiCertificateReadWriter struct {
	// baseName is the cert/key file pair name without extension (may include a subfolder for etcd certs).
	baseName string
	// certificateDir is the directory holding the kubeadm-managed PKI files.
	certificateDir string
}

// newPKICertificateReadWriter return a new pkiCertificateReadWriter
func newPKICertificateReadWriter(certificateDir string, baseName string) *pkiCertificateReadWriter {
	return &pkiCertificateReadWriter{
		baseName:       baseName,
		certificateDir: certificateDir,
	}
}
// Read a certificate from a file the K8s pki managed by kubeadm;
// exactly one certificate is expected in the file.
func (rw *pkiCertificateReadWriter) Read() (*x509.Certificate, error) {
	certPath, _ := pkiutil.PathsForCertAndKey(rw.certificateDir, rw.baseName)
	loaded, err := certutil.CertsFromFile(certPath)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to load existing certificate %s", rw.baseName)
	}
	if len(loaded) != 1 {
		return nil, errors.Errorf("wanted exactly one certificate, got %d", len(loaded))
	}
	return loaded[0], nil
}
// Write a certificate to files in the K8s pki managed by kubeadm,
// replacing the existing cert/key pair on disk.
func (rw *pkiCertificateReadWriter) Write(newCert *x509.Certificate, newKey crypto.Signer) error {
	err := pkiutil.WriteCertAndKey(rw.certificateDir, rw.baseName, newCert, newKey)
	if err != nil {
		return errors.Wrapf(err, "failed to write new certificate %s", rw.baseName)
	}
	return nil
}
// kubeConfigReadWriter defines a certificateReadWriter for certificate files
// embedded in the kubeConfig files managed by kubeadm, and more specifically
// for the client certificate of the AuthInfo
type kubeConfigReadWriter struct {
	// kubernetesDir is the directory containing the kubeConfig file.
	kubernetesDir string
	// kubeConfigFileName is the kubeConfig file name (e.g. "admin.conf").
	kubeConfigFileName string
	// kubeConfigFilePath is kubernetesDir joined with kubeConfigFileName.
	kubeConfigFilePath string
	// kubeConfig caches the config loaded by Read so that Write can preserve all
	// non-certificate attributes; it stays nil until Read succeeds.
	kubeConfig *clientcmdapi.Config
}

// newKubeconfigReadWriter return a new kubeConfigReadWriter
func newKubeconfigReadWriter(kubernetesDir string, kubeConfigFileName string) *kubeConfigReadWriter {
	return &kubeConfigReadWriter{
		kubernetesDir:      kubernetesDir,
		kubeConfigFileName: kubeConfigFileName,
		kubeConfigFilePath: filepath.Join(kubernetesDir, kubeConfigFileName),
	}
}
// Read a certificate embedded in kubeConfig file managed by kubeadm.
// Please note that the kubeConfig file itself is kept in the ReadWriter state thus allowing
// to preserve the attributes (Context, Servers, AuthInfo etc.)
func (rw *kubeConfigReadWriter) Read() (*x509.Certificate, error) {
	// try to load the kubeConfig file
	kubeConfig, err := clientcmd.LoadFromFile(rw.kubeConfigFilePath)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to load kubeConfig file %s", rw.kubeConfigFilePath)
	}

	// get current context
	if _, ok := kubeConfig.Contexts[kubeConfig.CurrentContext]; !ok {
		return nil, errors.Errorf("invalid kubeConfig file %s: missing context %s", rw.kubeConfigFilePath, kubeConfig.CurrentContext)
	}

	// get cluster info for current context and ensure a server certificate is embedded in it
	clusterName := kubeConfig.Contexts[kubeConfig.CurrentContext].Cluster
	if _, ok := kubeConfig.Clusters[clusterName]; !ok {
		return nil, errors.Errorf("invalid kubeConfig file %s: missing cluster %s", rw.kubeConfigFilePath, clusterName)
	}

	cluster := kubeConfig.Clusters[clusterName]
	if len(cluster.CertificateAuthorityData) == 0 {
		// NB. error message grammar fixed ("and embedded" -> "an embedded")
		return nil, errors.Errorf("kubeConfig file %s does not have an embedded server certificate", rw.kubeConfigFilePath)
	}

	// get auth info for current context and ensure a client certificate is embedded in it
	authInfoName := kubeConfig.Contexts[kubeConfig.CurrentContext].AuthInfo
	if _, ok := kubeConfig.AuthInfos[authInfoName]; !ok {
		return nil, errors.Errorf("invalid kubeConfig file %s: missing authInfo %s", rw.kubeConfigFilePath, authInfoName)
	}

	authInfo := kubeConfig.AuthInfos[authInfoName]
	if len(authInfo.ClientCertificateData) == 0 {
		return nil, errors.Errorf("kubeConfig file %s does not have an embedded client certificate", rw.kubeConfigFilePath)
	}

	// parse the client certificate and retrieve the cert config;
	// ParseCertsPEM errors when no certificate is found, so certs[0] below is safe
	certs, err := certutil.ParseCertsPEM(authInfo.ClientCertificateData)
	if err != nil {
		return nil, errors.Wrapf(err, "kubeConfig file %s does not contain a valid client certificate", rw.kubeConfigFilePath)
	}

	// keep the loaded config so Write can preserve the non-certificate attributes
	rw.kubeConfig = kubeConfig

	return certs[0], nil
}
// Write a certificate embedded in kubeConfig file managed by kubeadm
// Please note that all the other attribute of the kubeConfig file are preserved, but this
// requires to call Read before Write
func (rw *kubeConfigReadWriter) Write(newCert *x509.Certificate, newKey crypto.Signer) error {
	// check if Read was called before Write; the cached config is needed
	// to preserve all the non-certificate attributes
	if rw.kubeConfig == nil {
		// NB. typo fixed in the error message ("renewd" -> "renewed")
		return errors.Errorf("failed to Write kubeConfig file with renewed certs. It is necessary to call Read before Write")
	}

	// encodes the new key
	encodedClientKey, err := keyutil.MarshalPrivateKeyToPEM(newKey)
	if err != nil {
		return errors.Wrapf(err, "failed to marshal private key to PEM")
	}

	// get auth info for current context and ensure a client certificate is embedded in it
	authInfoName := rw.kubeConfig.Contexts[rw.kubeConfig.CurrentContext].AuthInfo

	// create a kubeConfig copy with the new client certs
	newConfig := rw.kubeConfig.DeepCopy()
	newConfig.AuthInfos[authInfoName].ClientKeyData = encodedClientKey
	newConfig.AuthInfos[authInfoName].ClientCertificateData = pkiutil.EncodeCertPEM(newCert)

	// writes the kubeConfig to disk
	return clientcmd.WriteToFile(*newConfig, rw.kubeConfigFilePath)
}

View File

@ -0,0 +1,179 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package renewal
import (
"crypto"
"crypto/x509"
"net"
"os"
"path/filepath"
"testing"
"k8s.io/client-go/tools/clientcmd"
certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
pkiutil "k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
)
// TestPKICertificateReadWriter exercises the read/write round trip of a
// certificate stored as plain files in the kubeadm-managed PKI directory.
func TestPKICertificateReadWriter(t *testing.T) {
	// Work inside a throwaway directory.
	dir := testutil.SetupTempDir(t)
	defer os.RemoveAll(dir)

	// Write an initial certificate signed by the shared test CA.
	originalCert := writeTestCertificate(t, dir, "test", testCACert, testCAKey)

	rw := newPKICertificateReadWriter(dir, "test")

	// The certificate read back must match the one just written.
	got, err := rw.Read()
	if err != nil {
		t.Fatalf("couldn't read certificate: %v", err)
	}
	if !originalCert.Equal(got) {
		t.Errorf("read cert does not match with expected cert")
	}

	// Generate a replacement certificate and persist it over the original.
	replacementCert, replacementKey, err := pkiutil.NewCertAndKey(testCACert, testCAKey, testCertCfg)
	if err != nil {
		t.Fatalf("couldn't generate certificate: %v", err)
	}
	if err := rw.Write(replacementCert, replacementKey); err != nil {
		t.Fatalf("couldn't write new certificate: %v", err)
	}

	// Reading again must now return the replacement certificate.
	got, err = rw.Read()
	if err != nil {
		t.Fatalf("couldn't read new certificate: %v", err)
	}
	if !replacementCert.Equal(got) {
		t.Error("read cert does not match with expected new cert")
	}
}
// TestKubeconfigReadWriter exercises the read/write round trip of a client
// certificate embedded in a kubeconfig file.
func TestKubeconfigReadWriter(t *testing.T) {
	// Work inside a throwaway directory.
	dir := testutil.SetupTempDir(t)
	defer os.RemoveAll(dir)

	// Create a certificate and embed it into a kubeconfig file.
	originalCert := writeTestKubeconfig(t, dir, "test", testCACert, testCAKey)

	rw := newKubeconfigReadWriter(dir, "test")

	// The embedded certificate read back must match the one just written.
	got, err := rw.Read()
	if err != nil {
		t.Fatalf("couldn't read embedded certificate: %v", err)
	}
	if !originalCert.Equal(got) {
		t.Errorf("read cert does not match with expected cert")
	}

	// Generate a replacement certificate and embed it over the original.
	replacementCert, replacementKey, err := pkiutil.NewCertAndKey(testCACert, testCAKey, testCertCfg)
	if err != nil {
		t.Fatalf("couldn't generate certificate: %v", err)
	}
	if err := rw.Write(replacementCert, replacementKey); err != nil {
		t.Fatalf("couldn't write new embedded certificate: %v", err)
	}

	// Reading again must now return the replacement certificate.
	got, err = rw.Read()
	if err != nil {
		t.Fatalf("couldn't read new embedded certificate: %v", err)
	}
	if !replacementCert.Equal(got) {
		t.Errorf("read cert does not match with expected new cert")
	}
}
// writeTestCertificate is a utility for creating a test certificate: it issues
// a certificate from the shared testCertCfg signed by the given CA, writes the
// cert/key pair to dir under the given name, and returns the certificate.
func writeTestCertificate(t *testing.T, dir, name string, caCert *x509.Certificate, caKey crypto.Signer) *x509.Certificate {
	newCert, newKey, err := pkiutil.NewCertAndKey(caCert, caKey, testCertCfg)
	if err != nil {
		t.Fatalf("couldn't generate certificate: %v", err)
	}
	if err := pkiutil.WriteCertAndKey(dir, name, newCert, newKey); err != nil {
		t.Fatalf("couldn't write out certificate %s to %s", name, dir)
	}
	return newCert
}
// writeTestKubeconfig is a utility for creating a test kubeconfig with an embedded certificate:
// it issues a client certificate signed by the given CA, embeds it (with its key and the CA cert)
// into a kubeconfig written to dir under the given name, and returns the certificate.
func writeTestKubeconfig(t *testing.T, dir, name string, caCert *x509.Certificate, caKey crypto.Signer) *x509.Certificate {
	cfg := &certutil.Config{
		CommonName:   "test-common-name",
		Organization: []string{"sig-cluster-lifecycle"},
		Usages:       []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		AltNames: certutil.AltNames{
			IPs:      []net.IP{net.ParseIP("10.100.0.1")},
			DNSNames: []string{"test-domain.space"},
		},
	}
	cert, key, err := pkiutil.NewCertAndKey(caCert, caKey, cfg)
	if err != nil {
		t.Fatalf("couldn't generate certificate: %v", err)
	}

	encodedClientKey, err := keyutil.MarshalPrivateKeyToPEM(key)
	if err != nil {
		t.Fatalf("failed to marshal private key to PEM: %v", err)
	}

	certificateAuthorityData := pkiutil.EncodeCertPEM(caCert)

	config := kubeconfigutil.CreateWithCerts(
		"https://localhost:1234",
		"kubernetes-test",
		"user-test",
		certificateAuthorityData,
		encodedClientKey,
		pkiutil.EncodeCertPEM(cert),
	)

	// NB. the original discarded the write error from the failure message, hiding the cause
	if err := clientcmd.WriteToFile(*config, filepath.Join(dir, name)); err != nil {
		t.Fatalf("couldn't write out kubeconfig %s: %v", name, err)
	}

	return cert
}

View File

@ -1,131 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package renewal
import (
"crypto/x509"
"path/filepath"
"github.com/pkg/errors"
"k8s.io/client-go/tools/clientcmd"
certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/keyutil"
"k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil"
)
// RenewExistingCert loads a certificate file, uses the renew interface to renew it,
// and saves the resulting certificate and key over the old one.
func RenewExistingCert(certsDir, baseName string, impl Interface) error {
	certificatePath, _ := pkiutil.PathsForCertAndKey(certsDir, baseName)
	loaded, err := certutil.CertsFromFile(certificatePath)
	if err != nil {
		return errors.Wrapf(err, "failed to load existing certificate %s", baseName)
	}
	if len(loaded) != 1 {
		return errors.Errorf("wanted exactly one certificate, got %d", len(loaded))
	}

	// Renew with the same identity, SANs and usages as the existing certificate.
	newCert, newKey, err := impl.Renew(certToConfig(loaded[0]))
	if err != nil {
		return errors.Wrapf(err, "failed to renew certificate %s", baseName)
	}

	// Overwrite the old certificate and key on disk.
	if err := pkiutil.WriteCertAndKey(certsDir, baseName, newCert, newKey); err != nil {
		return errors.Wrapf(err, "failed to write new certificate %s", baseName)
	}
	return nil
}
// RenewEmbeddedClientCert loads a kubeconfig file, uses the renew interface to renew the client certificate
// embedded in it, and then saves the resulting kubeconfig and key over the old one.
func RenewEmbeddedClientCert(kubeConfigFileDir, kubeConfigFileName string, impl Interface) error {
	kubeConfigFilePath := filepath.Join(kubeConfigFileDir, kubeConfigFileName)

	// try to load the kubeconfig file
	kubeconfig, err := clientcmd.LoadFromFile(kubeConfigFilePath)
	if err != nil {
		return errors.Wrapf(err, "failed to load kubeconfig file %s", kubeConfigFilePath)
	}

	// get current context
	if _, ok := kubeconfig.Contexts[kubeconfig.CurrentContext]; !ok {
		return errors.Errorf("invalid kubeconfig file %s: missing context %s", kubeConfigFilePath, kubeconfig.CurrentContext)
	}

	// get cluster info for current context and ensure a server certificate is embedded in it
	clusterName := kubeconfig.Contexts[kubeconfig.CurrentContext].Cluster
	if _, ok := kubeconfig.Clusters[clusterName]; !ok {
		return errors.Errorf("invalid kubeconfig file %s: missing cluster %s", kubeConfigFilePath, clusterName)
	}

	cluster := kubeconfig.Clusters[clusterName]
	if len(cluster.CertificateAuthorityData) == 0 {
		// NB. error message grammar fixed ("and embedded" -> "an embedded")
		return errors.Errorf("kubeconfig file %s does not have an embedded server certificate", kubeConfigFilePath)
	}

	// get auth info for current context and ensure a client certificate is embedded in it
	authInfoName := kubeconfig.Contexts[kubeconfig.CurrentContext].AuthInfo
	if _, ok := kubeconfig.AuthInfos[authInfoName]; !ok {
		return errors.Errorf("invalid kubeconfig file %s: missing authInfo %s", kubeConfigFilePath, authInfoName)
	}

	authInfo := kubeconfig.AuthInfos[authInfoName]
	if len(authInfo.ClientCertificateData) == 0 {
		return errors.Errorf("kubeconfig file %s does not have an embedded client certificate", kubeConfigFilePath)
	}

	// parse the client certificate, retrieve the cert config and then renew it
	certs, err := certutil.ParseCertsPEM(authInfo.ClientCertificateData)
	if err != nil {
		return errors.Wrapf(err, "kubeconfig file %s does not contain a valid client certificate", kubeConfigFilePath)
	}

	cfg := certToConfig(certs[0])

	newCert, newKey, err := impl.Renew(cfg)
	if err != nil {
		return errors.Wrapf(err, "failed to renew certificate embedded in %s", kubeConfigFilePath)
	}

	// encodes the new key
	encodedClientKey, err := keyutil.MarshalPrivateKeyToPEM(newKey)
	if err != nil {
		return errors.Wrapf(err, "failed to marshal private key to PEM")
	}

	// create a kubeconfig copy with the new client certs
	newConfig := kubeconfig.DeepCopy()
	newConfig.AuthInfos[authInfoName].ClientKeyData = encodedClientKey
	newConfig.AuthInfos[authInfoName].ClientCertificateData = pkiutil.EncodeCertPEM(newCert)

	// writes the kubeconfig to disk
	return clientcmd.WriteToFile(*newConfig, kubeConfigFilePath)
}
// certToConfig builds the certutil.Config describing an existing x509
// certificate, so a replacement with the same properties can be issued.
func certToConfig(cert *x509.Certificate) *certutil.Config {
	cfg := &certutil.Config{
		CommonName:   cert.Subject.CommonName,
		Organization: cert.Subject.Organization,
		Usages:       cert.ExtKeyUsage,
	}
	cfg.AltNames.IPs = cert.IPAddresses
	cfg.AltNames.DNSNames = cert.DNSNames
	return cfg
}

Some files were not shown because too many files have changed in this diff Show More