Merge branch 'master' into upgrade_aliases_branch

This commit is contained in:
Jing Ai 2017-12-11 11:57:33 -08:00
commit 305656c706
23 changed files with 747 additions and 193 deletions

View File

@ -1,38 +1,45 @@
<!-- BEGIN MUNGE: GENERATED_TOC -->
- [v1.8.4](#v184)
- [Downloads for v1.8.4](#downloads-for-v184)
- [v1.8.5](#v185)
- [Downloads for v1.8.5](#downloads-for-v185)
- [Client Binaries](#client-binaries)
- [Server Binaries](#server-binaries)
- [Node Binaries](#node-binaries)
- [Changelog since v1.8.3](#changelog-since-v183)
- [Changelog since v1.8.4](#changelog-since-v184)
- [Other notable changes](#other-notable-changes)
- [v1.8.3](#v183)
- [Downloads for v1.8.3](#downloads-for-v183)
- [v1.8.4](#v184)
- [Downloads for v1.8.4](#downloads-for-v184)
- [Client Binaries](#client-binaries-1)
- [Server Binaries](#server-binaries-1)
- [Node Binaries](#node-binaries-1)
- [Changelog since v1.8.2](#changelog-since-v182)
- [Changelog since v1.8.3](#changelog-since-v183)
- [Other notable changes](#other-notable-changes-1)
- [v1.8.2](#v182)
- [Downloads for v1.8.2](#downloads-for-v182)
- [v1.8.3](#v183)
- [Downloads for v1.8.3](#downloads-for-v183)
- [Client Binaries](#client-binaries-2)
- [Server Binaries](#server-binaries-2)
- [Node Binaries](#node-binaries-2)
- [Changelog since v1.8.1](#changelog-since-v181)
- [Changelog since v1.8.2](#changelog-since-v182)
- [Other notable changes](#other-notable-changes-2)
- [v1.8.1](#v181)
- [Downloads for v1.8.1](#downloads-for-v181)
- [v1.8.2](#v182)
- [Downloads for v1.8.2](#downloads-for-v182)
- [Client Binaries](#client-binaries-3)
- [Server Binaries](#server-binaries-3)
- [Node Binaries](#node-binaries-3)
- [Changelog since v1.8.0](#changelog-since-v180)
- [Action Required](#action-required)
- [Changelog since v1.8.1](#changelog-since-v181)
- [Other notable changes](#other-notable-changes-3)
- [v1.8.0](#v180)
- [Downloads for v1.8.0](#downloads-for-v180)
- [v1.8.1](#v181)
- [Downloads for v1.8.1](#downloads-for-v181)
- [Client Binaries](#client-binaries-4)
- [Server Binaries](#server-binaries-4)
- [Node Binaries](#node-binaries-4)
- [Changelog since v1.8.0](#changelog-since-v180)
- [Action Required](#action-required)
- [Other notable changes](#other-notable-changes-4)
- [v1.8.0](#v180)
- [Downloads for v1.8.0](#downloads-for-v180)
- [Client Binaries](#client-binaries-5)
- [Server Binaries](#server-binaries-5)
- [Node Binaries](#node-binaries-5)
- [Introduction to v1.8.0](#introduction-to-v180)
- [Major Themes](#major-themes)
- [SIG API Machinery](#sig-api-machinery)
@ -93,49 +100,123 @@
- [External Dependencies](#external-dependencies)
- [v1.8.0-rc.1](#v180-rc1)
- [Downloads for v1.8.0-rc.1](#downloads-for-v180-rc1)
- [Client Binaries](#client-binaries-5)
- [Server Binaries](#server-binaries-5)
- [Node Binaries](#node-binaries-5)
- [Changelog since v1.8.0-beta.1](#changelog-since-v180-beta1)
- [Action Required](#action-required-1)
- [Other notable changes](#other-notable-changes-4)
- [v1.8.0-beta.1](#v180-beta1)
- [Downloads for v1.8.0-beta.1](#downloads-for-v180-beta1)
- [Client Binaries](#client-binaries-6)
- [Server Binaries](#server-binaries-6)
- [Node Binaries](#node-binaries-6)
- [Changelog since v1.8.0-alpha.3](#changelog-since-v180-alpha3)
- [Action Required](#action-required-2)
- [Changelog since v1.8.0-beta.1](#changelog-since-v180-beta1)
- [Action Required](#action-required-1)
- [Other notable changes](#other-notable-changes-5)
- [v1.8.0-alpha.3](#v180-alpha3)
- [Downloads for v1.8.0-alpha.3](#downloads-for-v180-alpha3)
- [v1.8.0-beta.1](#v180-beta1)
- [Downloads for v1.8.0-beta.1](#downloads-for-v180-beta1)
- [Client Binaries](#client-binaries-7)
- [Server Binaries](#server-binaries-7)
- [Node Binaries](#node-binaries-7)
- [Changelog since v1.8.0-alpha.2](#changelog-since-v180-alpha2)
- [Action Required](#action-required-3)
- [Changelog since v1.8.0-alpha.3](#changelog-since-v180-alpha3)
- [Action Required](#action-required-2)
- [Other notable changes](#other-notable-changes-6)
- [v1.8.0-alpha.2](#v180-alpha2)
- [Downloads for v1.8.0-alpha.2](#downloads-for-v180-alpha2)
- [v1.8.0-alpha.3](#v180-alpha3)
- [Downloads for v1.8.0-alpha.3](#downloads-for-v180-alpha3)
- [Client Binaries](#client-binaries-8)
- [Server Binaries](#server-binaries-8)
- [Node Binaries](#node-binaries-8)
- [Changelog since v1.7.0](#changelog-since-v170)
- [Action Required](#action-required-4)
- [Changelog since v1.8.0-alpha.2](#changelog-since-v180-alpha2)
- [Action Required](#action-required-3)
- [Other notable changes](#other-notable-changes-7)
- [v1.8.0-alpha.1](#v180-alpha1)
- [Downloads for v1.8.0-alpha.1](#downloads-for-v180-alpha1)
- [v1.8.0-alpha.2](#v180-alpha2)
- [Downloads for v1.8.0-alpha.2](#downloads-for-v180-alpha2)
- [Client Binaries](#client-binaries-9)
- [Server Binaries](#server-binaries-9)
- [Node Binaries](#node-binaries-9)
- [Changelog since v1.7.0](#changelog-since-v170)
- [Action Required](#action-required-4)
- [Other notable changes](#other-notable-changes-8)
- [v1.8.0-alpha.1](#v180-alpha1)
- [Downloads for v1.8.0-alpha.1](#downloads-for-v180-alpha1)
- [Client Binaries](#client-binaries-10)
- [Server Binaries](#server-binaries-10)
- [Node Binaries](#node-binaries-10)
- [Changelog since v1.7.0-alpha.4](#changelog-since-v170-alpha4)
- [Action Required](#action-required-5)
- [Other notable changes](#other-notable-changes-8)
- [Other notable changes](#other-notable-changes-9)
<!-- END MUNGE: GENERATED_TOC -->
<!-- NEW RELEASE NOTES ENTRY -->
# v1.8.5
[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.8/examples)
## Downloads for v1.8.5
filename | sha256 hash
-------- | -----------
[kubernetes.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes.tar.gz) | `7a7993e5dee72ede890e180112959a1fe179b592178ef24d04c48212c09345b8`
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-src.tar.gz) | `358de791b2bfd85a9b76ee42629dd8d07ae46710ad2bd5a37a20136ec3c7cea8`
### Client Binaries
filename | sha256 hash
-------- | -----------
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-client-darwin-386.tar.gz) | `89b57f6eccc02c95c4de4db189092756a9bf85033200a11db56ff30a38e2dda0`
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-client-darwin-amd64.tar.gz) | `a02bbbfe403db81f7a6317e752d9fe7853b583e34077eebfa05c7f0ec4a89712`
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-client-linux-386.tar.gz) | `a1c047cdfbcb753a8beabcf6358863c125d46e71c4d3cbe56f06237ce6f2fed6`
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-client-linux-amd64.tar.gz) | `c32b6f90f1e8a15451f0d412d6d1f3db28948d2f7d76d4e28d83c11e1eb25f20`
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-client-linux-arm64.tar.gz) | `a89a5f2889e0aae0caa673a2664c7af40e488a55ae26ab7a55599b0fbd87e281`
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-client-linux-arm.tar.gz) | `5b485bbac15b8621be7ff936a5f02565511b9b00e56a5b67dfa1b273586d5af1`
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-client-linux-ppc64le.tar.gz) | `ae4e8fcd230198bc3ad1294d61e04602a6bdd3c836997d48fd3262ab24e2885c`
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-client-linux-s390x.tar.gz) | `c7803f0e3480dfdeedd8afd2d460ab6badf0e8879febafa30a4a3fbc87554507`
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-client-windows-386.tar.gz) | `b78e04b0bc400f3f7a012cef630fd3757c12d54f16b180470d722c4d678867e1`
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-client-windows-amd64.tar.gz) | `a0b32d3fcd5e692a452d2a38a6dd34a7f3e40e22e88e4cfba77ae224e07d8565`
### Server Binaries
filename | sha256 hash
-------- | -----------
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-server-linux-amd64.tar.gz) | `523f747f68842000ca88c84e8db07243248f6064295701b2168c64d2b77adfcb`
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-server-linux-arm64.tar.gz) | `3e43fccbe224ae7b20fd462f9c5932e5c5d58f0a3d6f67365a9e0d4e00fa796a`
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-server-linux-arm.tar.gz) | `678c92b8b7b0616d102f9b74c9a11dd2763ba67bfa30075aca964aead2fe5370`
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-server-linux-ppc64le.tar.gz) | `55993ca6301988412876b79216442968834847a571b6423235a0c7bffe65a56a`
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-server-linux-s390x.tar.gz) | `32cb7484cdbeb4153fc672373055a4e8a05a61f83c722bef623f3c6922c01faa`
### Node Binaries
filename | sha256 hash
-------- | -----------
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-node-linux-amd64.tar.gz) | `a3ae45d389001788401c07c5b3d14a9f0af842466080a3c31b6a03200b27231b`
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-node-linux-arm64.tar.gz) | `642bd5c1c2728463667b1e0e6a110e2bf732972c16e8900701320a7fe85ead89`
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-node-linux-arm.tar.gz) | `5b654c6fad642739f949be245eae94455fd9f2a25a388ca8effb01c49bd3451e`
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-node-linux-ppc64le.tar.gz) | `3eeec484d7ea6caf1a3f8157d2fe504c411f27ee9930d744a017adefae191786`
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-node-linux-s390x.tar.gz) | `5874957a48d103e9dd9c1bdbecced59d13bc3ac59d2dec44de989521f711c842`
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.8.5/kubernetes-node-windows-amd64.tar.gz) | `46a57f13bc5a4b78cd58b9914257aff15163cee24f3e43bf6c3a0a87ae3ed030`
## Changelog since v1.8.4
### Other notable changes
* Fix scheduler cache panic when updating pod conditions. ([#56731](https://github.com/kubernetes/kubernetes/pull/56731), [@bsalamat](https://github.com/bsalamat))
* Add new Prometheus metric that monitors the remaining lifetime of certificates used to authenticate requests to the API server. ([#50387](https://github.com/kubernetes/kubernetes/pull/50387), [@jcbsmpsn](https://github.com/jcbsmpsn))
* scheduler: Fix issue where a new pod with affinity gets stuck at `creating` because the node had been deleted but the pod still exists. ([#56835](https://github.com/kubernetes/kubernetes/pull/56835), [@wenlxie](https://github.com/wenlxie))
* Updated Dashboard add-on to version 1.8.0: The Dashboard add-on now deploys with https enabled. The Dashboard can be accessed via kubectl proxy at http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/. The /ui redirect is deprecated and will be removed in 1.10. ([#53046](https://github.com/kubernetes/kubernetes/pull/53046), [@maciaszczykm](https://github.com/maciaszczykm))
* Fix issue where masquerade rules are flushed in GCE k8s clusters. ([#56729](https://github.com/kubernetes/kubernetes/pull/56729), [@dnardo](https://github.com/dnardo))
* kubelet: Fix bug where `runAsUser: MustRunAsNonRoot` strategy didn't reject a pod with a non-numeric `USER`. ([#56708](https://github.com/kubernetes/kubernetes/pull/56708), [@php-coder](https://github.com/php-coder))
* Add iptables rules to allow Pod traffic even when default iptables policy is to reject. ([#52569](https://github.com/kubernetes/kubernetes/pull/52569), [@tmjd](https://github.com/tmjd))
* Fix a bug in GCE multizonal clusters where PersistentVolumes were sometimes created in zones without nodes. ([#52322](https://github.com/kubernetes/kubernetes/pull/52322), [@davidz627](https://github.com/davidz627))
* If a non-absolute mountPath is passed to the kubelet, prefix it with the appropriate root path. ([#55665](https://github.com/kubernetes/kubernetes/pull/55665), [@brendandburns](https://github.com/brendandburns))
* add GRS, RAGRS storage account type support for azure disk ([#55931](https://github.com/kubernetes/kubernetes/pull/55931), [@andyzhangx](https://github.com/andyzhangx))
* Fix a typo in prometheus-to-sd configuration, that drops some stackdriver metrics. ([#56473](https://github.com/kubernetes/kubernetes/pull/56473), [@loburm](https://github.com/loburm))
* Fixes server name verification of aggregated API servers and webhook admission endpoints ([#56415](https://github.com/kubernetes/kubernetes/pull/56415), [@liggitt](https://github.com/liggitt))
* Update jquery and bootstrap dependencies ([#56445](https://github.com/kubernetes/kubernetes/pull/56445), [@dashpole](https://github.com/dashpole))
* Fix CRI localhost seccomp path in format localhost//profileRoot/profileName. ([#55450](https://github.com/kubernetes/kubernetes/pull/55450), [@feiskyer](https://github.com/feiskyer))
* support mount options in azure file ([#54674](https://github.com/kubernetes/kubernetes/pull/54674), [@andyzhangx](https://github.com/andyzhangx))
* kube-apiserver: fixed --oidc-username-prefix and --oidc-group-prefix flags which previously weren't correctly enabled ([#56175](https://github.com/kubernetes/kubernetes/pull/56175), [@ericchiang](https://github.com/ericchiang))
* fluentd-gcp addon: Fix fluentd deployment on GCP when custom resources are set. ([#55950](https://github.com/kubernetes/kubernetes/pull/55950), [@crassirostris](https://github.com/crassirostris))
* API discovery failures no longer crash the kube controller manager via the garbage collector. ([#55259](https://github.com/kubernetes/kubernetes/pull/55259), [@ironcladlou](https://github.com/ironcladlou))
* Fix bug where master startup script on GCP failed randomly due to concurrent iptables invocations. ([#55945](https://github.com/kubernetes/kubernetes/pull/55945), [@x13n](https://github.com/x13n))
# v1.8.4
[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.8/examples)

View File

@ -1,41 +1,120 @@
<!-- BEGIN MUNGE: GENERATED_TOC -->
- [v1.9.0-beta.1](#v190-beta1)
- [Downloads for v1.9.0-beta.1](#downloads-for-v190-beta1)
- [v1.9.0-beta.2](#v190-beta2)
- [Downloads for v1.9.0-beta.2](#downloads-for-v190-beta2)
- [Client Binaries](#client-binaries)
- [Server Binaries](#server-binaries)
- [Node Binaries](#node-binaries)
- [Changelog since v1.9.0-alpha.3](#changelog-since-v190-alpha3)
- [Action Required](#action-required)
- [Changelog since v1.9.0-beta.1](#changelog-since-v190-beta1)
- [Other notable changes](#other-notable-changes)
- [v1.9.0-alpha.3](#v190-alpha3)
- [Downloads for v1.9.0-alpha.3](#downloads-for-v190-alpha3)
- [v1.9.0-beta.1](#v190-beta1)
- [Downloads for v1.9.0-beta.1](#downloads-for-v190-beta1)
- [Client Binaries](#client-binaries-1)
- [Server Binaries](#server-binaries-1)
- [Node Binaries](#node-binaries-1)
- [Changelog since v1.9.0-alpha.2](#changelog-since-v190-alpha2)
- [Action Required](#action-required-1)
- [Changelog since v1.9.0-alpha.3](#changelog-since-v190-alpha3)
- [Action Required](#action-required)
- [Other notable changes](#other-notable-changes-1)
- [v1.9.0-alpha.2](#v190-alpha2)
- [Downloads for v1.9.0-alpha.2](#downloads-for-v190-alpha2)
- [v1.9.0-alpha.3](#v190-alpha3)
- [Downloads for v1.9.0-alpha.3](#downloads-for-v190-alpha3)
- [Client Binaries](#client-binaries-2)
- [Server Binaries](#server-binaries-2)
- [Node Binaries](#node-binaries-2)
- [Changelog since v1.8.0](#changelog-since-v180)
- [Action Required](#action-required-2)
- [Changelog since v1.9.0-alpha.2](#changelog-since-v190-alpha2)
- [Action Required](#action-required-1)
- [Other notable changes](#other-notable-changes-2)
- [v1.9.0-alpha.1](#v190-alpha1)
- [Downloads for v1.9.0-alpha.1](#downloads-for-v190-alpha1)
- [v1.9.0-alpha.2](#v190-alpha2)
- [Downloads for v1.9.0-alpha.2](#downloads-for-v190-alpha2)
- [Client Binaries](#client-binaries-3)
- [Server Binaries](#server-binaries-3)
- [Node Binaries](#node-binaries-3)
- [Changelog since v1.8.0](#changelog-since-v180)
- [Action Required](#action-required-2)
- [Other notable changes](#other-notable-changes-3)
- [v1.9.0-alpha.1](#v190-alpha1)
- [Downloads for v1.9.0-alpha.1](#downloads-for-v190-alpha1)
- [Client Binaries](#client-binaries-4)
- [Server Binaries](#server-binaries-4)
- [Node Binaries](#node-binaries-4)
- [Changelog since v1.8.0-alpha.3](#changelog-since-v180-alpha3)
- [Action Required](#action-required-3)
- [Other notable changes](#other-notable-changes-3)
- [Other notable changes](#other-notable-changes-4)
<!-- END MUNGE: GENERATED_TOC -->
<!-- NEW RELEASE NOTES ENTRY -->
# v1.9.0-beta.2
[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples)
## Downloads for v1.9.0-beta.2
filename | sha256 hash
-------- | -----------
[kubernetes.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes.tar.gz) | `e5c88addf6aca01635f283021a72e05be99daf3e87fd3cda92477d0ed63c2d11`
[kubernetes-src.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-src.tar.gz) | `2419a0ef3681460b64eefc083d07377786b308f6cc62d0618a5c74dfb4729b03`
### Client Binaries
filename | sha256 hash
-------- | -----------
[kubernetes-client-darwin-386.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-client-darwin-386.tar.gz) | `68d971576c3e9a16fb736f06c07ce53b8371fc67c2f37fb60e9f3a366cd37a80`
[kubernetes-client-darwin-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-client-darwin-amd64.tar.gz) | `36251b7b6043adb79706ac115181aa7ecf365ced9198a4c192f1fbc2817d030c`
[kubernetes-client-linux-386.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-client-linux-386.tar.gz) | `585a3dd6a3440988bce3f83ea14fb9a0a18011bc62e28959301861faa06d6da9`
[kubernetes-client-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-client-linux-amd64.tar.gz) | `169769d6030d8c1d9d9bc01408b62ea3275d4632a7de85392fc95a48feeba522`
[kubernetes-client-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-client-linux-arm64.tar.gz) | `7841c2af49be9ae04cda305165b172021c0e72d809c2271d05061330c220256b`
[kubernetes-client-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-client-linux-arm.tar.gz) | `9ab32843cec68b036de83f54a68c2273a913be5180dc20b5cf1e084b314a9a2d`
[kubernetes-client-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-client-linux-ppc64le.tar.gz) | `5a2bb39b78ef381382f9b8aac17d5dbcbef08a80ad3518ff2cf6c65bd7a6d07d`
[kubernetes-client-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-client-linux-s390x.tar.gz) | `ddf4b3780f5879b9fb9115353cc26234cfc3a6db63a3cd39122340189a4bf0ca`
[kubernetes-client-windows-386.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-client-windows-386.tar.gz) | `5960a0a50c92a788e90eca9d85a1d12ff1d41264816b55b3a1a28ffd3f6acf93`
[kubernetes-client-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-client-windows-amd64.tar.gz) | `d85778ace9bf25f5d3626aef3a9419a2c4aaa3847d5e0c2bf34d4dd8ae6b5205`
### Server Binaries
filename | sha256 hash
-------- | -----------
[kubernetes-server-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-server-linux-amd64.tar.gz) | `43e16b3d79c2805d712fd61ed6fd110d9db09a60d39584ef78c24821eb32b77a`
[kubernetes-server-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-server-linux-arm64.tar.gz) | `8580e454e6c467a30687ff5c85248919b3c0d2d0114e28cb3bf64d2e8998ff00`
[kubernetes-server-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-server-linux-arm.tar.gz) | `d2e767be85ebf7c6c537c8e796e8fe0ce8a3f2ca526984490646acd30bf5e6fc`
[kubernetes-server-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-server-linux-ppc64le.tar.gz) | `81dd9072e805c181b4db2dfd00fe2bdb43c00da9e07b50285bce703bfd0d75ba`
[kubernetes-server-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-server-linux-s390x.tar.gz) | `f432c816c755d05e62cb5d5e8ac08dcb60d0df6d5121e1adaf42a32de65d6174`
### Node Binaries
filename | sha256 hash
-------- | -----------
[kubernetes-node-linux-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-node-linux-amd64.tar.gz) | `2bf2268735ca4ecbdca1a692b25329d6d9d4805963cbe0cfcbb92fc725c42481`
[kubernetes-node-linux-arm64.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-node-linux-arm64.tar.gz) | `3bb4a695fd2e4fca1c77283c1ad6c2914d12b33d9c5f64ac9c630a42d5e30ab2`
[kubernetes-node-linux-arm.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-node-linux-arm.tar.gz) | `331c1efadf99dcb634c8da301349e3be63d27a5c5f06cc124b59fcc8b8a91cb0`
[kubernetes-node-linux-ppc64le.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-node-linux-ppc64le.tar.gz) | `ab036fdb64ed4702d7dbbadddf77af90de35f73aa13854bb5accf82acc95c7e6`
[kubernetes-node-linux-s390x.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-node-linux-s390x.tar.gz) | `8257af566f98325549de320d2167c1f56fd137b6225c70f6c1e34507ba124a1f`
[kubernetes-node-windows-amd64.tar.gz](https://storage.googleapis.com/kubernetes-release-dashpole/release/v1.9.0-beta.2/kubernetes-node-windows-amd64.tar.gz) | `4146fcb5bb6bf3e04641b27e4aa8501649178716fa16bd9bcb7f1fe3449db7f2`
## Changelog since v1.9.0-beta.1
### Other notable changes
* Add pvc as part of equivalence hash ([#56577](https://github.com/kubernetes/kubernetes/pull/56577), [@resouer](https://github.com/resouer))
* Fix port number and default Stackdriver Metadata Agent in daemon set configuration. ([#56576](https://github.com/kubernetes/kubernetes/pull/56576), [@kawych](https://github.com/kawych))
* Declare ipvs proxier beta ([#56623](https://github.com/kubernetes/kubernetes/pull/56623), [@m1093782566](https://github.com/m1093782566))
* Enable admissionregistration.k8s.io/v1beta1 by default in kube-apiserver. ([#56687](https://github.com/kubernetes/kubernetes/pull/56687), [@sttts](https://github.com/sttts))
* Support autoprobing floating-network-id for openstack cloud provider ([#52013](https://github.com/kubernetes/kubernetes/pull/52013), [@FengyunPan](https://github.com/FengyunPan))
* Audit webhook batching parameters are now configurable via command-line flags in the apiserver. ([#56638](https://github.com/kubernetes/kubernetes/pull/56638), [@crassirostris](https://github.com/crassirostris))
* Update kubectl to the stable version ([#54345](https://github.com/kubernetes/kubernetes/pull/54345), [@zouyee](https://github.com/zouyee))
* [scheduler] Fix issue where a new pod with affinity gets stuck at `creating` because the node had been deleted but its pod still exists. ([#53647](https://github.com/kubernetes/kubernetes/pull/53647), [@wenlxie](https://github.com/wenlxie))
* Updated Dashboard add-on to version 1.8.0: The Dashboard add-on now deploys with https enabled. The Dashboard can be accessed via kubectl proxy at http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/. The /ui redirect is deprecated and will be removed in 1.10. ([#53046](https://github.com/kubernetes/kubernetes/pull/53046), [@maciaszczykm](https://github.com/maciaszczykm))
* AWS: Detect EBS volumes mounted via NVME and mount them ([#56607](https://github.com/kubernetes/kubernetes/pull/56607), [@justinsb](https://github.com/justinsb))
* fix CreateVolume func: use search mode instead ([#54687](https://github.com/kubernetes/kubernetes/pull/54687), [@andyzhangx](https://github.com/andyzhangx))
* kubelet: fix bug where `runAsUser: MustRunAsNonRoot` strategy didn't reject a pod with a non-numeric `USER`. ([#56503](https://github.com/kubernetes/kubernetes/pull/56503), [@php-coder](https://github.com/php-coder))
* kube-proxy addon tolerates all NoExecute and NoSchedule taints by default. ([#56589](https://github.com/kubernetes/kubernetes/pull/56589), [@mindprince](https://github.com/mindprince))
* Do not do file system resize on read-only mounts ([#56587](https://github.com/kubernetes/kubernetes/pull/56587), [@gnufied](https://github.com/gnufied))
* Mark v1beta1 NetworkPolicy types as deprecated ([#56425](https://github.com/kubernetes/kubernetes/pull/56425), [@cmluciano](https://github.com/cmluciano))
* Fix problem with /bin/bash ending up linked to dash ([#55018](https://github.com/kubernetes/kubernetes/pull/55018), [@dims](https://github.com/dims))
* Modifying etcd recovery steps for the case of failed upgrade ([#56500](https://github.com/kubernetes/kubernetes/pull/56500), [@sbezverk](https://github.com/sbezverk))
# v1.9.0-beta.1
[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples)

View File

@ -23,29 +23,29 @@ metadata:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.0-beta.2
name: heapster-v1.5.0-beta.3
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.0-beta.2
version: v1.5.0-beta.3
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.0-beta.2
version: v1.5.0-beta.3
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.0-beta.2
version: v1.5.0-beta.3
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.2
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3
name: heapster
livenessProbe:
httpGet:
@ -58,7 +58,7 @@ spec:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=gcm
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.2
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3
name: eventer
command:
- /eventer
@ -89,7 +89,7 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{metrics_memory_per_node}}Mi
- --threshold=5
- --deployment=heapster-v1.5.0-beta.2
- --deployment=heapster-v1.5.0-beta.3
- --container=heapster
- --poll-period=300000
- --estimator=exponential
@ -118,7 +118,7 @@ spec:
- --memory={{base_eventer_memory}}
- --extra-memory={{eventer_memory_per_node}}Ki
- --threshold=5
- --deployment=heapster-v1.5.0-beta.2
- --deployment=heapster-v1.5.0-beta.3
- --container=eventer
- --poll-period=300000
- --estimator=exponential

View File

@ -23,29 +23,29 @@ metadata:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.0-beta.2
name: heapster-v1.5.0-beta.3
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.0-beta.2
version: v1.5.0-beta.3
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.0-beta.2
version: v1.5.0-beta.3
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.0-beta.2
version: v1.5.0-beta.3
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.2
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3
name: heapster
livenessProbe:
@ -60,7 +60,7 @@ spec:
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- --sink=gcm:?metrics=autoscaling
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.2
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3
name: eventer
command:
- /eventer
@ -91,7 +91,7 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{ metrics_memory_per_node }}Mi
- --threshold=5
- --deployment=heapster-v1.5.0-beta.2
- --deployment=heapster-v1.5.0-beta.3
- --container=heapster
- --poll-period=300000
- --estimator=exponential
@ -120,7 +120,7 @@ spec:
- --memory={{ base_eventer_memory }}
- --extra-memory={{ eventer_memory_per_node }}Ki
- --threshold=5
- --deployment=heapster-v1.5.0-beta.2
- --deployment=heapster-v1.5.0-beta.3
- --container=eventer
- --poll-period=300000
- --estimator=exponential

View File

@ -23,29 +23,29 @@ metadata:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.0-beta.2
name: heapster-v1.5.0-beta.3
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.0-beta.2
version: v1.5.0-beta.3
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.0-beta.2
version: v1.5.0-beta.3
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.0-beta.2
version: v1.5.0-beta.3
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.2
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3
name: heapster
livenessProbe:
httpGet:
@ -58,7 +58,7 @@ spec:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.2
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3
name: eventer
command:
- /eventer
@ -89,7 +89,7 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{ metrics_memory_per_node }}Mi
- --threshold=5
- --deployment=heapster-v1.5.0-beta.2
- --deployment=heapster-v1.5.0-beta.3
- --container=heapster
- --poll-period=300000
- --estimator=exponential
@ -118,7 +118,7 @@ spec:
- --memory={{ base_eventer_memory }}
- --extra-memory={{ eventer_memory_per_node }}Ki
- --threshold=5
- --deployment=heapster-v1.5.0-beta.2
- --deployment=heapster-v1.5.0-beta.3
- --container=eventer
- --poll-period=300000
- --estimator=exponential

View File

@ -21,29 +21,29 @@ metadata:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.0-beta.2
name: heapster-v1.5.0-beta.3
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.0-beta.2
version: v1.5.0-beta.3
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.0-beta.2
version: v1.5.0-beta.3
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.0-beta.2
version: v1.5.0-beta.3
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.2
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3
name: heapster
livenessProbe:
httpGet:
@ -101,7 +101,7 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{metrics_memory_per_node}}Mi
- --threshold=5
- --deployment=heapster-v1.5.0-beta.2
- --deployment=heapster-v1.5.0-beta.3
- --container=heapster
- --poll-period=300000
- --estimator=exponential

View File

@ -21,29 +21,29 @@ metadata:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.5.0-beta.2
name: heapster-v1.5.0-beta.3
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.5.0-beta.2
version: v1.5.0-beta.3
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.5.0-beta.2
version: v1.5.0-beta.3
template:
metadata:
labels:
k8s-app: heapster
version: v1.5.0-beta.2
version: v1.5.0-beta.3
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.2
- image: gcr.io/google_containers/heapster-amd64:v1.5.0-beta.3
name: heapster
livenessProbe:
httpGet:
@ -80,7 +80,7 @@ spec:
- --memory={{ base_metrics_memory }}
- --extra-memory={{ metrics_memory_per_node }}Mi
- --threshold=5
- --deployment=heapster-v1.5.0-beta.2
- --deployment=heapster-v1.5.0-beta.3
- --container=heapster
- --poll-period=300000
- --estimator=exponential

View File

@ -664,6 +664,12 @@ ENABLE_CACHE_MUTATION_DETECTOR: $(yaml-quote ${ENABLE_CACHE_MUTATION_DETECTOR:-f
ENABLE_PATCH_CONVERSION_DETECTOR: $(yaml-quote ${ENABLE_PATCH_CONVERSION_DETECTOR:-false})
ADVANCED_AUDIT_POLICY: $(yaml-quote ${ADVANCED_AUDIT_POLICY:-})
ADVANCED_AUDIT_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_BACKEND:-log})
ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE:-})
ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE:-})
ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT:-})
ADVANCED_AUDIT_WEBHOOK_THROTTLE_QPS: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_THROTTLE_QPS:-})
ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST:-})
ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF: $(yaml-quote ${ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF:-})
GCE_API_ENDPOINT: $(yaml-quote ${GCE_API_ENDPOINT:-})
PROMETHEUS_TO_SD_ENDPOINT: $(yaml-quote ${PROMETHEUS_TO_SD_ENDPOINT:-})
PROMETHEUS_TO_SD_PREFIX: $(yaml-quote ${PROMETHEUS_TO_SD_PREFIX:-})

View File

@ -1612,6 +1612,24 @@ function start-kube-apiserver {
# Create the audit webhook config file, and mount it into the apiserver pod.
local -r audit_webhook_config_file="/etc/audit_webhook.config"
params+=" --audit-webhook-config-file=${audit_webhook_config_file}"
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE:-}" ]]; then
params+=" --audit-webhook-batch-buffer-size=${ADVANCED_AUDIT_WEBHOOK_BUFFER_SIZE}"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE:-}" ]]; then
params+=" --audit-webhook-batch-max-size=${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_SIZE}"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT:-}" ]]; then
params+=" --audit-webhook-batch-max-wait=${ADVANCED_AUDIT_WEBHOOK_MAX_BATCH_WAIT}"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_THROTTLE_QPS:-}" ]]; then
params+=" --audit-webhook-batch-throttle-qps=${ADVANCED_AUDIT_WEBHOOK_THROTTLE_QPS}"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST:-}" ]]; then
params+=" --audit-webhook-batch-throttle-burst=${ADVANCED_AUDIT_WEBHOOK_THROTTLE_BURST}"
fi
if [[ -n "${ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF:-}" ]]; then
params+=" --audit-webhook-batch-initial-backoff=${ADVANCED_AUDIT_WEBHOOK_INITIAL_BACKOFF}"
fi
create-master-audit-webhook-config "${audit_webhook_config_file}"
audit_webhook_config_mount="{\"name\": \"auditwebhookconfigmount\",\"mountPath\": \"${audit_webhook_config_file}\", \"readOnly\": true},"
audit_webhook_config_volume="{\"name\": \"auditwebhookconfigmount\",\"hostPath\": {\"path\": \"${audit_webhook_config_file}\", \"type\": \"FileOrCreate\"}},"
@ -1820,6 +1838,10 @@ function start-kube-controller-manager {
[[ "${HPA_USE_REST_CLIENTS:-}" == "false" ]]; then
params+=" --horizontal-pod-autoscaler-use-rest-clients=false"
fi
if [[ -n "${PV_RECYCLER_OVERRIDE_TEMPLATE:-}" ]]; then
params+=" --pv-recycler-pod-template-filepath-nfs=$PV_RECYCLER_OVERRIDE_TEMPLATE"
params+=" --pv-recycler-pod-template-filepath-hostpath=$PV_RECYCLER_OVERRIDE_TEMPLATE"
fi
local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag)
local container_env=""
@ -1839,6 +1861,8 @@ function start-kube-controller-manager {
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
sed -i -e "s@{{pv_recycler_mount}}@${PV_RECYCLER_MOUNT}@g" "${src_file}"
sed -i -e "s@{{pv_recycler_volume}}@${PV_RECYCLER_VOLUME}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
@ -2291,11 +2315,47 @@ function override-kubectl {
echo "export PATH=${KUBE_HOME}/bin:\$PATH" > /etc/profile.d/kube_env.sh
}
function override-pv-recycler {
if [[ -z "${PV_RECYCLER_OVERRIDE_TEMPLATE:-}" ]]; then
echo "PV_RECYCLER_OVERRIDE_TEMPLATE is not set"
exit 1
fi
PV_RECYCLER_VOLUME="{\"name\": \"pv-recycler-mount\",\"hostPath\": {\"path\": \"${PV_RECYCLER_OVERRIDE_TEMPLATE}\", \"type\": \"FileOrCreate\"}},"
PV_RECYCLER_MOUNT="{\"name\": \"pv-recycler-mount\",\"mountPath\": \"${PV_RECYCLER_OVERRIDE_TEMPLATE}\", \"readOnly\": true},"
cat > ${PV_RECYCLER_OVERRIDE_TEMPLATE} <<EOF
version: v1
kind: Pod
metadata:
generateName: pv-recycler-
namespace: default
spec:
activeDeadlineSeconds: 60
restartPolicy: Never
volumes:
- name: vol
containers:
- name: pv-recycler
image: gcr.io/google_containers/busybox:1.27
command:
- /bin/sh
args:
- -c
- test -e /scrub && rm -rf /scrub/..?* /scrub/.[!.]* /scrub/* && test -z $(ls -A /scrub) || exit 1
volumeMounts:
- name: vol
mountPath: /scrub
EOF
}
########### Main Function ###########
echo "Start to configure instance for kubernetes"
KUBE_HOME="/home/kubernetes"
CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
PV_RECYCLER_OVERRIDE_TEMPLATE="${KUBE_HOME}/kube-manifests/kubernetes/pv-recycler-template.yaml"
if [[ ! -e "${KUBE_HOME}/kube-env" ]]; then
echo "The ${KUBE_HOME}/kube-env file does not exist!! Terminate cluster initialization."
exit 1
@ -2331,6 +2391,7 @@ if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
create-master-auth
create-master-kubelet-auth
create-master-etcd-auth
override-pv-recycler
else
create-node-pki
create-kubelet-kubeconfig ${KUBERNETES_MASTER_NAME}

View File

@ -62,6 +62,9 @@ ETCD_API="$(echo $VERSION_CONTENTS | cut -d '/' -f 2)"
# NOTE: NAME HAS TO BE EQUAL TO WHAT WE USE IN --name flag when starting etcd.
NAME="${NAME:-etcd-$(hostname)}"
INITIAL_CLUSTER="${INITIAL_CLUSTER:-${NAME}=http://localhost:2380}"
INITIAL_ADVERTISE_PEER_URLS="${INITIAL_ADVERTISE_PEER_URLS:-http://localhost:2380}"
# Port on which etcd is exposed.
etcd_port=2379
event_etcd_port=4002
@ -101,7 +104,7 @@ wait_for_cluster_healthy() {
# Wait until etcd and apiserver pods are down.
wait_for_etcd_and_apiserver_down() {
for i in $(seq 120); do
etcd=$(docker ps | grep etcd | grep -v etcd-empty-dir | grep -v etcd-monitor | wc -l)
etcd=$(docker ps | grep etcd-server | wc -l)
apiserver=$(docker ps | grep apiserver | wc -l)
# TODO: Theoretically it is possible, that apiserver and or etcd
# are currently down, but Kubelet is now restarting them and they
@ -134,6 +137,8 @@ if ! wait_for_etcd_and_apiserver_down; then
exit 1
fi
read -rsp $'Press enter when all etcd instances are down...\n'
# Create the sort of directory structure that etcd expects.
# If this directory already exists, remove it.
BACKUP_DIR="/var/tmp/backup"
@ -185,15 +190,13 @@ elif [ "${ETCD_API}" == "etcd3" ]; then
# Run etcdctl snapshot restore command and wait until it is finished.
# setting with --name in the etcd manifest file and then it seems to work.
# TODO(jsz): This command may not work in case of HA.
image=$(docker run -d -v ${BACKUP_DIR}:/var/tmp/backup --env ETCDCTL_API=3 \
docker run -v ${BACKUP_DIR}:/var/tmp/backup --env ETCDCTL_API=3 \
"gcr.io/google_containers/etcd:${ETCD_VERSION}" /bin/sh -c \
"/usr/local/bin/etcdctl snapshot restore ${BACKUP_DIR}/${snapshot} --name ${NAME} --initial-cluster ${NAME}=http://localhost:2380; mv /${NAME}.etcd/member /var/tmp/backup/")
"/usr/local/bin/etcdctl snapshot restore ${BACKUP_DIR}/${snapshot} --name ${NAME} --initial-cluster ${INITIAL_CLUSTER} --initial-advertise-peer-urls ${INITIAL_ADVERTISE_PEER_URLS}; mv /${NAME}.etcd/member /var/tmp/backup/"
if [ "$?" -ne "0" ]; then
echo "Docker container didn't started correctly"
exit 1
fi
echo "Prepare container exit code: $(docker wait ${image})"
rm -f "${BACKUP_DIR}/${snapshot}"
fi

View File

@ -25,7 +25,7 @@
"containers": [
{
"name": "cluster-autoscaler",
"image": "gcr.io/google_containers/cluster-autoscaler:v1.1.0-beta1",
"image": "gcr.io/google_containers/cluster-autoscaler:v1.1.0",
"livenessProbe": {
"httpGet": {
"path": "/health-check",

View File

@ -34,6 +34,8 @@
{% set cloud_config_volume = "" -%}
{% set additional_cloud_config_mount = "{\"name\": \"usrsharessl\",\"mountPath\": \"/usr/share/ssl\", \"readOnly\": true}, {\"name\": \"usrssl\",\"mountPath\": \"/usr/ssl\", \"readOnly\": true}, {\"name\": \"usrlibssl\",\"mountPath\": \"/usr/lib/ssl\", \"readOnly\": true}, {\"name\": \"usrlocalopenssl\",\"mountPath\": \"/usr/local/openssl\", \"readOnly\": true}," -%}
{% set additional_cloud_config_volume = "{\"name\": \"usrsharessl\",\"hostPath\": {\"path\": \"/usr/share/ssl\"}}, {\"name\": \"usrssl\",\"hostPath\": {\"path\": \"/usr/ssl\"}}, {\"name\": \"usrlibssl\",\"hostPath\": {\"path\": \"/usr/lib/ssl\"}}, {\"name\": \"usrlocalopenssl\",\"hostPath\": {\"path\": \"/usr/local/openssl\"}}," -%}
{% set pv_recycler_mount = "" -%}
{% set pv_recycler_volume = "" -%}
{% set srv_kube_path = "/srv/kubernetes" -%}
{% if grains.cloud is defined -%}
@ -131,6 +133,7 @@
"volumeMounts": [
{{cloud_config_mount}}
{{additional_cloud_config_mount}}
{{pv_recycler_mount}}
{ "name": "srvkube",
"mountPath": "{{srv_kube_path}}",
"readOnly": true},
@ -158,6 +161,7 @@
"volumes":[
{{cloud_config_volume}}
{{additional_cloud_config_volume}}
{{pv_recycler_volume}}
{ "name": "srvkube",
"hostPath": {
"path": "{{srv_kube_path}}"}

View File

@ -44,6 +44,7 @@ type instanceInfo struct {
// GetZone returns the Zone containing the current failure zone and locality region that the program is running in
func (az *Cloud) GetZone() (cloudprovider.Zone, error) {
faultMutex.Lock()
defer faultMutex.Unlock()
if faultDomain == nil {
var err error
faultDomain, err = fetchFaultDomain()
@ -55,7 +56,6 @@ func (az *Cloud) GetZone() (cloudprovider.Zone, error) {
FailureDomain: *faultDomain,
Region: az.Location,
}
faultMutex.Unlock()
return zone, nil
}

View File

@ -14,6 +14,7 @@ go_library(
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/container-storage-interface/spec/lib/go/csi:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
@ -38,7 +39,6 @@ go_test(
importpath = "k8s.io/kubernetes/pkg/volume/csi",
library = ":go_default_library",
deps = [
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/csi/fake:go_default_library",
"//pkg/volume/testing:go_default_library",

View File

@ -32,6 +32,7 @@ import (
type csiClient interface {
AssertSupportedVersion(ctx grpctx.Context, ver *csipb.Version) error
NodeProbe(ctx grpctx.Context, ver *csipb.Version) error
NodePublishVolume(
ctx grpctx.Context,
volumeid string,
@ -135,6 +136,13 @@ func (c *csiDriverClient) AssertSupportedVersion(ctx grpctx.Context, ver *csipb.
return nil
}
func (c *csiDriverClient) NodeProbe(ctx grpctx.Context, ver *csipb.Version) error {
glog.V(4).Info(log("sending NodeProbe rpc call to csi driver: [version %v]", ver))
req := &csipb.NodeProbeRequest{Version: ver}
_, err := c.nodeClient.NodeProbe(ctx, req)
return err
}
func (c *csiDriverClient) NodePublishVolume(
ctx grpctx.Context,
volID string,
@ -145,7 +153,7 @@ func (c *csiDriverClient) NodePublishVolume(
volumeAttribs map[string]string,
fsType string,
) error {
glog.V(4).Info(log("calling NodePublishVolume rpc [volid=%s,target_path=%s]", volID, targetPath))
if volID == "" {
return errors.New("missing volume id")
}
@ -182,7 +190,7 @@ func (c *csiDriverClient) NodePublishVolume(
}
func (c *csiDriverClient) NodeUnpublishVolume(ctx grpctx.Context, volID string, targetPath string) error {
glog.V(4).Info(log("calling NodeUnpublishVolume rpc: [volid=%s, target_path=%s", volID, targetPath))
if volID == "" {
return errors.New("missing volume id")
}

View File

@ -62,6 +62,28 @@ func TestClientAssertSupportedVersion(t *testing.T) {
}
}
func TestClientNodeProbe(t *testing.T) {
testCases := []struct {
testName string
ver *csipb.Version
mustFail bool
err error
}{
{testName: "supported version", ver: &csipb.Version{Major: 0, Minor: 1, Patch: 0}},
{testName: "grpc error", ver: &csipb.Version{Major: 0, Minor: 1, Patch: 0}, mustFail: true, err: errors.New("grpc error")},
}
for _, tc := range testCases {
t.Log("case: ", tc.testName)
client := setupClient(t)
client.nodeClient.(*fake.NodeClient).SetNextError(tc.err)
err := client.NodeProbe(grpctx.Background(), tc.ver)
if tc.mustFail && err == nil {
t.Error("must fail, but err = nil")
}
}
}
func TestClientNodePublishVolume(t *testing.T) {
testCases := []struct {
name string

View File

@ -20,6 +20,7 @@ import (
"encoding/json"
"errors"
"fmt"
"os"
"path"
"github.com/golang/glog"
@ -30,6 +31,24 @@ import (
"k8s.io/client-go/kubernetes"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
//TODO (vladimirvivien) move this in a central loc later
var (
volDataKey = struct {
specVolID,
volHandle,
driverName,
nodeName,
attachmentID string
}{
"specVolID",
"volumeHandle",
"driverName",
"nodeName",
"attachmentID",
}
)
type csiMountMgr struct {
@ -38,6 +57,7 @@ type csiMountMgr struct {
plugin *csiPlugin
driverName string
volumeID string
specVolumeID string
readOnly bool
spec *volume.Spec
pod *api.Pod
@ -51,14 +71,14 @@ type csiMountMgr struct {
var _ volume.Volume = &csiMountMgr{}
func (c *csiMountMgr) GetPath() string {
return getTargetPath(c.podUID, c.driverName, c.volumeID, c.plugin.host)
dir := path.Join(getTargetPath(c.podUID, c.specVolumeID, c.plugin.host), "/mount")
glog.V(4).Info(log("mounter.GetPath generated [%s]", dir))
return dir
}
func getTargetPath(uid types.UID, driverName string, volID string, host volume.VolumeHost) string {
// driverName validated at Mounter creation
// sanitize (replace / with ~) in volumeID before it's appended to path:w
driverPath := fmt.Sprintf("%s/%s", driverName, kstrings.EscapeQualifiedNameForDisk(volID))
return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(csiPluginName), driverPath)
func getTargetPath(uid types.UID, specVolumeID string, host volume.VolumeHost) string {
specVolID := kstrings.EscapeQualifiedNameForDisk(specVolumeID)
return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(csiPluginName), specVolID)
}
// volume.Mounter methods
@ -77,6 +97,17 @@ func (c *csiMountMgr) SetUp(fsGroup *int64) error {
func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
glog.V(4).Infof(log("Mounter.SetUpAt(%s)", dir))
mounted, err := isDirMounted(c.plugin, dir)
if err != nil {
glog.Error(log("mounter.SetUpAt failed while checking mount status for dir [%s]", dir))
return err
}
if mounted {
glog.V(4).Info(log("mounter.SetUpAt skipping mount, dir already mounted [%s]", dir))
return nil
}
csiSource, err := getCSISourceFromSpec(c.spec)
if err != nil {
glog.Error(log("mounter.SetupAt failed to get CSI persistent source: %v", err))
@ -92,13 +123,19 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
// ensure version is supported
if err := csi.AssertSupportedVersion(ctx, csiVersion); err != nil {
glog.Errorf(log("failed to assert version: %v", err))
glog.Error(log("mounter.SetUpAt failed to assert version: %v", err))
return err
}
// probe driver
// TODO (vladimirvivien) move probe call where it is done only when it is needed.
if err := csi.NodeProbe(ctx, csiVersion); err != nil {
glog.Error(log("mounter.SetUpAt failed to probe driver: %v", err))
return err
}
// search for attachment by VolumeAttachment.Spec.Source.PersistentVolumeName
if c.volumeInfo == nil {
attachment, err := c.k8s.StorageV1alpha1().VolumeAttachments().Get(attachID, meta.GetOptions{})
if err != nil {
glog.Error(log("mounter.SetupAt failed while getting volume attachment [id=%v]: %v", attachID, err))
@ -121,6 +158,31 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
return err
}
// create target_dir before call to NodePublish
if err := os.MkdirAll(dir, 0750); err != nil {
glog.Error(log("mouter.SetUpAt failed to create dir %#v: %v", dir, err))
return err
}
glog.V(4).Info(log("created target path successfully [%s]", dir))
// persist volume info data for teardown
volData := map[string]string{
volDataKey.specVolID: c.spec.Name(),
volDataKey.volHandle: csiSource.VolumeHandle,
volDataKey.driverName: csiSource.Driver,
volDataKey.nodeName: nodeName,
volDataKey.attachmentID: attachID,
}
if err := saveVolumeData(c.plugin, c.podUID, c.spec.Name(), volData); err != nil {
glog.Error(log("mounter.SetUpAt failed to save volume info data: %v", err))
if err := removeMountDir(c.plugin, dir); err != nil {
glog.Error(log("mounter.SetUpAt failed to remove mount dir after a saveVolumeData() error [%s]: %v", dir, err))
return err
}
return err
}
//TODO (vladimirvivien) implement better AccessModes mapping between k8s and CSI
accessMode := api.ReadWriteOnce
if c.spec.PersistentVolume.Spec.AccessModes != nil {
@ -139,11 +201,15 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error {
)
if err != nil {
glog.Errorf(log("Mounter.SetupAt failed: %v", err))
glog.Errorf(log("mounter.SetupAt failed: %v", err))
if err := removeMountDir(c.plugin, dir); err != nil {
glog.Error(log("mounter.SetuAt failed to remove mount dir after a NodePublish() error [%s]: %v", dir, err))
return err
}
return err
}
glog.V(4).Infof(log("successfully mounted %s", dir))
glog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir))
return nil
}
@ -164,10 +230,30 @@ func (c *csiMountMgr) TearDown() error {
func (c *csiMountMgr) TearDownAt(dir string) error {
glog.V(4).Infof(log("Unmounter.TearDown(%s)", dir))
// extract driverName and volID from path
base, volID := path.Split(dir)
volID = kstrings.UnescapeQualifiedNameForDisk(volID)
driverName := path.Base(base)
// is dir even mounted ?
// TODO (vladimirvivien) this check may not work for an emptyDir or local storage
// see https://github.com/kubernetes/kubernetes/pull/56836#discussion_r155834524
mounted, err := isDirMounted(c.plugin, dir)
if err != nil {
glog.Error(log("unmounter.Teardown failed while checking mount status for dir [%s]: %v", dir, err))
return err
}
if !mounted {
glog.V(4).Info(log("unmounter.Teardown skipping unmout, dir not mounted [%s]", dir))
return nil
}
// load volume info from file
dataDir := path.Dir(dir) // dropoff /mount at end
data, err := loadVolumeData(dataDir, volDataFileName)
if err != nil {
glog.Error(log("unmounter.Teardown failed to load volume data file using dir [%s]: %v", dir, err))
return err
}
volID := data[volDataKey.volHandle]
driverName := data[volDataKey.driverName]
if c.csiClient == nil {
addr := fmt.Sprintf(csiAddrTemplate, driverName)
@ -183,18 +269,21 @@ func (c *csiMountMgr) TearDownAt(dir string) error {
// TODO make all assertion calls private within the client itself
if err := csi.AssertSupportedVersion(ctx, csiVersion); err != nil {
glog.Errorf(log("failed to assert version: %v", err))
glog.Errorf(log("mounter.SetUpAt failed to assert version: %v", err))
return err
}
err := csi.NodeUnpublishVolume(ctx, volID, dir)
if err != nil {
glog.Errorf(log("Mounter.Setup failed: %v", err))
if err := csi.NodeUnpublishVolume(ctx, volID, dir); err != nil {
glog.Errorf(log("mounter.SetUpAt failed: %v", err))
return err
}
glog.V(4).Infof(log("successfully unmounted %s", dir))
// clean mount point dir
if err := removeMountDir(c.plugin, dir); err != nil {
glog.Error(log("mounter.SetUpAt failed to clean mount dir [%s]: %v", dir, err))
return err
}
glog.V(4).Infof(log("mounte.SetUpAt successfully unmounted dir [%s]", dir))
return nil
}
@ -221,3 +310,92 @@ func getVolAttribsFromSpec(spec *volume.Spec) (map[string]string, error) {
}
return attribs, nil
}
// saveVolumeData persists parameter data as json file using the locagion
// generated by /var/lib/kubelet/pods/<podID>/volumes/kubernetes.io~csi/<specVolId>/volume_data.json
func saveVolumeData(p *csiPlugin, podUID types.UID, specVolID string, data map[string]string) error {
dir := getTargetPath(podUID, specVolID, p.host)
dataFilePath := path.Join(dir, volDataFileName)
file, err := os.Create(dataFilePath)
if err != nil {
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
return err
}
defer file.Close()
if err := json.NewEncoder(file).Encode(data); err != nil {
glog.Error(log("failed to save volume data file %s: %v", dataFilePath, err))
return err
}
glog.V(4).Info(log("volume data file saved successfully [%s]", dataFilePath))
return nil
}
// loadVolumeData uses the directory returned by mounter.GetPath with value
// /var/lib/kubelet/pods/<podID>/volumes/kubernetes.io~csi/<specVolumeId>/mount.
// The function extracts specVolumeID and uses it to load the json data file from dir
// /var/lib/kubelet/pods/<podID>/volumes/kubernetes.io~csi/<specVolId>/volume_data.json
func loadVolumeData(dir string, fileName string) (map[string]string, error) {
// remove /mount at the end
dataFileName := path.Join(dir, fileName)
glog.V(4).Info(log("loading volume data file [%s]", dataFileName))
file, err := os.Open(dataFileName)
if err != nil {
glog.Error(log("failed to open volume data file [%s]: %v", dataFileName, err))
return nil, err
}
defer file.Close()
data := map[string]string{}
if err := json.NewDecoder(file).Decode(&data); err != nil {
glog.Error(log("failed to parse volume data file [%s]: %v", dataFileName, err))
return nil, err
}
return data, nil
}
// isDirMounted returns the !notMounted result from IsLikelyNotMountPoint check
func isDirMounted(plug *csiPlugin, dir string) (bool, error) {
mounter := plug.host.GetMounter(plug.GetPluginName())
notMnt, err := mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
glog.Error(log("isDirMounted IsLikelyNotMountPoint test failed for dir [%v]", dir))
return false, err
}
return !notMnt, nil
}
// removeMountDir cleans the mount dir when dir is not mounted and removed the volume data file in dir
func removeMountDir(plug *csiPlugin, mountPath string) error {
glog.V(4).Info(log("removing mount path [%s]", mountPath))
if pathExists, pathErr := util.PathExists(mountPath); pathErr != nil {
glog.Error(log("failed while checking mount path stat [%s]", pathErr))
return pathErr
} else if !pathExists {
glog.Warning(log("skipping mount dir removal, path does not exist [%v]", mountPath))
return nil
}
mounter := plug.host.GetMounter(plug.GetPluginName())
notMnt, err := mounter.IsLikelyNotMountPoint(mountPath)
if err != nil {
glog.Error(log("mount dir removal failed [%s]: %v", mountPath, err))
return err
}
if notMnt {
glog.V(4).Info(log("dir not mounted, deleting it [%s]", mountPath))
if err := os.Remove(mountPath); err != nil && !os.IsNotExist(err) {
glog.Error(log("failed to remove dir [%s]: %v", mountPath, err))
return err
}
// remove volume data file as well
dataFile := path.Join(path.Dir(mountPath), volDataFileName)
glog.V(4).Info(log("also deleting volume info data file [%s]", dataFile))
if err := os.Remove(dataFile); err != nil && !os.IsNotExist(err) {
glog.Error(log("failed to delete volume data file [%s]: %v", dataFile, err))
return err
}
}
return nil
}

View File

@ -17,7 +17,10 @@ limitations under the License.
package csi
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"testing"
@ -43,10 +46,29 @@ func TestMounterGetPath(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
pv := makeTestPV("test-pv", 10, testDriver, testVol)
// TODO (vladimirvivien) specName with slashes will not work
testCases := []struct {
name string
specVolumeName string
path string
}{
{
name: "simple specName",
specVolumeName: "spec-0",
path: path.Join(tmpDir, fmt.Sprintf("pods/%s/volumes/kubernetes.io~csi/%s/%s", testPodUID, "spec-0", "/mount")),
},
{
name: "specName with dots",
specVolumeName: "test.spec.1",
path: path.Join(tmpDir, fmt.Sprintf("pods/%s/volumes/kubernetes.io~csi/%s/%s", testPodUID, "test.spec.1", "/mount")),
},
}
for _, tc := range testCases {
t.Log("test case:", tc.name)
pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
mounter, err := plug.NewMounter(
volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
spec,
&api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
volume.VolumeOptions{},
)
@ -54,17 +76,14 @@ func TestMounterGetPath(t *testing.T) {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
csiMounter := mounter.(*csiMountMgr)
expectedPath := path.Join(tmpDir, fmt.Sprintf(
"pods/%s/volumes/kubernetes.io~csi/%s/%s",
testPodUID,
csiMounter.driverName,
csiMounter.volumeID,
))
mountPath := csiMounter.GetPath()
if mountPath != expectedPath {
t.Errorf("Got unexpected path: %s", mountPath)
}
path := csiMounter.GetPath()
t.Log("*** GetPath: ", path)
if tc.path != path {
t.Errorf("expecting path %s, got %s", tc.path, path)
}
}
}
func TestMounterSetUp(t *testing.T) {
@ -125,6 +144,14 @@ func TestMounterSetUp(t *testing.T) {
if err := csiMounter.SetUp(nil); err != nil {
t.Fatalf("mounter.Setup failed: %v", err)
}
path := csiMounter.GetPath()
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
// ensure call went all the way
pubs := csiMounter.csiClient.(*csiDriverClient).nodeClient.(*fake.NodeClient).GetNodePublishedVolumes()
@ -149,6 +176,19 @@ func TestUnmounterTeardown(t *testing.T) {
dir := csiUnmounter.GetPath()
// save the data file prior to unmount
if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", dir, err)
}
if err := saveVolumeData(
plug,
testPodUID,
"test-pv",
map[string]string{volDataKey.specVolID: "test-pv", volDataKey.driverName: "driver", volDataKey.volHandle: "vol-handle"},
); err != nil {
t.Fatal("failed to save volume data:", err)
}
err = csiUnmounter.TearDownAt(dir)
if err != nil {
t.Fatal(err)
@ -208,3 +248,51 @@ func TestGetVolAttribsFromSpec(t *testing.T) {
}
}
}
func TestSaveVolumeData(t *testing.T) {
plug, tmpDir := newTestPlugin(t)
defer os.RemoveAll(tmpDir)
testCases := []struct {
name string
data map[string]string
shouldFail bool
}{
{name: "test with data ok", data: map[string]string{"key0": "val0", "_key1": "val1", "key2": "val2"}},
{name: "test with data ok 2 ", data: map[string]string{"_key0_": "val0", "&key1": "val1", "key2": "val2"}},
}
for i, tc := range testCases {
t.Log("test case:", tc.name)
specVolID := fmt.Sprintf("spec-volid-%d", i)
mountDir := path.Join(getTargetPath(testPodUID, specVolID, plug.host), "/mount")
if err := os.MkdirAll(mountDir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", mountDir, err)
}
err := saveVolumeData(plug, testPodUID, specVolID, tc.data)
if !tc.shouldFail && err != nil {
t.Error("unexpected failure: ", err)
}
// did file get created
dataDir := getTargetPath(testPodUID, specVolID, plug.host)
file := path.Join(dataDir, volDataFileName)
if _, err := os.Stat(file); err != nil {
t.Error("failed to create data dir:", err)
}
// validate content
data, err := ioutil.ReadFile(file)
if !tc.shouldFail && err != nil {
t.Error("failed to read data file:", err)
}
jsonData := new(bytes.Buffer)
if err := json.NewEncoder(jsonData).Encode(tc.data); err != nil {
t.Error("failed to encode json:", err)
}
if string(data) != jsonData.String() {
t.Errorf("expecting encoded data %v, got %v", string(data), jsonData)
}
}
}

View File

@ -19,7 +19,6 @@ package csi
import (
"errors"
"fmt"
"path"
"regexp"
"time"
@ -29,7 +28,6 @@ import (
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
)
@ -44,6 +42,7 @@ const (
csiAddrTemplate = "/var/lib/kubelet/plugins/%v/csi.sock"
csiTimeout = 15 * time.Second
volNameSep = "^"
volDataFileName = "vol_data.json"
)
var (
@ -141,6 +140,7 @@ func (p *csiPlugin) NewMounter(
podUID: pod.UID,
driverName: pvSource.Driver,
volumeID: pvSource.VolumeHandle,
specVolumeID: spec.Name(),
csiClient: client,
}
return mounter, nil
@ -151,35 +151,31 @@ func (p *csiPlugin) NewUnmounter(specName string, podUID types.UID) (volume.Unmo
unmounter := &csiMountMgr{
plugin: p,
podUID: podUID,
specVolumeID: specName,
}
return unmounter, nil
}
func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
glog.V(4).Infof(log("constructing volume spec [pv.Name=%v, path=%v]", volumeName, mountPath))
glog.V(4).Info(log("plugin.ConstructVolumeSpec [pv.Name=%v, path=%v]", volumeName, mountPath))
// extract driverName/volumeId from end of mountPath
dir, volID := path.Split(mountPath)
volID = kstrings.UnescapeQualifiedNameForDisk(volID)
driverName := path.Base(dir)
// TODO (vladimirvivien) consider moving this check in API validation
if !isDriverNameValid(driverName) {
glog.Error(log("failed while reconstructing volume spec csi: driver name extracted from path is invalid: [path=%s; driverName=%s]", mountPath, driverName))
return nil, errors.New("invalid csi driver name from path")
volData, err := loadVolumeData(mountPath, volDataFileName)
if err != nil {
glog.Error(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err))
return nil, err
}
glog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [volumeID=%s; driverName=%s]", volID, driverName))
glog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData))
pv := &api.PersistentVolume{
ObjectMeta: meta.ObjectMeta{
Name: volumeName,
Name: volData[volDataKey.specVolID],
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
CSI: &api.CSIPersistentVolumeSource{
Driver: driverName,
VolumeHandle: volID,
Driver: volData[volDataKey.driverName],
VolumeHandle: volData[volDataKey.volHandle],
},
},
},

View File

@ -19,6 +19,7 @@ package csi
import (
"fmt"
"os"
"path"
"testing"
api "k8s.io/api/core/v1"
@ -27,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/types"
fakeclient "k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
@ -140,17 +140,31 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
testCases := []struct {
name string
driverName string
volID string
specVolID string
data map[string]string
shouldFail bool
}{
{"valid driver and vol", "test.csi-driver", "abc-cde", false},
{"valid driver + vol with slash", "test.csi-driver", "a/b/c/d", false},
{"invalid driver name", "_test.csi.driver>", "a/b/c/d", true},
{
name: "valid spec name",
specVolID: "test.vol.id",
data: map[string]string{volDataKey.specVolID: "test.vol.id", volDataKey.volHandle: "test-vol0", volDataKey.driverName: "test-driver0"},
},
}
for _, tc := range testCases {
dir := getTargetPath(testPodUID, tc.driverName, tc.volID, plug.host)
t.Logf("test case: %s", tc.name)
dir := getTargetPath(testPodUID, tc.specVolID, plug.host)
// create the data file
if tc.data != nil {
mountDir := path.Join(getTargetPath(testPodUID, tc.specVolID, plug.host), "/mount")
if err := os.MkdirAll(mountDir, 0755); err != nil && !os.IsNotExist(err) {
t.Errorf("failed to create dir [%s]: %v", mountDir, err)
}
if err := saveVolumeData(plug, testPodUID, tc.specVolID, tc.data); err != nil {
t.Fatal(err)
}
}
// rebuild spec
spec, err := plug.ConstructVolumeSpec("test-pv", dir)
@ -161,13 +175,12 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
continue
}
volID := spec.PersistentVolume.Spec.CSI.VolumeHandle
unsanitizedVolID := kstrings.UnescapeQualifiedNameForDisk(tc.volID)
if volID != unsanitizedVolID {
t.Errorf("expected unsanitized volID %s, got volID %s", unsanitizedVolID, volID)
volHandle := spec.PersistentVolume.Spec.CSI.VolumeHandle
if volHandle != tc.data[volDataKey.volHandle] {
t.Errorf("expected volID %s, got volID %s", tc.data[volDataKey.volHandle], volHandle)
}
if spec.Name() != "test-pv" {
if spec.Name() != tc.specVolID {
t.Errorf("Unexpected spec name %s", spec.Name())
}
}

View File

@ -109,6 +109,17 @@ func (f *NodeClient) NodePublishVolume(ctx grpctx.Context, req *csipb.NodePublis
return &csipb.NodePublishVolumeResponse{}, nil
}
// NodeProbe implements csi NodeProbe
func (f *NodeClient) NodeProbe(ctx context.Context, req *csipb.NodeProbeRequest, opts ...grpc.CallOption) (*csipb.NodeProbeResponse, error) {
	// Surface any injected test error before looking at the request.
	if err := f.nextErr; err != nil {
		return nil, err
	}
	// A NodeProbe request must carry a version; reject it otherwise.
	if req.Version == nil {
		return nil, errors.New("missing version")
	}
	return &csipb.NodeProbeResponse{}, nil
}
// NodeUnpublishVolume implements csi method
func (f *NodeClient) NodeUnpublishVolume(ctx context.Context, req *csipb.NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipb.NodeUnpublishVolumeResponse, error) {
if f.nextErr != nil {
@ -130,11 +141,6 @@ func (f *NodeClient) GetNodeID(ctx context.Context, in *csipb.GetNodeIDRequest,
return nil, nil
}
// NodeProbe implements csi method
// NOTE(review): no-op stub — always returns (nil, nil), so callers receive
// neither a response object nor an error; presumably superseded by the
// validating NodeProbe implementation elsewhere in this file — confirm.
func (f *NodeClient) NodeProbe(ctx context.Context, in *csipb.NodeProbeRequest, opts ...grpc.CallOption) (*csipb.NodeProbeResponse, error) {
return nil, nil
}
// NodeGetCapabilities implements csi method
func (f *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipb.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipb.NodeGetCapabilitiesResponse, error) {
return nil, nil

View File

@ -225,6 +225,10 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se
EmptyDir: &v1.EmptyDirVolumeSource{SizeLimit: &sizeLimit},
}, v1.ResourceRequirements{}),
},
{
evictionPriority: 0, // This pod should not be evicted because it uses less than its limit
pod: diskConsumingPod("container-disk-below-sizelimit", useUnderLimit, nil, v1.ResourceRequirements{Limits: containerLimit}),
},
})
})
})
@ -306,7 +310,8 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
},
{
evictionPriority: 0,
pod: diskConsumingPod("guaranteed-disk", 299 /* Mb */, nil, v1.ResourceRequirements{
// Only require 99% accuracy (297/300 Mb) because on some OS distributions, the file itself (excluding contents), consumes disk space.
pod: diskConsumingPod("guaranteed-disk", 297 /* Mb */, nil, v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceEphemeralStorage: resource.MustParse("300Mi"),
},

View File

@ -59,6 +59,8 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
Skip("Nvidia GPUs do not exist on the node. Skipping test.")
}
framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE")
devicePluginPod = f.PodClient().CreateSync(framework.NVIDIADevicePlugin(f.Namespace.Name))
@ -111,7 +113,9 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
f.PodClient().Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
By("Waiting for GPUs to become unavailable on the local node")
Eventually(func() bool {
return framework.NumberOfNVIDIAGPUs(getLocalNode(f)) <= 0
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
return framework.NumberOfNVIDIAGPUs(node) <= 0
}, 10*time.Minute, framework.Poll).Should(BeTrue())
By("Checking that scheduled pods can continue to run even after we delete device plugin.")
count1, devIdRestart1 = getDeviceId(f, p1.Name, p1.Name, count1+1)