Merge branch 'master' into upgrade_aliases_branch

commit dba2b58202

CHANGELOG-1.8.md (154 lines changed)
@@ -1,52 +1,59 @@
<!-- BEGIN MUNGE: GENERATED_TOC -->
- [v1.8.6](#v186)
  - [Downloads for v1.8.6](#downloads-for-v186)
- [v1.8.7](#v187)
  - [Downloads for v1.8.7](#downloads-for-v187)
    - [Client Binaries](#client-binaries)
    - [Server Binaries](#server-binaries)
    - [Node Binaries](#node-binaries)
  - [Changelog since v1.8.5](#changelog-since-v185)
  - [Changelog since v1.8.6](#changelog-since-v186)
    - [Other notable changes](#other-notable-changes)
- [v1.8.5](#v185)
  - [Downloads for v1.8.5](#downloads-for-v185)
- [v1.8.6](#v186)
  - [Downloads for v1.8.6](#downloads-for-v186)
    - [Client Binaries](#client-binaries-1)
    - [Server Binaries](#server-binaries-1)
    - [Node Binaries](#node-binaries-1)
  - [Changelog since v1.8.4](#changelog-since-v184)
  - [Changelog since v1.8.5](#changelog-since-v185)
    - [Other notable changes](#other-notable-changes-1)
- [v1.8.4](#v184)
  - [Downloads for v1.8.4](#downloads-for-v184)
- [v1.8.5](#v185)
  - [Downloads for v1.8.5](#downloads-for-v185)
    - [Client Binaries](#client-binaries-2)
    - [Server Binaries](#server-binaries-2)
    - [Node Binaries](#node-binaries-2)
  - [Changelog since v1.8.3](#changelog-since-v183)
  - [Changelog since v1.8.4](#changelog-since-v184)
    - [Other notable changes](#other-notable-changes-2)
- [v1.8.3](#v183)
  - [Downloads for v1.8.3](#downloads-for-v183)
- [v1.8.4](#v184)
  - [Downloads for v1.8.4](#downloads-for-v184)
    - [Client Binaries](#client-binaries-3)
    - [Server Binaries](#server-binaries-3)
    - [Node Binaries](#node-binaries-3)
  - [Changelog since v1.8.2](#changelog-since-v182)
  - [Changelog since v1.8.3](#changelog-since-v183)
    - [Other notable changes](#other-notable-changes-3)
- [v1.8.2](#v182)
  - [Downloads for v1.8.2](#downloads-for-v182)
- [v1.8.3](#v183)
  - [Downloads for v1.8.3](#downloads-for-v183)
    - [Client Binaries](#client-binaries-4)
    - [Server Binaries](#server-binaries-4)
    - [Node Binaries](#node-binaries-4)
  - [Changelog since v1.8.1](#changelog-since-v181)
  - [Changelog since v1.8.2](#changelog-since-v182)
    - [Other notable changes](#other-notable-changes-4)
- [v1.8.1](#v181)
  - [Downloads for v1.8.1](#downloads-for-v181)
- [v1.8.2](#v182)
  - [Downloads for v1.8.2](#downloads-for-v182)
    - [Client Binaries](#client-binaries-5)
    - [Server Binaries](#server-binaries-5)
    - [Node Binaries](#node-binaries-5)
  - [Changelog since v1.8.0](#changelog-since-v180)
    - [Action Required](#action-required)
  - [Changelog since v1.8.1](#changelog-since-v181)
    - [Other notable changes](#other-notable-changes-5)
- [v1.8.0](#v180)
  - [Downloads for v1.8.0](#downloads-for-v180)
- [v1.8.1](#v181)
  - [Downloads for v1.8.1](#downloads-for-v181)
    - [Client Binaries](#client-binaries-6)
    - [Server Binaries](#server-binaries-6)
    - [Node Binaries](#node-binaries-6)
  - [Changelog since v1.8.0](#changelog-since-v180)
    - [Action Required](#action-required)
    - [Other notable changes](#other-notable-changes-6)
- [v1.8.0](#v180)
  - [Downloads for v1.8.0](#downloads-for-v180)
    - [Client Binaries](#client-binaries-7)
    - [Server Binaries](#server-binaries-7)
    - [Node Binaries](#node-binaries-7)
  - [Introduction to v1.8.0](#introduction-to-v180)
  - [Major Themes](#major-themes)
    - [SIG API Machinery](#sig-api-machinery)
@@ -107,49 +114,112 @@
- [External Dependencies](#external-dependencies)
- [v1.8.0-rc.1](#v180-rc1)
  - [Downloads for v1.8.0-rc.1](#downloads-for-v180-rc1)
    - [Client Binaries](#client-binaries-7)
    - [Server Binaries](#server-binaries-7)
    - [Node Binaries](#node-binaries-7)
  - [Changelog since v1.8.0-beta.1](#changelog-since-v180-beta1)
    - [Action Required](#action-required-1)
    - [Other notable changes](#other-notable-changes-6)
- [v1.8.0-beta.1](#v180-beta1)
  - [Downloads for v1.8.0-beta.1](#downloads-for-v180-beta1)
    - [Client Binaries](#client-binaries-8)
    - [Server Binaries](#server-binaries-8)
    - [Node Binaries](#node-binaries-8)
  - [Changelog since v1.8.0-alpha.3](#changelog-since-v180-alpha3)
    - [Action Required](#action-required-2)
  - [Changelog since v1.8.0-beta.1](#changelog-since-v180-beta1)
    - [Action Required](#action-required-1)
    - [Other notable changes](#other-notable-changes-7)
- [v1.8.0-alpha.3](#v180-alpha3)
  - [Downloads for v1.8.0-alpha.3](#downloads-for-v180-alpha3)
- [v1.8.0-beta.1](#v180-beta1)
  - [Downloads for v1.8.0-beta.1](#downloads-for-v180-beta1)
    - [Client Binaries](#client-binaries-9)
    - [Server Binaries](#server-binaries-9)
    - [Node Binaries](#node-binaries-9)
  - [Changelog since v1.8.0-alpha.2](#changelog-since-v180-alpha2)
    - [Action Required](#action-required-3)
  - [Changelog since v1.8.0-alpha.3](#changelog-since-v180-alpha3)
    - [Action Required](#action-required-2)
    - [Other notable changes](#other-notable-changes-8)
- [v1.8.0-alpha.2](#v180-alpha2)
  - [Downloads for v1.8.0-alpha.2](#downloads-for-v180-alpha2)
- [v1.8.0-alpha.3](#v180-alpha3)
  - [Downloads for v1.8.0-alpha.3](#downloads-for-v180-alpha3)
    - [Client Binaries](#client-binaries-10)
    - [Server Binaries](#server-binaries-10)
    - [Node Binaries](#node-binaries-10)
  - [Changelog since v1.7.0](#changelog-since-v170)
    - [Action Required](#action-required-4)
  - [Changelog since v1.8.0-alpha.2](#changelog-since-v180-alpha2)
    - [Action Required](#action-required-3)
    - [Other notable changes](#other-notable-changes-9)
- [v1.8.0-alpha.1](#v180-alpha1)
  - [Downloads for v1.8.0-alpha.1](#downloads-for-v180-alpha1)
- [v1.8.0-alpha.2](#v180-alpha2)
  - [Downloads for v1.8.0-alpha.2](#downloads-for-v180-alpha2)
    - [Client Binaries](#client-binaries-11)
    - [Server Binaries](#server-binaries-11)
    - [Node Binaries](#node-binaries-11)
  - [Changelog since v1.7.0](#changelog-since-v170)
    - [Action Required](#action-required-4)
    - [Other notable changes](#other-notable-changes-10)
- [v1.8.0-alpha.1](#v180-alpha1)
  - [Downloads for v1.8.0-alpha.1](#downloads-for-v180-alpha1)
    - [Client Binaries](#client-binaries-12)
    - [Server Binaries](#server-binaries-12)
    - [Node Binaries](#node-binaries-12)
  - [Changelog since v1.7.0-alpha.4](#changelog-since-v170-alpha4)
    - [Action Required](#action-required-5)
    - [Other notable changes](#other-notable-changes-10)
    - [Other notable changes](#other-notable-changes-11)
<!-- END MUNGE: GENERATED_TOC -->
<!-- NEW RELEASE NOTES ENTRY -->


# v1.8.7

[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.8/examples)

## Downloads for v1.8.7

filename | sha256 hash
-------- | -----------
[kubernetes.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes.tar.gz) | `39389e6bc459e96af44dbca38697a14fa292a66e5d5b82cced2ed5cd321b3793`
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-src.tar.gz) | `9b9ecc3a6f4b5681038742744e70d1a89ce6fb829106118710df93ff9a69558b`

### Client Binaries

filename | sha256 hash
-------- | -----------
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-darwin-386.tar.gz) | `4f5517d5c1a13921f818e76e7d9639744d166d9289196465f6811bfd6bebb7ee`
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-darwin-amd64.tar.gz) | `608a5a88fed518a378f4f30b2bb1743def2366eb99b11825123f9c6ec8117f5e`
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-linux-386.tar.gz) | `e4e13b177f313050a68f17793eaf314c53501f7b5225aaa6a5da516ac46b6726`
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-linux-amd64.tar.gz) | `b5bd43f15fb091959fd6b4cff739b24da3194d26ed598d512adbd4b59d6a0eaa`
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-linux-arm.tar.gz) | `0856ad62860ecedc327cb5162617c4cd3af3f40cd8308fccf0491259da5e5199`
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-linux-arm64.tar.gz) | `8c5afcb917fff4c9e927609580cb211d7daa6b7c40b2e4d67766df65b47c9883`
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-linux-ppc64le.tar.gz) | `3380e8a50330efa8e626c65ccc5dadcd79c6acacfadb00bb0845271eaf6091b1`
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-linux-s390x.tar.gz) | `1ba97be9f269579c2b004a898036a4d4acb7f12455c1bf43d6ab4cd7cb6e1718`
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-windows-386.tar.gz) | `1c7718117647e0940e007e1383b20ca438068fc74e42eb017529c6e7ec0c5bfa`
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-client-windows-amd64.tar.gz) | `a962223bd349b58f85e86b91d559a3a55ffa48c17322ccc3cf35cf215b5f8633`

### Server Binaries

filename | sha256 hash
-------- | -----------
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-server-linux-amd64.tar.gz) | `ea3df45a3cd573ba7d1a6d7fcddaf9a2812243560d591f7ba6a497f0467b18b8`
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-server-linux-arm.tar.gz) | `8e4a67569e4182ffe623419b9a16d078f3a3f48f592993e83f25cc08fefd4b3d`
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-server-linux-arm64.tar.gz) | `1fca5b099a180a733cad9a382604d69b9b1a63a4b2bbd40e32d54871f3f06489`
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-server-linux-ppc64le.tar.gz) | `9233ed62830b505abebf6d0c120a9aa1a3eb1fe70cd7750d60552ca9ec0e4f7d`
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-server-linux-s390x.tar.gz) | `2ec3385847af78e66b18b1fcf9de7c75c4af26f44c07dfbb37d5d793578a7595`

### Node Binaries

filename | sha256 hash
-------- | -----------
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-node-linux-amd64.tar.gz) | `79ee543a9c2636f1491715739c3c54cb70ae5b215fe5ce3345e6ff92759ace72`
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-node-linux-arm.tar.gz) | `60c40066bd1b9a6996371a47d1113a7ef30295e9ea37f738cd7ce86cda380516`
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-node-linux-arm64.tar.gz) | `92ee26c0bbb0d016122c38831903ee82d83c33b289463b9f4dc3481e5c096f9c`
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-node-linux-ppc64le.tar.gz) | `965ddb5e7c54975aa5ce35507317f9738db34f799c67e4fc625e150aac7f5c38`
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-node-linux-s390x.tar.gz) | `5e71d983830ab11aff065fe872bea9e9cfc663d62cd9480b4085a2d1bbf8ca95`
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.8.7/kubernetes-node-windows-amd64.tar.gz) | `6f364309fd9dc34f7c7bc13d279499fd7c434ce5cfab379f0e9848e5fab497e0`
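The sha256 table above can be checked locally after download; a minimal sketch, using the v1.8.7 server tarball and its hash from the table (adjust file name and hash for other artifacts):

curl -LO https://dl.k8s.io/v1.8.7/kubernetes-server-linux-amd64.tar.gz
echo "ea3df45a3cd573ba7d1a6d7fcddaf9a2812243560d591f7ba6a497f0467b18b8  kubernetes-server-linux-amd64.tar.gz" | sha256sum -c -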
## Changelog since v1.8.6

### Other notable changes

* fix device name change issue for azure disk: add remount logic ([#57953](https://github.com/kubernetes/kubernetes/pull/57953), [@andyzhangx](https://github.com/andyzhangx))
* GCE: Allows existing internal load balancers to continue using an outdated subnetwork ([#57861](https://github.com/kubernetes/kubernetes/pull/57861), [@nicksardo](https://github.com/nicksardo))
* fix azure disk not available issue when device name changed ([#57549](https://github.com/kubernetes/kubernetes/pull/57549), [@andyzhangx](https://github.com/andyzhangx))
* Allow kubernetes components to react to SIGTERM signal and shutdown gracefully. ([#57756](https://github.com/kubernetes/kubernetes/pull/57756), [@mborsz](https://github.com/mborsz))
* fix incorrect error info when creating an azure file PVC failed ([#56550](https://github.com/kubernetes/kubernetes/pull/56550), [@andyzhangx](https://github.com/andyzhangx))
* GCE: Fixes ILB creation on automatic networks with manually created subnetworks. ([#57351](https://github.com/kubernetes/kubernetes/pull/57351), [@nicksardo](https://github.com/nicksardo))
* Configurable liveness probe initial delays for etcd and kube-apiserver in GCE ([#57749](https://github.com/kubernetes/kubernetes/pull/57749), [@wojtek-t](https://github.com/wojtek-t))
* Fixes a bug where if an error was returned that was not an `autorest.DetailedError` we would return `"not found", nil` which caused nodes to go to `NotReady` state. ([#57484](https://github.com/kubernetes/kubernetes/pull/57484), [@brendandburns](https://github.com/brendandburns))


# v1.8.6

[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.8/examples)

CHANGELOG-1.9.md (126 lines changed)
@@ -1,16 +1,23 @@
<!-- BEGIN MUNGE: GENERATED_TOC -->
- [v1.9.1](#v191)
  - [Downloads for v1.9.1](#downloads-for-v191)
- [v1.9.2](#v192)
  - [Downloads for v1.9.2](#downloads-for-v192)
    - [Client Binaries](#client-binaries)
    - [Server Binaries](#server-binaries)
    - [Node Binaries](#node-binaries)
  - [Changelog since v1.9.0](#changelog-since-v190)
  - [Changelog since v1.9.1](#changelog-since-v191)
    - [Other notable changes](#other-notable-changes)
- [v1.9.0](#v190)
  - [Downloads for v1.9.0](#downloads-for-v190)
- [v1.9.1](#v191)
  - [Downloads for v1.9.1](#downloads-for-v191)
    - [Client Binaries](#client-binaries-1)
    - [Server Binaries](#server-binaries-1)
    - [Node Binaries](#node-binaries-1)
  - [Changelog since v1.9.0](#changelog-since-v190)
    - [Other notable changes](#other-notable-changes-1)
- [v1.9.0](#v190)
  - [Downloads for v1.9.0](#downloads-for-v190)
    - [Client Binaries](#client-binaries-2)
    - [Server Binaries](#server-binaries-2)
    - [Node Binaries](#node-binaries-2)
- [1.9 Release Notes](#19-release-notes)
  - [WARNING: etcd backup strongly recommended](#warning-etcd-backup-strongly-recommended)
  - [Introduction to 1.9.0](#introduction-to-190)
@@ -98,48 +105,117 @@
- [External Dependencies](#external-dependencies)
- [v1.9.0-beta.2](#v190-beta2)
  - [Downloads for v1.9.0-beta.2](#downloads-for-v190-beta2)
    - [Client Binaries](#client-binaries-2)
    - [Server Binaries](#server-binaries-2)
    - [Node Binaries](#node-binaries-2)
  - [Changelog since v1.9.0-beta.1](#changelog-since-v190-beta1)
    - [Other notable changes](#other-notable-changes-1)
- [v1.9.0-beta.1](#v190-beta1)
  - [Downloads for v1.9.0-beta.1](#downloads-for-v190-beta1)
    - [Client Binaries](#client-binaries-3)
    - [Server Binaries](#server-binaries-3)
    - [Node Binaries](#node-binaries-3)
  - [Changelog since v1.9.0-alpha.3](#changelog-since-v190-alpha3)
    - [Action Required](#action-required)
  - [Changelog since v1.9.0-beta.1](#changelog-since-v190-beta1)
    - [Other notable changes](#other-notable-changes-2)
- [v1.9.0-alpha.3](#v190-alpha3)
  - [Downloads for v1.9.0-alpha.3](#downloads-for-v190-alpha3)
- [v1.9.0-beta.1](#v190-beta1)
  - [Downloads for v1.9.0-beta.1](#downloads-for-v190-beta1)
    - [Client Binaries](#client-binaries-4)
    - [Server Binaries](#server-binaries-4)
    - [Node Binaries](#node-binaries-4)
  - [Changelog since v1.9.0-alpha.2](#changelog-since-v190-alpha2)
    - [Action Required](#action-required-1)
  - [Changelog since v1.9.0-alpha.3](#changelog-since-v190-alpha3)
    - [Action Required](#action-required)
    - [Other notable changes](#other-notable-changes-3)
- [v1.9.0-alpha.2](#v190-alpha2)
  - [Downloads for v1.9.0-alpha.2](#downloads-for-v190-alpha2)
- [v1.9.0-alpha.3](#v190-alpha3)
  - [Downloads for v1.9.0-alpha.3](#downloads-for-v190-alpha3)
    - [Client Binaries](#client-binaries-5)
    - [Server Binaries](#server-binaries-5)
    - [Node Binaries](#node-binaries-5)
  - [Changelog since v1.8.0](#changelog-since-v180)
    - [Action Required](#action-required-2)
  - [Changelog since v1.9.0-alpha.2](#changelog-since-v190-alpha2)
    - [Action Required](#action-required-1)
    - [Other notable changes](#other-notable-changes-4)
- [v1.9.0-alpha.1](#v190-alpha1)
  - [Downloads for v1.9.0-alpha.1](#downloads-for-v190-alpha1)
- [v1.9.0-alpha.2](#v190-alpha2)
  - [Downloads for v1.9.0-alpha.2](#downloads-for-v190-alpha2)
    - [Client Binaries](#client-binaries-6)
    - [Server Binaries](#server-binaries-6)
    - [Node Binaries](#node-binaries-6)
  - [Changelog since v1.8.0](#changelog-since-v180)
    - [Action Required](#action-required-2)
    - [Other notable changes](#other-notable-changes-5)
- [v1.9.0-alpha.1](#v190-alpha1)
  - [Downloads for v1.9.0-alpha.1](#downloads-for-v190-alpha1)
    - [Client Binaries](#client-binaries-7)
    - [Server Binaries](#server-binaries-7)
    - [Node Binaries](#node-binaries-7)
  - [Changelog since v1.8.0-alpha.3](#changelog-since-v180-alpha3)
    - [Action Required](#action-required-3)
    - [Other notable changes](#other-notable-changes-5)
    - [Other notable changes](#other-notable-changes-6)
<!-- END MUNGE: GENERATED_TOC -->
<!-- NEW RELEASE NOTES ENTRY -->


# v1.9.2

[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples)

## Downloads for v1.9.2

filename | sha256 hash
-------- | -----------
[kubernetes.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes.tar.gz) | `7a922d49b1194cb1b59b22cecb4eb1197f7c37250d4326410dc71aa5dc5ec8a2`
[kubernetes-src.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-src.tar.gz) | `9f128809cdd442d71a13f7c61c7a0e03e832cf0c068a86184c1bcc9acdb78872`

### Client Binaries

filename | sha256 hash
-------- | -----------
[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-darwin-386.tar.gz) | `37d2dd1b1762f1040699584736bbc1a2392e94779a19061d477786bcce3d3f01`
[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-darwin-amd64.tar.gz) | `42adc9762b30bfd3648323f9a8f350efeedec08a901997073f6d4244f7a16f78`
[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-linux-386.tar.gz) | `5dde6c6388353376aaa0bd731b0366d9d2d11baee3746662b008e09d9618d55f`
[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-linux-amd64.tar.gz) | `c45cf9e9d27b9d1bfc6d26f86856271fec6f8e7007f014597d27668f72f8c349`
[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-linux-arm64.tar.gz) | `05c3810b00adcdbf7bc67671847f11e287da72f308cc704e5679e83564236fee`
[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-linux-arm.tar.gz) | `a9421d4627eb9eaa1e46cfd4276943e25b5b80e52db6945f173a2a45782ce42d`
[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-linux-ppc64le.tar.gz) | `adc345ab050e09a3069a47e862c0ce88630a586905b33f6e5fd339005ceffbbf`
[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-linux-s390x.tar.gz) | `fdff4b462e67569a4a1110b696d8af2c563e0a19e50a58a7b1a4346942b07993`
[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-windows-386.tar.gz) | `1a82e8e4213153993a6e86e74120f62f95645952b223ed8586316358dd22a225`
[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-client-windows-amd64.tar.gz) | `a8648d4d3e0f85597bd57de87459a040ceab4c073d647027a70b0fba8862eab3`

### Server Binaries

filename | sha256 hash
-------- | -----------
[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-server-linux-amd64.tar.gz) | `2218fe0b939273b57ce00c7d5f3f7d2c34ebde5ae500ba2646eea6ba26c7c63d`
[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-server-linux-arm64.tar.gz) | `3b4bc6cf91c3eaf37ef2b361dd77e838f0a8ca2b8cbb4dd42793c1fea5186b69`
[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-server-linux-arm.tar.gz) | `73e77da0ddc951f791b5f7b73420ba0dbb141b3637cc48b4e916a41249e40ce3`
[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-server-linux-ppc64le.tar.gz) | `860ba4ac773e4aff69dde781cac7ac1fb1824f2158155dfa49c50dd3acf0ab82`
[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-server-linux-s390x.tar.gz) | `19e0fd7863e217b4cb67f91b56ceb5939ae677f523681bdf8ccac174f36f576d`

### Node Binaries

filename | sha256 hash
-------- | -----------
[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-node-linux-amd64.tar.gz) | `f86b7038dc89d79b277c5fba499f391c25f5aba8f5caa3119c05065f9917b6f9`
[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-node-linux-arm64.tar.gz) | `87f40c37a3e359a9350a3bcbe0e27ad6e7dfa0d8ee5f6d2ecf061813423ffa73`
[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-node-linux-arm.tar.gz) | `b73d879a03e7eba5543af0b56085ebb4919d401f6a06d4803517ddf606e8240e`
[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-node-linux-ppc64le.tar.gz) | `26331e5d84d98fc3a94d2d55fd411159b2a79b6083758cea1dac36a0a4a44336`
[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-node-linux-s390x.tar.gz) | `cbf52f3942965bb659d1f0f624e09ff01b2ee9f6e6217b3876c41600e1d4c711`
[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.9.2/kubernetes-node-windows-amd64.tar.gz) | `70d59046a7c949d4fd4850ee57b1cd44dddfb041c548a21354ee30d7bfb1003d`

## Changelog since v1.9.1

### Other notable changes

* Fixes authentication problem faced during various vSphere operations. ([#57978](https://github.com/kubernetes/kubernetes/pull/57978), [@prashima](https://github.com/prashima))
* The getSubnetIDForLB() should return subnet id rather than net id. ([#58208](https://github.com/kubernetes/kubernetes/pull/58208), [@FengyunPan](https://github.com/FengyunPan))
* Add cache for VM get operation in azure cloud provider ([#57432](https://github.com/kubernetes/kubernetes/pull/57432), [@karataliu](https://github.com/karataliu))
* Update kube-dns to Version 1.14.8 that includes only small changes to how Prometheus metrics are collected. ([#57918](https://github.com/kubernetes/kubernetes/pull/57918), [@rramkumar1](https://github.com/rramkumar1))
* Fixes a possible deadlock preventing quota from being recalculated ([#58107](https://github.com/kubernetes/kubernetes/pull/58107), [@ironcladlou](https://github.com/ironcladlou))
* Fixes a bug in Heapster deployment for google sink. ([#57902](https://github.com/kubernetes/kubernetes/pull/57902), [@kawych](https://github.com/kawych))
* GCE: Allows existing internal load balancers to continue using an outdated subnetwork ([#57861](https://github.com/kubernetes/kubernetes/pull/57861), [@nicksardo](https://github.com/nicksardo))
* Update etcd version to 3.1.11 ([#57811](https://github.com/kubernetes/kubernetes/pull/57811), [@xiangpengzhao](https://github.com/xiangpengzhao))
* fix device name change issue for azure disk: add remount logic ([#57953](https://github.com/kubernetes/kubernetes/pull/57953), [@andyzhangx](https://github.com/andyzhangx))
* calico-node addon tolerates all NoExecute and NoSchedule taints by default. ([#57122](https://github.com/kubernetes/kubernetes/pull/57122), [@caseydavenport](https://github.com/caseydavenport))
* Allow kubernetes components to react to SIGTERM signal and shutdown gracefully. ([#57756](https://github.com/kubernetes/kubernetes/pull/57756), [@mborsz](https://github.com/mborsz))
* Fixes controller manager crash in certain vSphere cloud provider environment. ([#57286](https://github.com/kubernetes/kubernetes/pull/57286), [@rohitjogvmw](https://github.com/rohitjogvmw))
* fix azure disk not available issue when device name changed ([#57549](https://github.com/kubernetes/kubernetes/pull/57549), [@andyzhangx](https://github.com/andyzhangx))
* GCE: support passing kube-scheduler policy config via SCHEDULER_POLICY_CONFIG ([#57425](https://github.com/kubernetes/kubernetes/pull/57425), [@yguo0905](https://github.com/yguo0905))


# v1.9.1

[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples)

Godeps/Godeps.json (generated; 6 lines changed)
@@ -2568,11 +2568,11 @@
  },
  {
    "ImportPath": "github.com/spf13/cobra",
    "Rev": "f62e98d28ab7ad31d707ba837a966378465c7b57"
    "Rev": "19e54c4a2b8a78c9d54b2bed61b1a6c5e1bfcf6f"
  },
  {
    "ImportPath": "github.com/spf13/cobra/doc",
    "Rev": "f62e98d28ab7ad31d707ba837a966378465c7b57"
    "Rev": "19e54c4a2b8a78c9d54b2bed61b1a6c5e1bfcf6f"
  },
  {
    "ImportPath": "github.com/spf13/jwalterweatherman",
@@ -2580,7 +2580,7 @@
  },
  {
    "ImportPath": "github.com/spf13/pflag",
    "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7"
    "Rev": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea"
  },
  {
    "ImportPath": "github.com/spf13/viper",

api/openapi-spec/swagger.json (generated; 10 lines changed)
@@ -74526,6 +74526,10 @@
  "description": "Driver is the name of the driver to use for this volume. Required.",
  "type": "string"
},
"fsType": {
  "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
  "type": "string"
},
"readOnly": {
  "description": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).",
  "type": "boolean"
@@ -77568,7 +77572,7 @@
  "type": "string"
},
"qosClass": {
  "description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md",
  "description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md",
  "type": "string"
},
"reason": {
@@ -82131,7 +82135,7 @@
  "type": "string"
},
"metadata": {
  "description": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
  "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
  "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
},
"value": {
@@ -82170,7 +82174,7 @@
  "type": "string"
},
"metadata": {
  "description": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
  "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
  "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
}
},

api/swagger-spec/scheduling.k8s.io_v1alpha1.json (generated; 4 lines changed)
@@ -744,7 +744,7 @@
},
"metadata": {
  "$ref": "v1.ListMeta",
  "description": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata"
  "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
},
"items": {
  "type": "array",
@@ -790,7 +790,7 @@
},
"metadata": {
  "$ref": "v1.ObjectMeta",
  "description": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata"
  "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata"
},
"value": {
  "type": "integer",

api/swagger-spec/v1.json (generated; 6 lines changed)
@@ -21320,6 +21320,10 @@
"readOnly": {
  "type": "boolean",
  "description": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write)."
},
"fsType": {
  "type": "string",
  "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
}
}
},
@@ -23155,7 +23159,7 @@
},
"qosClass": {
  "type": "string",
  "description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md"
  "description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md"
}
}
},
@@ -100,12 +100,10 @@ The main output is a tar file: `kubernetes.tar.gz`. This includes:
* Examples
* Cluster deployment scripts for various clouds
* Tar file containing all server binaries
* Tar file containing salt deployment tree shared across multiple cloud deployments.

In addition, there are some other tar files that are created:
* `kubernetes-client-*.tar.gz` Client binaries for a specific platform.
* `kubernetes-server-*.tar.gz` Server binaries for a specific platform.
* `kubernetes-salt.tar.gz` The salt script/tree shared across multiple deployment scripts.

When building final release tars, they are first staged into `_output/release-stage` before being tar'd up and put into `_output/release-tars`.
@@ -78,7 +78,6 @@ function kube::release::package_tarballs() {
  mkdir -p "${RELEASE_TARS}"
  kube::release::package_src_tarball &
  kube::release::package_client_tarballs &
  kube::release::package_salt_tarball &
  kube::release::package_kube_manifests_tarball &
  kube::util::wait-for-jobs || { kube::log::error "previous tarball phase failed"; return 1; }
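The `&`-suffixed calls above build each tarball in a background job, then gate on kube::util::wait-for-jobs. A minimal self-contained sketch of the same pattern using plain bash `wait` (function names here are placeholders, not the project's helpers):

package_one() { sleep 1; echo "packaged $1"; }  # stand-in for a real packaging step
package_one src & package_one client & package_one manifests &
failed=0
for pid in $(jobs -p); do
  wait "${pid}" || failed=1
done
if (( failed )); then echo "previous tarball phase failed"; exit 1; fi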
@@ -359,70 +358,39 @@ function kube::release::create_docker_images_for_server() {

}

# Package up the salt configuration tree. This is an optional helper to getting
# a cluster up and running.
function kube::release::package_salt_tarball() {
  kube::log::status "Building tarball: salt"

  local release_stage="${RELEASE_STAGE}/salt/kubernetes"
  rm -rf "${release_stage}"
  mkdir -p "${release_stage}"

  cp -R "${KUBE_ROOT}/cluster/saltbase" "${release_stage}/"

  # TODO(#3579): This is a temporary hack. It gathers up the yaml,
  # yaml.in, json files in cluster/addons (minus any demos) and overlays
  # them into kube-addons, where we expect them. (This pipeline is a
  # fancy copy, stripping anything but the files we don't want.)
  local objects
  objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo)
  tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${release_stage}/saltbase/salt/kube-addons"

  kube::release::clean_cruft

  local package_name="${RELEASE_TARS}/kubernetes-salt.tar.gz"
  kube::release::create_tarball "${package_name}" "${release_stage}/.."
}

# This will pack kube-system manifests files for distros without using salt
# such as GCI and Ubuntu Trusty. We directly copy manifests from
# cluster/addons and cluster/saltbase/salt. The script of cluster initialization
# will remove the salt configuration and evaluate the variables in the manifests.
# This will pack kube-system manifests files for distros such as COS.
function kube::release::package_kube_manifests_tarball() {
  kube::log::status "Building tarball: manifests"

  local salt_dir="${KUBE_ROOT}/cluster/saltbase/salt"
  local src_dir="${KUBE_ROOT}/cluster/gce/manifests"

  local release_stage="${RELEASE_STAGE}/manifests/kubernetes"
  rm -rf "${release_stage}"

  mkdir -p "${release_stage}"
  cp "${salt_dir}/kube-registry-proxy/kube-registry-proxy.yaml" "${release_stage}/"
  cp "${salt_dir}/kube-proxy/kube-proxy.manifest" "${release_stage}/"

  local gci_dst_dir="${release_stage}/gci-trusty"
  mkdir -p "${gci_dst_dir}"
  cp "${salt_dir}/cluster-autoscaler/cluster-autoscaler.manifest" "${gci_dst_dir}/"
  cp "${salt_dir}/etcd/etcd.manifest" "${gci_dst_dir}"
  cp "${salt_dir}/kube-scheduler/kube-scheduler.manifest" "${gci_dst_dir}"
  cp "${salt_dir}/kube-apiserver/kube-apiserver.manifest" "${gci_dst_dir}"
  cp "${salt_dir}/kube-apiserver/abac-authz-policy.jsonl" "${gci_dst_dir}"
  cp "${salt_dir}/kube-controller-manager/kube-controller-manager.manifest" "${gci_dst_dir}"
  cp "${salt_dir}/kube-addons/kube-addon-manager.yaml" "${gci_dst_dir}"
  cp "${salt_dir}/l7-gcp/glbc.manifest" "${gci_dst_dir}"
  cp "${salt_dir}/rescheduler/rescheduler.manifest" "${gci_dst_dir}/"
  cp "${salt_dir}/e2e-image-puller/e2e-image-puller.manifest" "${gci_dst_dir}/"
  cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${gci_dst_dir}/gci-configure-helper.sh"
  cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${gci_dst_dir}/health-monitor.sh"
  cp -r "${salt_dir}/kube-admission-controls/limit-range" "${gci_dst_dir}"
  local dst_dir="${release_stage}/gci-trusty"
  mkdir -p "${dst_dir}"
  cp "${src_dir}/kube-registry-proxy.yaml" "${dst_dir}/"
  cp "${src_dir}/kube-proxy.manifest" "${dst_dir}/"
  cp "${src_dir}/cluster-autoscaler.manifest" "${dst_dir}/"
  cp "${src_dir}/etcd.manifest" "${dst_dir}"
  cp "${src_dir}/kube-scheduler.manifest" "${dst_dir}"
  cp "${src_dir}/kube-apiserver.manifest" "${dst_dir}"
  cp "${src_dir}/abac-authz-policy.jsonl" "${dst_dir}"
  cp "${src_dir}/kube-controller-manager.manifest" "${dst_dir}"
  cp "${src_dir}/kube-addon-manager.yaml" "${dst_dir}"
  cp "${src_dir}/glbc.manifest" "${dst_dir}"
  cp "${src_dir}/rescheduler.manifest" "${dst_dir}/"
  cp "${src_dir}/e2e-image-puller.manifest" "${dst_dir}/"
  cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${dst_dir}/gci-configure-helper.sh"
  cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${dst_dir}/health-monitor.sh"
  local objects
  objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo)
  tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${gci_dst_dir}"
  tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${dst_dir}"
  # Merge GCE-specific addons with general purpose addons.
  local gce_objects
  gce_objects=$(cd "${KUBE_ROOT}/cluster/gce/addons" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) \( -not -name \*demo\* \))
  if [[ -n "${gce_objects}" ]]; then
    tar c -C "${KUBE_ROOT}/cluster/gce/addons" ${gce_objects} | tar x -C "${gci_dst_dir}"
    tar c -C "${KUBE_ROOT}/cluster/gce/addons" ${gce_objects} | tar x -C "${dst_dir}"
  fi

  kube::release::clean_cruft
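The tar-pipe used above is the "fancy copy" the TODO comment describes: it copies a filtered file list from one tree to another while preserving relative paths. A standalone sketch (destination directory is a placeholder; assumes KUBE_ROOT is set as in the script):

src="${KUBE_ROOT}/cluster/addons"
dst="/tmp/kube-addons-staging"
mkdir -p "${dst}"
# Collect yaml/yaml.in/json files, excluding demos, then replicate them into dst.
objects=$(cd "${src}" && find . \( -name \*.yaml -or -name \*.yaml.in -or -name \*.json \) | grep -v demo)
tar c -C "${src}" ${objects} | tar x -C "${dst}"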
@@ -471,8 +439,7 @@ function kube::release::package_test_tarball() {
# using the bundled cluster/get-kube-binaries.sh script).
# Included in this tarball:
#   - Cluster spin up/down scripts and configs for various cloud providers
#   - Tarballs for salt configs that are ready to be uploaded
#     to master by whatever means appropriate.
#   - Tarballs for manifest configs that are ready to be uploaded
#   - Examples (which may or may not still work)
#   - The remnants of the docs/ directory
function kube::release::package_final_tarball() {
@@ -491,13 +458,10 @@ Client binaries are no longer included in the Kubernetes final tarball.
Run cluster/get-kube-binaries.sh to download client and server binaries.
EOF

  # We want everything in /cluster except saltbase. That is only needed on the
  # server.
  # We want everything in /cluster.
  cp -R "${KUBE_ROOT}/cluster" "${release_stage}/"
  rm -rf "${release_stage}/cluster/saltbase"

  mkdir -p "${release_stage}/server"
  cp "${RELEASE_TARS}/kubernetes-salt.tar.gz" "${release_stage}/server/"
  cp "${RELEASE_TARS}/kubernetes-manifests.tar.gz" "${release_stage}/server/"
  cat <<EOF > "${release_stage}/server/README"
Server binary tarballs are no longer included in the Kubernetes final tarball.
@@ -180,7 +180,6 @@ pkg_tar(
    build_tar = "@io_kubernetes_build//tools/build_tar",
    files = [
        ":kubernetes-manifests.tar.gz",
        ":kubernetes-salt.tar.gz",
    ],
    package_dir = "server",
    visibility = ["//visibility:private"],
@@ -216,15 +215,6 @@
    ],
)

pkg_tar(
    name = "kubernetes-salt",
    build_tar = "@io_kubernetes_build//tools/build_tar",
    extension = "tar.gz",
    deps = [
        "//cluster/saltbase:salt",
    ],
)

release_filegroup(
    name = "release-tars",
    srcs = [
@@ -233,7 +223,6 @@ release_filegroup(
        ":kubernetes-node-%s.tar.gz" % PLATFORM_ARCH_STRING,
        ":kubernetes-server-%s.tar.gz" % PLATFORM_ARCH_STRING,
        ":kubernetes-manifests.tar.gz",
        ":kubernetes-salt.tar.gz",
        ":kubernetes-src.tar.gz",
        ":kubernetes-test.tar.gz",
    ],
@@ -20,32 +20,19 @@ filegroup(
        "//cluster/images/etcd/rollback:all-srcs",
        "//cluster/images/hyperkube:all-srcs",
        "//cluster/images/kubemark:all-srcs",
        "//cluster/saltbase:all-srcs",
    ],
    tags = ["automanaged"],
)

# All of the manifests that are expected to be in a "gci-trusty"
# subdir of the manifests tarball.
pkg_tar(
    name = "_manifests-gci-trusty",
    package_dir = "gci-trusty",
    visibility = ["//visibility:private"],
    deps = [
        "//cluster/addons",
        "//cluster/gce:gci-trusty-manifests",
        "//cluster/gce/addons",
        "//cluster/saltbase:gci-trusty-salt-manifests",
    ],
)

pkg_tar(
    name = "manifests",
    mode = "0644",
    package_dir = "kubernetes",
    package_dir = "kubernetes/gci-trusty",
    deps = [
        ":_manifests-gci-trusty",
        "//cluster/saltbase:salt-manifests",
        "//cluster/addons",
        "//cluster/gce:gce-master-manifests",
        "//cluster/gce:gci-trusty-manifests",
        "//cluster/gce/addons",
    ],
)
@@ -15,7 +15,7 @@
IMAGE=gcr.io/google-containers/kube-addon-manager
ARCH?=amd64
TEMP_DIR:=$(shell mktemp -d)
VERSION=v8.4
VERSION=v8.5
KUBECTL_VERSION?=v1.8.4

ifeq ($(ARCH),amd64)
@@ -155,7 +155,7 @@ function is_leader() {
  fi
  KUBE_CONTROLLER_MANAGER_LEADER=`${KUBECTL} -n kube-system get ep kube-controller-manager \
    -o go-template=$'{{index .metadata.annotations "control-plane.alpha.kubernetes.io/leader"}}' \
    | sed 's/^.*"holderIdentity":"\([^"]*\)".*/\1/'`
    | sed 's/^.*"holderIdentity":"\([^"]*\)".*/\1/' | awk -F'_' '{print $1}'`
  # If there was any problem with getting the leader election results, var will
  # be empty. Since it's better to have multiple addon managers than no addon
  # managers at all, we're going to assume that we're the leader in such case.
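The added awk step matters because the election annotation's holderIdentity is a hostname plus a per-process suffix joined by an underscore; splitting on `_` keeps only the hostname for comparison. A sketch with a hypothetical annotation value:

annotation='{"holderIdentity":"kube-master_0a1b2c3d","leaseDurationSeconds":15}'
leader=$(echo "${annotation}" | sed 's/^.*"holderIdentity":"\([^"]*\)".*/\1/' | awk -F'_' '{print $1}')
echo "${leader}"   # -> kube-master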
@@ -47,7 +47,7 @@ roleRef:
  apiGroup: ""
---
# Elasticsearch deployment itself
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-logging
@@ -129,17 +129,6 @@
        max_lines 1000
      </match>
  system.input.conf: |-
    # Example:
    # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
    <source>
      type tail
      format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
      time_format %Y-%m-%d %H:%M:%S
      path /var/log/salt/minion
      pos_file /var/log/es-salt.pos
      tag salt
    </source>

    # Example:
    # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
    <source>
@@ -45,7 +45,7 @@
  name: fluentd-es
  apiGroup: ""
---
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-es-v2.0.3
@@ -1,4 +1,4 @@
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana-logging
@@ -81,7 +81,7 @@ spec:
        - --config-dir=/etc/config
        - --cpu=40m
        - --extra-cpu=0.5m
        - --memory=140Mi
        - --memory=40Mi
        - --extra-memory=4Mi
        - --threshold=5
        - --deployment=metrics-server-v0.2.1
@@ -33,11 +33,6 @@ mkdir -p "$cert_dir"

use_cn=false

# TODO: Add support for discovery on other providers?
if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
  cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
fi

sans="IP:${cert_ip}"
if [[ -n "${extra_sans}" ]]; then
  sans="${sans},${extra_sans}"
@@ -234,7 +234,7 @@ echo "[INFO] tear-down-node on $1"
# Generate the CA certificates for k8s components
function make-ca-cert() {
  echo "[INFO] make-ca-cert"
  bash "${ROOT}/../saltbase/salt/generate-cert/make-ca-cert.sh" "${MASTER_ADVERTISE_IP}" "IP:${MASTER_ADVERTISE_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local"
  bash "${ROOT}/make-ca-cert.sh" "${MASTER_ADVERTISE_IP}" "IP:${MASTER_ADVERTISE_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local"
}

# Provision master
@@ -351,8 +351,6 @@ function set_binary_version() {
#   KUBE_TAR_HASH
#   SERVER_BINARY_TAR_URL
#   SERVER_BINARY_TAR_HASH
#   SALT_TAR_URL
#   SALT_TAR_HASH
function tars_from_version() {
  local sha1sum=""
  if which sha1sum >/dev/null 2>&1; then
@@ -366,13 +364,11 @@
    upload-server-tars
  elif [[ ${KUBE_VERSION} =~ ${KUBE_RELEASE_VERSION_REGEX} ]]; then
    SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz"
    SALT_TAR_URL="https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/kubernetes-salt.tar.gz"
    # TODO: Clean this up.
    KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}"
    KUBE_MANIFESTS_TAR_HASH=$(curl ${KUBE_MANIFESTS_TAR_URL} --silent --show-error | ${sha1sum} | awk '{print $1}')
  elif [[ ${KUBE_VERSION} =~ ${KUBE_CI_VERSION_REGEX} ]]; then
    SERVER_BINARY_TAR_URL="https://storage.googleapis.com/kubernetes-release-dev/ci/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz"
    SALT_TAR_URL="https://storage.googleapis.com/kubernetes-release-dev/ci/${KUBE_VERSION}/kubernetes-salt.tar.gz"
    # TODO: Clean this up.
    KUBE_MANIFESTS_TAR_URL="${SERVER_BINARY_TAR_URL/server-linux-amd64/manifests}"
    KUBE_MANIFESTS_TAR_HASH=$(curl ${KUBE_MANIFESTS_TAR_URL} --silent --show-error | ${sha1sum} | awk '{print $1}')
@@ -383,18 +379,11 @@
  if ! SERVER_BINARY_TAR_HASH=$(curl -Ss --fail "${SERVER_BINARY_TAR_URL}.sha1"); then
    echo "Failure trying to curl release .sha1"
  fi
  if ! SALT_TAR_HASH=$(curl -Ss --fail "${SALT_TAR_URL}.sha1"); then
    echo "Failure trying to curl Salt tar .sha1"
  fi

  if ! curl -Ss --head "${SERVER_BINARY_TAR_URL}" >&/dev/null; then
    echo "Can't find release at ${SERVER_BINARY_TAR_URL}" >&2
    exit 1
  fi
  if ! curl -Ss --head "${SALT_TAR_URL}" >&/dev/null; then
    echo "Can't find Salt tar at ${SALT_TAR_URL}" >&2
    exit 1
  fi
}

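The `.sha1` retrieval above pairs with the hash pipeline the function builds earlier (`curl ... | ${sha1sum} | awk '{print $1}'`). A sketch of verifying a downloaded release tarball the same way (the version string is illustrative):

url="https://storage.googleapis.com/kubernetes-release/release/v1.8.7/kubernetes-server-linux-amd64.tar.gz"
expected=$(curl -Ss --fail "${url}.sha1")
actual=$(curl -Ss "${url}" | sha1sum | awk '{print $1}')
[[ "${expected}" == "${actual}" ]] && echo "sha1 OK" || echo "sha1 mismatch"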
# Search for the specified tarball in the various known output locations,
@@ -427,11 +416,9 @@
#   KUBE_ROOT
# Vars set:
#   SERVER_BINARY_TAR
#   SALT_TAR
#   KUBE_MANIFESTS_TAR
function find-release-tars() {
  SERVER_BINARY_TAR=$(find-tar kubernetes-server-linux-amd64.tar.gz)
  SALT_TAR=$(find-tar kubernetes-salt.tar.gz)

  # This tarball is used by GCI, Ubuntu Trusty, and Container Linux.
  KUBE_MANIFESTS_TAR=
@@ -573,13 +560,11 @@ function build-kube-env {
  local file=$2

  local server_binary_tar_url=$SERVER_BINARY_TAR_URL
  local salt_tar_url=$SALT_TAR_URL
  local kube_manifests_tar_url="${KUBE_MANIFESTS_TAR_URL:-}"
  if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]] || \
     [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] ; then
    # TODO: Support fallback .tar.gz settings on Container Linux
    server_binary_tar_url=$(split_csv "${SERVER_BINARY_TAR_URL}")
    salt_tar_url=$(split_csv "${SALT_TAR_URL}")
    kube_manifests_tar_url=$(split_csv "${KUBE_MANIFESTS_TAR_URL}")
  fi

@@ -600,8 +585,6 @@ SERVER_BINARY_TAR_URL: $(yaml-quote ${server_binary_tar_url})
SERVER_BINARY_TAR_HASH: $(yaml-quote ${SERVER_BINARY_TAR_HASH})
PROJECT_ID: $(yaml-quote ${PROJECT})
NETWORK_PROJECT_ID: $(yaml-quote ${NETWORK_PROJECT})
SALT_TAR_URL: $(yaml-quote ${salt_tar_url})
SALT_TAR_HASH: $(yaml-quote ${SALT_TAR_HASH})
SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
KUBERNETES_MASTER_NAME: $(yaml-quote ${KUBERNETES_MASTER_NAME})
ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
@@ -49,3 +49,21 @@ release_filegroup(
        "gci/node.yaml",
    ],
)

pkg_tar(
    name = "gce-master-manifests",
    files = [
        "manifests/abac-authz-policy.jsonl",
        "manifests/cluster-autoscaler.manifest",
        "manifests/e2e-image-puller.manifest",
        "manifests/etcd.manifest",
        "manifests/glbc.manifest",
        "manifests/kube-addon-manager.yaml",
        "manifests/kube-apiserver.manifest",
        "manifests/kube-controller-manager.manifest",
        "manifests/kube-proxy.manifest",
        "manifests/kube-scheduler.manifest",
        "manifests/rescheduler.manifest",
    ],
    mode = "0644",
)
@@ -1334,7 +1334,7 @@ function prepare-kube-proxy-manifest-variables {
function start-kube-proxy {
  echo "Start kube-proxy static pod"
  prepare-log-file /var/log/kube-proxy.log
  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/kube-proxy.manifest"
  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-proxy.manifest"
  prepare-kube-proxy-manifest-variables "${src_file}"

  cp "${src_file}" /etc/kubernetes/manifests
@@ -2077,7 +2077,7 @@ EOF
  sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${kubedns_file}"

  if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
    setup-addon-manifests "addons" "dns-horizontal-autoscaler"
    setup-addon-manifests "addons" "dns-horizontal-autoscaler" "gce"
  fi
}
@@ -2225,7 +2225,7 @@ EOF
    setup-addon-manifests "addons" "node-problem-detector/standalone" "node-problem-detector"
  fi
  if echo "${ADMISSION_CONTROL:-}" | grep -q "LimitRanger"; then
    setup-addon-manifests "admission-controls" "limit-range"
    setup-addon-manifests "admission-controls" "limit-range" "gce"
  fi
  if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
    setup-addon-manifests "addons" "calico-policy-controller"
@@ -2260,7 +2260,7 @@ function start-image-puller {
# Starts kube-registry proxy
function start-kube-registry-proxy {
  echo "Start kube-registry-proxy"
  cp "${KUBE_HOME}/kube-manifests/kubernetes/kube-registry-proxy.yaml" /etc/kubernetes/manifests
  cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-registry-proxy.yaml" /etc/kubernetes/manifests
}

# Starts a l7 loadbalancing controller for ingress.
@@ -255,21 +255,16 @@ fi
# Assumed vars:
#   PROJECT
#   SERVER_BINARY_TAR
#   SALT_TAR
#   KUBE_MANIFESTS_TAR
#   ZONE
# Vars set:
#   SERVER_BINARY_TAR_URL
#   SERVER_BINARY_TAR_HASH
#   SALT_TAR_URL
#   SALT_TAR_HASH
#   KUBE_MANIFESTS_TAR_URL
#   KUBE_MANIFESTS_TAR_HASH
function upload-server-tars() {
  SERVER_BINARY_TAR_URL=
  SERVER_BINARY_TAR_HASH=
  SALT_TAR_URL=
  SALT_TAR_HASH=
  KUBE_MANIFESTS_TAR_URL=
  KUBE_MANIFESTS_TAR_HASH=

@@ -287,13 +282,11 @@
  set-preferred-region

  SERVER_BINARY_TAR_HASH=$(sha1sum-file "${SERVER_BINARY_TAR}")
  SALT_TAR_HASH=$(sha1sum-file "${SALT_TAR}")
  if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then
    KUBE_MANIFESTS_TAR_HASH=$(sha1sum-file "${KUBE_MANIFESTS_TAR}")
  fi

  local server_binary_tar_urls=()
  local salt_tar_urls=()
  local kube_manifest_tar_urls=()

  for region in "${PREFERRED_REGION[@]}"; do
@@ -313,13 +306,10 @@

    echo "+++ Staging server tars to Google Storage: ${staging_path}"
    local server_binary_gs_url="${staging_path}/${SERVER_BINARY_TAR##*/}"
    local salt_gs_url="${staging_path}/${SALT_TAR##*/}"
    copy-to-staging "${staging_path}" "${server_binary_gs_url}" "${SERVER_BINARY_TAR}" "${SERVER_BINARY_TAR_HASH}"
    copy-to-staging "${staging_path}" "${salt_gs_url}" "${SALT_TAR}" "${SALT_TAR_HASH}"

    # Convert from gs:// URL to an https:// URL
    server_binary_tar_urls+=("${server_binary_gs_url/gs:\/\//https://storage.googleapis.com/}")
    salt_tar_urls+=("${salt_gs_url/gs:\/\//https://storage.googleapis.com/}")
    if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then
      local kube_manifests_gs_url="${staging_path}/${KUBE_MANIFESTS_TAR##*/}"
      copy-to-staging "${staging_path}" "${kube_manifests_gs_url}" "${KUBE_MANIFESTS_TAR}" "${KUBE_MANIFESTS_TAR_HASH}"
@@ -329,7 +319,6 @@
  done

  SERVER_BINARY_TAR_URL=$(join_csv "${server_binary_tar_urls[@]}")
  SALT_TAR_URL=$(join_csv "${salt_tar_urls[@]}")
  if [[ -n "${KUBE_MANIFESTS_TAR:-}" ]]; then
    KUBE_MANIFESTS_TAR_URL=$(join_csv "${kube_manifests_tar_urls[@]}")
  fi
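The gs:// to https:// conversion above is plain bash pattern substitution; in isolation (bucket path is illustrative):

gs_url="gs://kubernetes-staging/devel/kubernetes-server-linux-amd64.tar.gz"
https_url="${gs_url/gs:\/\//https://storage.googleapis.com/}"
echo "${https_url}"
# -> https://storage.googleapis.com/kubernetes-staging/devel/kubernetes-server-linux-amd64.tar.gz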
@@ -1,86 +0,0 @@
package(default_visibility = ["//visibility:public"])

load("@io_bazel//tools/build_defs/pkg:pkg.bzl", "pkg_tar")

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

# TODO(#3579): This is a temporary hack. It gathers up the yaml,
# yaml.in, json files in cluster/addons (minus any demos) and overlays
# them into kube-addons, where we expect them.
# These files are expected in a salt/kube-addons subdirectory.
pkg_tar(
    name = "_salt_kube-addons",
    package_dir = "salt/kube-addons",
    strip_prefix = "/cluster/addons",
    visibility = ["//visibility:private"],
    deps = [
        "//cluster/addons",
    ],
)

pkg_tar(
    name = "salt",
    files = glob(
        ["**"],
        exclude = ["BUILD"],
    ),
    mode = "0644",
    modes = {
        "install.sh": "0755",
    },
    package_dir = "kubernetes/saltbase",
    strip_prefix = ".",
    deps = [
        ":_salt_kube-addons",
    ],
)

# The following are used in the kubernetes salt tarball.
pkg_tar(
    name = "salt-manifests",
    files = [
        "salt/kube-proxy/kube-proxy.manifest",
        "salt/kube-registry-proxy/kube-registry-proxy.yaml",
    ],
    mode = "0644",
)

pkg_tar(
    name = "_kube-admission-controls",
    files = glob(["salt/kube-admission-controls/limit-range/**"]),
    mode = "0644",
    # Maintain limit-range/ subdirectory in tarball
    strip_prefix = "./salt/kube-admission-controls/",
    visibility = ["//visibility:private"],
)

pkg_tar(
    name = "gci-trusty-salt-manifests",
    files = [
        "salt/cluster-autoscaler/cluster-autoscaler.manifest",
        "salt/e2e-image-puller/e2e-image-puller.manifest",
        "salt/etcd/etcd.manifest",
        "salt/kube-addons/kube-addon-manager.yaml",
        "salt/kube-apiserver/abac-authz-policy.jsonl",
        "salt/kube-apiserver/kube-apiserver.manifest",
        "salt/kube-controller-manager/kube-controller-manager.manifest",
        "salt/kube-scheduler/kube-scheduler.manifest",
        "salt/l7-gcp/glbc.manifest",
        "salt/rescheduler/rescheduler.manifest",
    ],
    mode = "0644",
    deps = [
        "_kube-admission-controls",
    ],
)
@@ -1,19 +0,0 @@
# SaltStack configuration

This is the root of the SaltStack configuration for Kubernetes. A high
level overview for the Kubernetes SaltStack configuration can be found [in the docs tree.](https://kubernetes.io/docs/admin/salt/)

This SaltStack configuration currently applies to default
configurations for Debian-on-GCE. (That doesn't mean it can't
be made to apply to an arbitrary configuration, but those are
only the in-tree OS/IaaS combinations supported today.) As you
peruse the configuration, this is shorthanded as `gce`, in `grains.cloud`;
the documentation in this tree uses this same shorthand for convenience.

See more:
* [pillar](pillar/)
* [reactor](reactor/)
* [salt](salt/)
@ -1,109 +0,0 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script will set up the salt directory on the target server. It takes one
# argument that is a tarball with the pre-compiled kubernetes server binaries.

set -o errexit
set -o nounset
set -o pipefail

SALT_ROOT=$(dirname "${BASH_SOURCE}")
readonly SALT_ROOT

readonly KUBE_DOCKER_WRAPPED_BINARIES=(
  kube-apiserver
  kube-controller-manager
  kube-scheduler
  kube-proxy
)

readonly SERVER_BIN_TAR=${1-}
if [[ -z "$SERVER_BIN_TAR" ]]; then
  echo "!!! No binaries specified"
  exit 1
fi

# Create a temp dir for untarring
KUBE_TEMP=$(mktemp --tmpdir=/srv -d -t kubernetes.XXXXXX)
trap 'rm -rf "${KUBE_TEMP}"' EXIT

# This file is meant to run on the master. It will install the salt configs
# into the appropriate place on the master. We do this by creating a new set of
# salt trees and then quickly mv'ing them where the old ones were.

readonly SALTDIRS=(salt pillar reactor)

echo "+++ Installing salt files into new trees"
rm -rf /srv/salt-new
mkdir -p /srv/salt-new

# This bash voodoo will prepend $SALT_ROOT to the start of each item in the
# $SALTDIRS array
cp -v -R --preserve=mode "${SALTDIRS[@]/#/${SALT_ROOT}/}" /srv/salt-new
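# For illustration with hypothetical values: if SALT_ROOT=/srv/kube and
# SALTDIRS=(salt pillar reactor), the expansion above copies
# /srv/kube/salt /srv/kube/pillar /srv/kube/reactor into /srv/salt-new.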

echo "+++ Installing salt overlay files"
for dir in "${SALTDIRS[@]}"; do
  if [[ -d "/srv/salt-overlay/$dir" ]]; then
    cp -v -R --preserve=mode "/srv/salt-overlay/$dir" "/srv/salt-new/"
  fi
done

echo "+++ Install binaries from tar: $1"
tar -xz -C "${KUBE_TEMP}" -f "$1"
mkdir -p /srv/salt-new/salt/kube-bins
mkdir -p /srv/salt-new/salt/kube-docs
cp -v "${KUBE_TEMP}/kubernetes/server/bin/"* /srv/salt-new/salt/kube-bins/
cp -v "${KUBE_TEMP}/kubernetes/LICENSES" /srv/salt-new/salt/kube-docs/
cp -v "${KUBE_TEMP}/kubernetes/kubernetes-src.tar.gz" /srv/salt-new/salt/kube-docs/

kube_bin_dir="/srv/salt-new/salt/kube-bins";
docker_images_sls_file="/srv/salt-new/pillar/docker-images.sls";
for docker_file in "${KUBE_DOCKER_WRAPPED_BINARIES[@]}"; do
  docker_tag=$(cat ${kube_bin_dir}/${docker_file}.docker_tag);
  if [[ ! -z "${KUBE_IMAGE_TAG:-}" ]]; then
    docker_tag="${KUBE_IMAGE_TAG}"
  fi
  sed -i "s/#${docker_file}_docker_tag_value#/${docker_tag}/" "${docker_images_sls_file}";
done
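# For example, the placeholder line
#   kube-apiserver_docker_tag: #kube-apiserver_docker_tag_value#
# in pillar/docker-images.sls becomes
#   kube-apiserver_docker_tag: <tag read from kube-apiserver.docker_tag>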

cat <<EOF >>"${docker_images_sls_file}"
kube_docker_registry: '$(echo ${KUBE_DOCKER_REGISTRY:-gcr.io/google_containers})'
EOF

# TODO(zmerlynn): Forgive me, this is really gross. But in order to
# avoid breaking the non-Salt deployments, which already painfully
# have to templatize a couple of the add-ons anyways, manually
# templatize the addon registry for regional support. When we get
# better templating, we can fix this.
readonly kube_addon_registry="${KUBE_ADDON_REGISTRY:-gcr.io/google_containers}"
if [[ "${kube_addon_registry}" != "gcr.io/google_containers" ]]; then
  find /srv/salt-new -name \*.yaml -or -name \*.yaml.in | \
    xargs sed -ri "s@(image:\s.*)gcr.io/google_containers@\1${kube_addon_registry}@"
  # All the legacy .manifest files with hardcoded gcr.io are JSON.
  find /srv/salt-new -name \*.manifest -or -name \*.json | \
    xargs sed -ri "s@(image\":\s+\")gcr.io/google_containers@\1${kube_addon_registry}@"
fi

echo "+++ Swapping in new configs"
for dir in "${SALTDIRS[@]}"; do
  if [[ -d "/srv/$dir" ]]; then
    rm -rf "/srv/$dir"
  fi
  mv -v "/srv/salt-new/$dir" "/srv/$dir"
done

rm -rf /srv/salt-new
@ -1,22 +0,0 @@
The
[SaltStack pillar](http://docs.saltstack.com/en/latest/topics/pillar/)
data is partially statically derived from the contents of this
directory. The bulk of the pillars are hard to perceive from browsing
this directory, though, because they are written into
[cluster-params.sls](cluster-params.sls) at cluster inception.

* [cluster-params.sls](cluster-params.sls) is generated entirely at cluster inception. See e.g. [configure-vm.sh](../../gce/configure-vm.sh#L262)
* [docker-images.sls](docker-images.sls) stores the Docker tags of the current Docker-wrapped server binaries, twiddled by the Salt install script
* [logging.sls](logging.sls) defines the cluster log level
* [mine.sls](mine.sls) defines the variables shared across machines in the Salt
  mine. Its use is being deprecated, and it is totally unavailable on GCE,
  which runs standalone.
* [privilege.sls](privilege.sls) defines whether privileged containers are allowed.
* [top.sls](top.sls) defines which pillars are active across the cluster.

## Future work

Document the current pillars across providers
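To make the pillar/state relationship concrete, here is a minimal, hypothetical
sketch (not a file from this tree) of how a Jinja-templated state or manifest
consumes these pillars; `log_level`, `kube_docker_registry`, and
`kube-apiserver_docker_tag` are the keys defined or populated above:

```
{# hypothetical manifest fragment: pillar lookups with fallback defaults #}
command: /usr/local/bin/kube-apiserver {{ pillar.get('log_level', '--v=2') }}
image: {{ pillar.get('kube_docker_registry', 'gcr.io/google_containers') }}/kube-apiserver:{{ pillar.get('kube-apiserver_docker_tag', 'latest') }}
```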
@ -1,4 +0,0 @@
# This file is meant to be replaced with cluster specific parameters if necessary.

# Examples:
# node_instance_prefix: <base of regex for -minion_regexp to apiserver>
@ -1,5 +0,0 @@
# This file is populated when kubernetes is built.
kube-apiserver_docker_tag: #kube-apiserver_docker_tag_value#
kube-controller-manager_docker_tag: #kube-controller-manager_docker_tag_value#
kube-scheduler_docker_tag: #kube-scheduler_docker_tag_value#
kube-proxy_docker_tag: #kube-proxy_docker_tag_value#
@ -1 +0,0 @@
log_level: "--v=2"
@ -1,12 +0,0 @@
{% if grains.cloud is defined and grains.cloud == 'gce' -%}
# On GCE, there is no Salt mine. We run standalone.
{% else %}
# Allow everyone to see cached values of who sits at what IP
{% set networkInterfaceName = "eth0" %}
{% if grains.networkInterfaceName is defined %}
{% set networkInterfaceName = grains.networkInterfaceName %}
{% endif %}
mine_functions:
  network.ip_addrs: [{{networkInterfaceName}}]
  grains.items: []
{% endif -%}
@ -1,2 +0,0 @@
# If true, allow privileged containers to be created by API
allow_privileged: true
@ -1,9 +0,0 @@
{% if grains['oscodename'] in [ 'vivid', 'wily', 'jessie', 'xenial', 'yakkety' ] %}
is_systemd: True
systemd_system_path: /lib/systemd/system
{% elif grains['os_family'] == 'RedHat' %}
is_systemd: True
systemd_system_path: /usr/lib/systemd/system
{% else %}
is_systemd: False
{% endif %}
@ -1,8 +0,0 @@
base:
  '*':
    - mine
    - cluster-params
    - logging
    - docker-images
    - privilege
    - systemd
@ -1,6 +0,0 @@
[SaltStack reactor](http://docs.saltstack.com/en/latest/topics/reactor/) files, largely defining reactions to new nodes.

**Ignored for GCE, which runs standalone on each machine**
@ -1,10 +0,0 @@
# This runs highstate on the master node(s).
#
# Some of the cluster deployment scripts pass the list of minion addresses to
# the apiserver as a command line argument. This list needs to be updated if a
# new minion is started, so run highstate on the master(s) when this happens.
#
highstate_master:
  cmd.state.highstate:
    - tgt: 'roles:kubernetes-master'
    - expr_form: grain
@ -1,10 +0,0 @@
# This runs highstate on the minion nodes.
#
# Some of the cluster deployment scripts use the list of minions on the minions
# themselves. To propagate changes throughout
# the pool, run highstate on all minions whenever a single minion starts.
#
highstate_minions:
  cmd.state.highstate:
    - tgt: 'roles:kubernetes-pool'
    - expr_form: grain
@ -1,4 +0,0 @@
# This runs highstate only on the NEW node, regardless of type.
highstate_new:
  cmd.state.highstate:
    - tgt: {{ data['id'] }}
@ -1,29 +0,0 @@
This directory forms the base of the main SaltStack configuration. The
place to start with any SaltStack configuration is
[top.sls](top.sls). However, unless you are particularly keen on
reading Jinja templates, the following table breaks down what
configurations run on what providers. (NB: The [_states](_states/)
directory is a special directory included by Salt for `ensure` blocks,
and is only used for the [docker](docker/) config.)

Key: M = Config applies to master, n = config applies to nodes

Config | GCE | Vagrant | AWS | Azure
----------------------------------------------------|-------|---------|-----|------
[debian-auto-upgrades](debian-auto-upgrades/) | M n | M n | M n | M n
[docker](docker/) | M n | M n | M n | M n
[etcd](etcd/) | M | M | M | M
[generate-cert](generate-cert/) | M | M | M | M
[kube-addons](kube-addons/) | M | M | M | M
[kube-apiserver](kube-apiserver/) | M | M | M | M
[kube-controller-manager](kube-controller-manager/) | M | M | M | M
[kube-proxy](kube-proxy/) | n | n | n | n
[kube-scheduler](kube-scheduler/) | M | M | M | M
[kubelet](kubelet/) | M n | M n | M n | M n
[logrotate](logrotate/) | M n | n | M n | M n
[supervisord](supervisor/) | M n | M n | M n | M n
[base](base.sls) | M n | M n | M n | M n
[kube-client-tools](kube-client-tools.sls) | M | M | M | M
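For orientation, a minimal, hypothetical sketch of the grain-based targeting a
Salt top file performs (this is not the actual top.sls from this tree, which
pairs the configs above to roles):

```
base:
  '*':
    - base
    - debian-auto-upgrades
  'roles:kubernetes-master':
    - match: grain
    - kube-apiserver
  'roles:kubernetes-pool':
    - match: grain
    - kube-proxy
```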
@ -1,46 +0,0 @@
pkg-core:
  pkg.installed:
    - names:
      - curl
      - ebtables
{% if grains['os_family'] == 'RedHat' %}
      - python
      - git
      - socat
{% else %}
      - apt-transport-https
      - python-apt
      - nfs-common
      - socat
{% endif %}
# Ubuntu installs netcat-openbsd by default, but on GCE/Debian netcat-traditional is installed.
# They behave slightly differently.
# For sanity, we try to make sure we have the same netcat on all OSes (#15166)
{% if grains['os'] == 'Ubuntu' %}
      - netcat-traditional
{% endif %}
# Make sure git is installed for mounting git volumes
{% if grains['os'] == 'Ubuntu' %}
      - git
{% endif %}

/usr/local/share/doc/kubernetes:
  file.directory:
    - user: root
    - group: root
    - mode: 755
    - makedirs: True

/usr/local/share/doc/kubernetes/LICENSES:
  file.managed:
    - source: salt://kube-docs/LICENSES
    - user: root
    - group: root
    - mode: 644

/usr/local/share/doc/kubernetes/kubernetes-src.tar.gz:
  file.managed:
    - source: salt://kube-docs/kubernetes-src.tar.gz
    - user: root
    - group: root
    - mode: 644
@ -1,6 +0,0 @@
approvers:
- bowei
- dnardo
reviewers:
- bowei
- dnardo
@ -1,9 +0,0 @@
{% if pillar.get('network_policy_provider', '').lower() == 'calico' %}

ip6_tables:
  kmod.present

xt_set:
  kmod.present

{% endif -%}
@ -1,12 +0,0 @@
reviewers:
- mwielgus
- jszczepkowski
- MaciekPytel
- aleksandra-malinowska
- bskiba
approvers:
- mwielgus
- jszczepkowski
- MaciekPytel
- aleksandra-malinowska
- bskiba
@ -1,25 +0,0 @@
# Copy autoscaler manifest to manifests folder for master.
# The ordering of salt states for service docker, kubelet and
# master-addon below is very important to avoid the race between
# salt restart docker or kubelet and kubelet start master components.
# Please see http://issue.k8s.io/10122#issuecomment-114566063
# for a detailed explanation of this very issue.

/etc/kubernetes/manifests/cluster-autoscaler.manifest:
  file.managed:
    - source: salt://cluster-autoscaler/cluster-autoscaler.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - require:
      - service: docker
      - service: kubelet

/var/log/cluster-autoscaler.log:
  file.managed:
    - user: root
    - group: root
    - mode: 644
@ -1,8 +0,0 @@
approvers:
- bowei
- dnardo
- freehan
reviewers:
- bowei
- dnardo
- freehan
@ -1,26 +0,0 @@
/home/kubernetes:
  file.directory:
    - user: root
    - group: root
    - mode: 755
    - makedirs: True

/etc/cni/net.d:
  file.directory:
    - user: root
    - group: root
    - mode: 755
    - makedirs: True

# These are all available CNI network plugins.
cni-tar:
  archive:
    - extracted
    - user: root
    - name: /home/kubernetes/bin
    - makedirs: True
    - source: https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.6.0.tgz
    - tar_options: v
    - source_hash: md5=9534876FAE7DBE813CDAB404DC1F9219
    - archive_format: tar
    - if_missing: /home/kubernetes/bin
@ -1,4 +0,0 @@
APT::Periodic::Update-Package-Lists "1";
APT::Periodic::Unattended-Upgrade "1";

APT::Periodic::AutocleanInterval "7";
@ -1,13 +0,0 @@
{% if grains['os_family'] == 'Debian' %}
unattended-upgrades:
  pkg.installed

'/etc/apt/apt.conf.d/20auto-upgrades':
  file.managed:
    - source: salt://debian-auto-upgrades/20auto-upgrades
    - user: root
    - group: root
    - mode: 644
    - require:
      - pkg: unattended-upgrades
{% endif %}
@ -1,8 +0,0 @@
{% set docker_opts = "" -%}
{% if grains.docker_opts is defined and grains.docker_opts -%}
{% set docker_opts = grains.docker_opts -%}
{% endif -%}

DOCKER_OPTS='{{docker_opts}}'
OPTIONS='{{docker_opts}}'
DOCKER_CERT_PATH=/etc/docker
@ -1,18 +0,0 @@
{% set grains_opts = grains.get('docker_opts', '') -%}
{% set e2e_opts = '' -%}
{% if pillar.get('e2e_storage_test_environment', '').lower() == 'true' -%}
{% set e2e_opts = '-s devicemapper' -%}
{% endif -%}
{% set bridge_opts = "--bridge=cbr0" %}
{% if pillar.get('network_provider', '').lower() == 'kubenet' %}
{% set bridge_opts = "" %}
{% endif -%}
{% if pillar.get('network_provider', '').lower() == 'cni' %}
{% set bridge_opts = "" %}
{% endif -%}
{% set log_level = "--log-level=warn" -%}
{% if pillar['docker_test_log_level'] is defined -%}
{% set log_level = pillar['docker_test_log_level'] -%}
{% endif -%}
DOCKER_OPTS="{{grains_opts}} {{e2e_opts}} {{bridge_opts}} --iptables=false --ip-masq=false {{log_level}}"
DOCKER_NOFILE=1000000
@ -1,44 +0,0 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script is intended to be run periodically, to check the health
# of docker. If it detects a failure, it will restart docker using systemctl.

if timeout 10 docker version > /dev/null; then
  exit 0
fi

echo "docker failed"
echo "Giving docker 30 seconds grace before restarting"
sleep 30

if timeout 10 docker version > /dev/null; then
  echo "docker recovered"
  exit 0
fi

echo "docker still down; triggering docker restart"
systemctl restart docker

echo "Waiting 60 seconds to give docker time to start"
sleep 60

if timeout 10 docker version > /dev/null; then
  echo "docker recovered"
  exit 0
fi

echo "docker still failing"
@ -1,9 +0,0 @@
[Unit]
Description=Run docker-healthcheck once

[Service]
Type=oneshot
ExecStart=/opt/kubernetes/helpers/docker-healthcheck

[Install]
WantedBy=multi-user.target
@ -1,9 +0,0 @@
[Unit]
Description=Trigger docker-healthcheck periodically

[Timer]
OnUnitInactiveSec=10s
Unit=docker-healthcheck.service

[Install]
WantedBy=multi-user.target
@ -1,22 +0,0 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script is intended to be run before we start Docker.

# cleanup docker network checkpoint to avoid running into known issue
# of docker (https://github.com/docker/docker/issues/18283)
rm -rf /var/lib/docker/network
@ -1 +0,0 @@
deb https://apt.dockerproject.org/repo debian-{{ salt['grains.get']('oscodename') }} main
@ -1,21 +0,0 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target docker.socket
Requires=docker.socket

[Service]
Type=notify
EnvironmentFile={{ environment_file }}
ExecStart=/usr/bin/docker daemon -H fd:// "$DOCKER_OPTS"
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
Restart=always
RestartSec=2s
StartLimitInterval=0
ExecStartPre=/opt/kubernetes/helpers/docker-prestart

[Install]
WantedBy=multi-user.target
@ -1,493 +0,0 @@
{% if pillar.get('is_systemd') %}
{% set environment_file = '/etc/sysconfig/docker' %}
{% else %}
{% set environment_file = '/etc/default/docker' %}
{% endif %}

bridge-utils:
  pkg.installed

{% if grains.os_family == 'RedHat' %}

{{ environment_file }}:
  file.managed:
    - source: salt://docker/default
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true

{% if grains.cloud is defined and grains.cloud == 'openstack' %}

cbr0:
  # workaround https://github.com/saltstack/salt/issues/20570
  kmod.present:
    - name: bridge

  network.managed:
    - enabled: True
    - type: bridge
    - proto: none
    - ports: none
    - bridge: cbr0
    - delay: 0
    - bypassfirewall: True
    - require_in:
      - service: docker
    - require:
      - kmod: cbr0
{% endif %}

{% if (grains.os == 'Fedora' and grains.osrelease_info[0] >= 22) or (grains.os == 'CentOS' and grains.osrelease_info[0] >= 7) %}

docker:
  pkg:
    - installed
  service.running:
    - enable: True
    - require:
      - pkg: docker
    - watch:
      - file: {{ environment_file }}
      - pkg: docker

{% else %}

docker-io:
  pkg:
    - installed

docker:
  service.running:
    - enable: True
    - require:
      - pkg: docker-io
    - watch:
      - file: {{ environment_file }}
      - pkg: docker-io

{% endif %}
{% elif grains.cloud is defined and grains.cloud == 'azure-legacy' %}

{% if pillar.get('is_systemd') %}

{{ pillar.get('systemd_system_path') }}/docker.service:
  file.managed:
    - source: salt://docker/docker.service
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - defaults:
        environment_file: {{ environment_file }}

# The docker service.running block below doesn't work reliably
# Instead we run our script which e.g. does a systemd daemon-reload
# But we keep the service block below, so it can be used by dependencies
# TODO: Fix this
fix-service-docker:
  cmd.wait:
    - name: /opt/kubernetes/helpers/services bounce docker
    - watch:
      - file: {{ pillar.get('systemd_system_path') }}/docker.service
      - file: {{ environment_file }}
{% endif %}

{{ environment_file }}:
  file.managed:
    - source: salt://docker/docker-defaults
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - require:
      - pkg: docker-engine

apt-key:
  pkgrepo.managed:
    - humanname: Dotdeb
    - name: deb https://apt.dockerproject.org/repo ubuntu-trusty main
    - dist: ubuntu-trusty
    - file: /etc/apt/sources.list.d/docker.list
    - keyid: 58118E89F3A912897C070ADBF76221572C52609D
    - keyserver: hkp://p80.pool.sks-keyservers.net:80

lxc-docker:
  pkg:
    - purged

docker-io:
  pkg:
    - purged

cbr0:
  network.managed:
    - enabled: True
    - type: bridge
{% if grains['roles'][0] == 'kubernetes-pool' %}
    - proto: none
{% else %}
    - proto: dhcp
{% endif %}
    - ports: none
    - bridge: cbr0
{% if grains['roles'][0] == 'kubernetes-pool' %}
    - ipaddr: {{ grains['cbr-cidr'] }}
{% endif %}
    - delay: 0
    - bypassfirewall: True
    - require_in:
      - service: docker

docker-engine:
  pkg:
    - installed
    - require:
      - pkgrepo: 'apt-key'

docker:
  service.running:
    - enable: True
    - require:
      - file: {{ environment_file }}
    - watch:
      - file: {{ environment_file }}

{% elif grains.cloud is defined and grains.cloud in ['photon-controller'] and grains.os == 'Debian' and grains.osrelease_info[0] >=8 %}

{% if pillar.get('is_systemd') %}

/opt/kubernetes/helpers/docker-prestart:
  file.managed:
    - source: salt://docker/docker-prestart
    - user: root
    - group: root
    - mode: 755

{{ pillar.get('systemd_system_path') }}/docker.service:
  file.managed:
    - source: salt://docker/docker.service
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - defaults:
        environment_file: {{ environment_file }}
    - require:
      - file: /opt/kubernetes/helpers/docker-prestart
      - pkg: docker-engine

# The docker service.running block below doesn't work reliably
# Instead we run our script which e.g. does a systemd daemon-reload
# But we keep the service block below, so it can be used by dependencies
# TODO: Fix this
fix-service-docker:
  cmd.wait:
    - name: /opt/kubernetes/helpers/services bounce docker
    - watch:
      - file: {{ pillar.get('systemd_system_path') }}/docker.service
      - file: {{ environment_file }}
{% endif %}

{{ environment_file }}:
  file.managed:
    - source: salt://docker/docker-defaults
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - require:
      - pkg: docker-engine

apt-key:
  cmd.run:
    - name: 'apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D'
    - unless: 'apt-key finger | grep "5811 8E89"'

apt-update:
  cmd.run:
    - name: '/usr/bin/apt-get update -y'
    - require:
      - cmd : 'apt-key'

lxc-docker:
  pkg:
    - purged

docker-io:
  pkg:
    - purged

cbr0:
  network.managed:
    - enabled: True
    - type: bridge
    - proto: dhcp
    - ports: none
    - bridge: cbr0
    - delay: 0
    - bypassfirewall: True
    - require_in:
      - service: docker

/etc/apt/sources.list.d/docker.list:
  file.managed:
    - source: salt://docker/docker.list
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - require:
      - cmd: 'apt-update'

# restricting docker version to 1.9. with older version of docker we are facing
# issue https://github.com/docker/docker/issues/18793.
# newer version of docker 1.10.0 is not well tested yet.
# full comments: https://github.com/kubernetes/kubernetes/pull/20851
docker-engine:
  pkg:
    - installed
    - version: 1.9.*
    - require:
      - file: /etc/apt/sources.list.d/docker.list
docker:
  service.running:
    - enable: True
    - require:
      - file: {{ environment_file }}
    - watch:
      - file: {{ environment_file }}

{% else %}

{% if grains.cloud is defined
   and grains.cloud == 'gce' %}
# The default GCE images have ip_forwarding explicitly set to 0.
# Here we take care of commenting that out.
/etc/sysctl.d/11-gce-network-security.conf:
  file.replace:
    - pattern: '^net.ipv4.ip_forward=0'
    - repl: '# net.ipv4.ip_forward=0'
{% endif %}

# Work around Salt #18089: https://github.com/saltstack/salt/issues/18089
/etc/sysctl.d/99-salt.conf:
  file.touch

# TODO: This should really be based on network strategy instead of os_family
net.ipv4.ip_forward:
  sysctl.present:
    - value: 1

{% if pillar.get('softlockup_panic', '').lower() == 'true' %}
# TODO(dchen1107) Remove this once kernel.softlockup_panic is built into the CVM image.
/etc/sysctl.conf:
  file.append:
    - text:
      - "kernel.softlockup_panic = 1"
      - "kernel.softlockup_all_cpu_backtrace = 1"

'sysctl-reload':
  cmd.run:
    - name: 'sysctl --system'
    - unless: 'sysctl -a | grep "kernel.softlockup_panic = 1"'
{% endif %}

{{ environment_file }}:
  file.managed:
    - source: salt://docker/docker-defaults
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true

# Docker is on the ContainerVM image by default. The following
# variables are provided for other cloud providers, and for testing and dire circumstances, to allow
# overriding the Docker version that's in a ContainerVM image.
#
# To change:
#
# 1. Find new deb name at:
#    http://apt.dockerproject.org/repo/pool/main/d/docker-engine
# 2. Download based on that:
#    curl -O http://apt.dockerproject.org/repo/pool/main/d/docker-engine/<deb>
# 3. Upload to GCS:
#    gsutil cp <deb> gs://kubernetes-release/docker/<deb>
# 4. Make it world readable:
#    gsutil acl ch -R -g all:R gs://kubernetes-release/docker/<deb>
# 5. Get a hash of the deb:
#    shasum <deb>
# 6. Update override_deb, override_deb_sha1, override_docker_ver with new
#    deb name, new hash and new version

{% set storage_base='https://storage.googleapis.com/kubernetes-release/docker/' %}

{% set override_deb_url='' %}

{% if grains.get('cloud', '') == 'gce'
   and grains.get('os_family', '') == 'Debian'
   and grains.get('oscodename', '') == 'wheezy' -%}
{% set docker_pkg_name='' %}
{% set override_deb='' %}
{% set override_deb_sha1='' %}
{% set override_docker_ver='' %}

{% elif grains.get('cloud', '') == 'gce'
   and grains.get('os_family', '') == 'Debian'
   and grains.get('oscodename', '') == 'jessie' -%}
{% set docker_pkg_name='' %}
{% set override_deb='' %}
{% set override_deb_sha1='' %}
{% set override_docker_ver='' %}

{% else %}
{% set docker_pkg_name='lxc-docker-1.7.1' %}
{% set override_docker_ver='1.7.1' %}
{% set override_deb='lxc-docker-1.7.1_1.7.1_amd64.deb' %}
{% set override_deb_sha1='81abef31dd2c616883a61f85bfb294d743b1c889' %}
{% endif %}

{% if override_deb_url == '' %}
{% set override_deb_url=storage_base + override_deb %}
{% endif %}

{% if override_docker_ver != '' %}
purge-old-docker-package:
  pkg.removed:
    - pkgs:
      - lxc-docker-1.6.2

/var/cache/docker-install/{{ override_deb }}:
  file.managed:
    - source: {{ override_deb_url }}
    - source_hash: sha1={{ override_deb_sha1 }}
    - user: root
    - group: root
    - mode: 644
    - makedirs: true

# Drop the license file into /usr/share so that everything is crystal clear.
/usr/share/doc/docker/apache.txt:
  file.managed:
    - source: {{ storage_base }}apache2.txt
    - source_hash: sha1=2b8b815229aa8a61e483fb4ba0588b8b6c491890
    - user: root
    - group: root
    - mode: 644
    - makedirs: true

libltdl7:
  pkg.installed

docker-upgrade:
  cmd.run:
    - name: /opt/kubernetes/helpers/pkg install-no-start {{ docker_pkg_name }} {{ override_docker_ver }} /var/cache/docker-install/{{ override_deb }}
    - require:
      - file: /var/cache/docker-install/{{ override_deb }}
      - pkg: libltdl7

{% endif %} # end override_docker_ver != ''

{% if pillar.get('is_systemd') %}

/opt/kubernetes/helpers/docker-prestart:
  file.managed:
    - source: salt://docker/docker-prestart
    - user: root
    - group: root
    - mode: 755

# Default docker systemd unit file doesn't use an EnvironmentFile; replace it with one that does.
{{ pillar.get('systemd_system_path') }}/docker.service:
  file.managed:
    - source: salt://docker/docker.service
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - defaults:
        environment_file: {{ environment_file }}
    - require:
      - file: /opt/kubernetes/helpers/docker-prestart

# The docker service.running block below doesn't work reliably
# Instead we run our script which e.g. does a systemd daemon-reload
# But we keep the service block below, so it can be used by dependencies
# TODO: Fix this
fix-service-docker:
  cmd.wait:
    - name: /opt/kubernetes/helpers/services enable docker
    - watch:
      - file: {{ pillar.get('systemd_system_path') }}/docker.service
      - file: {{ environment_file }}
{% if override_docker_ver != '' %}
    - require:
      - cmd: docker-upgrade
{% endif %}

/opt/kubernetes/helpers/docker-healthcheck:
  file.managed:
    - source: salt://docker/docker-healthcheck
    - user: root
    - group: root
    - mode: 755

{{ pillar.get('systemd_system_path') }}/docker-healthcheck.service:
  file.managed:
    - source: salt://docker/docker-healthcheck.service
    - template: jinja
    - user: root
    - group: root
    - mode: 644

{{ pillar.get('systemd_system_path') }}/docker-healthcheck.timer:
  file.managed:
    - source: salt://docker/docker-healthcheck.timer
    - template: jinja
    - user: root
    - group: root
    - mode: 644

# Tell systemd to load the timer
fix-systemd-docker-healthcheck-timer:
  cmd.wait:
    - name: /opt/kubernetes/helpers/services bounce docker-healthcheck.timer
    - watch:
      - file: {{ pillar.get('systemd_system_path') }}/docker-healthcheck.timer

# Trigger a first run of docker-healthcheck; needed because the timer fires 10s after the previous run.
fix-systemd-docker-healthcheck-service:
  cmd.wait:
    - name: /opt/kubernetes/helpers/services bounce docker-healthcheck.service
    - watch:
      - file: {{ pillar.get('systemd_system_path') }}/docker-healthcheck.service
    - require:
      - cmd: fix-service-docker

{% endif %}

docker:
  service.running:
    - enable: True
    # If we put a watch on this, salt will try to start the service.
    # We put the watch on the fixer instead
{% if not pillar.get('is_systemd') %}
    - watch:
      - file: {{ environment_file }}
{% if override_docker_ver != '' %}
      - cmd: docker-upgrade
{% endif %}
{% endif %}
    - require:
      - file: {{ environment_file }}
{% if override_docker_ver != '' %}
      - cmd: docker-upgrade
{% endif %}
{% if pillar.get('is_systemd') %}
      - cmd: fix-service-docker
{% endif %}
{% endif %} # end grains.os_family != 'RedHat'
@ -1,12 +0,0 @@
/etc/kubernetes/manifests/e2e-image-puller.manifest:
  file.managed:
    - source: salt://e2e-image-puller/e2e-image-puller.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - require:
      - service: docker
      - service: kubelet
@ -1,28 +0,0 @@
e2e:
  # Install various packages required by e2e tests to all hosts.
  pkg.installed:
    - refresh: true
    - pkgs:
      - targetcli
      - ceph
{% if grains['os_family'] == 'RedHat' %}
      - glusterfs-fuse
      - rbd-fuse
      - iscsi-initiator-utils
      - nfs-utils
{% else %}
      - glusterfs-client
      - open-iscsi
      - iscsitarget-dkms
      - nfs-common
{% endif %}

{% if grains['os_family'] == 'Debian' %}
# On Debian, re-start open-iscsi to generate unique
# /etc/iscsi/initiatorname.iscsi
open-iscsi:
  cmd.run:
    - name: 'service open-iscsi restart'
{% endif %}
@ -1,83 +0,0 @@
# Early configurations of Kubernetes ran etcd on the host and as part of a migration step, we began to delete the host etcd
# It's possible though that the host has configured a separate etcd to configure other services like Flannel
# In that case, we do not want Salt to remove or stop the host service
# Note: it's imperative that the host-installed etcd not conflict with the Kubernetes-managed etcd
{% if grains['keep_host_etcd'] is not defined %}

delete_etc_etcd_dir:
  file.absent:
    - name: /etc/etcd

delete_etcd_conf:
  file.absent:
    - name: /etc/etcd/etcd.conf

delete_etcd_default:
  file.absent:
    - name: /etc/default/etcd

{% if pillar.get('is_systemd') %}
delete_etcd_service_file:
  file.absent:
    - name: {{ pillar.get('systemd_system_path') }}/etcd.service
{% endif %}

delete_etcd_initd:
  file.absent:
    - name: /etc/init.d/etcd

#stop legacy etcd_service
stop_etcd-service:
  service.dead:
    - name: etcd
    - enable: None

{% endif %}

touch /var/log/etcd.log:
  cmd.run:
    - creates: /var/log/etcd.log

touch /var/log/etcd-events.log:
  cmd.run:
    - creates: /var/log/etcd-events.log

/var/etcd:
  file.directory:
    - user: root
    - group: root
    - dir_mode: 700
    - recurse:
      - user
      - group
      - mode

/etc/kubernetes/manifests/etcd.manifest:
  file.managed:
    - source: salt://etcd/etcd.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - context:
        suffix: ""
        port: 2379
        server_port: 2380
        cpulimit: '"200m"'

/etc/kubernetes/manifests/etcd-events.manifest:
  file.managed:
    - source: salt://etcd/etcd.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - context:
        suffix: "-events"
        port: 4002
        server_port: 2381
        cpulimit: '"100m"'
@ -1,37 +0,0 @@
{% set master_extra_sans=grains.get('master_extra_sans', '') %}
{% if grains.cloud is defined and grains.cloud == 'gce' %}
{% set cert_ip='_use_gce_external_ip_' %}
{% endif %}

# If there is a pillar defined, override any defaults.
{% if pillar['cert_ip'] is defined %}
{% set cert_ip=pillar['cert_ip'] %}
{% endif %}

{% set certgen="make-cert.sh" %}
{% if cert_ip is defined %}
{% set certgen="make-ca-cert.sh" %}
{% endif %}

openssl:
  pkg.installed: []

kube-cert:
  group.present:
    - system: True

kubernetes-cert:
  cmd.script:
    - unless: test -f /srv/kubernetes/server.cert
    - source: salt://generate-cert/{{certgen}}
{% if cert_ip is defined %}
    - args: {{cert_ip}} {{master_extra_sans}}
    - require:
      - pkg: curl
{% endif %}
    - cwd: /
    - user: root
    - group: root
    - shell: /bin/bash
    - require:
      - pkg: openssl
@ -1,26 +0,0 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

cert_dir=${CERT_DIR:-/srv/kubernetes}
cert_group=${CERT_GROUP:-kube-cert}

mkdir -p "$cert_dir"

openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
  -subj "/CN=kubernetes.invalid/O=Kubernetes" \
  -keyout "${cert_dir}/server.key" -out "${cert_dir}/server.cert"
chgrp $cert_group "${cert_dir}/server.key" "${cert_dir}/server.cert"
chmod 660 "${cert_dir}/server.key" "${cert_dir}/server.cert"
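# Illustrative check, not part of the original script: the generated
# self-signed cert can be inspected with
#   openssl x509 -in "${cert_dir}/server.cert" -noout -subject -dates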
@ -1,215 +0,0 @@
addon-dir-delete:
  file.absent:
    - name: /etc/kubernetes/addons

addon-dir-create:
  file.directory:
    - name: /etc/kubernetes/addons
    - user: root
    - group: root
    - mode: 0755
    - require:
      - file: addon-dir-delete

{% if pillar.get('enable_cluster_monitoring', '').lower() == 'influxdb' %}
/etc/kubernetes/addons/cluster-monitoring/influxdb:
  file.recurse:
    - source: salt://kube-addons/cluster-monitoring/influxdb
    - include_pat: E@(^.+\.yaml$|^.+\.json$)
    - template: jinja
    - user: root
    - group: root
    - dir_mode: 755
    - file_mode: 644
{% endif %}

{% if pillar.get('enable_l7_loadbalancing', '').lower() == 'glbc' %}
/etc/kubernetes/addons/cluster-loadbalancing/glbc:
  file.recurse:
    - source: salt://kube-addons/cluster-loadbalancing/glbc
    - include_pat: E@(^.+\.yaml$|^.+\.json$)
    - template: jinja
    - user: root
    - group: root
    - dir_mode: 755
    - file_mode: 644
{% endif %}

{% if pillar.get('enable_cluster_monitoring', '').lower() == 'google' %}
/etc/kubernetes/addons/cluster-monitoring/google:
  file.recurse:
    - source: salt://kube-addons/cluster-monitoring/google
    - include_pat: E@(^.+\.yaml$|^.+\.json$)
    - template: jinja
    - user: root
    - group: root
    - dir_mode: 755
    - file_mode: 644
{% endif %}

{% if pillar.get('enable_cluster_monitoring', '').lower() == 'stackdriver' %}
/etc/kubernetes/addons/cluster-monitoring/stackdriver:
  file.recurse:
    - source: salt://kube-addons/cluster-monitoring/stackdriver
    - include_pat: E@(^.+\.yaml$|^.+\.json$)
    - template: jinja
    - user: root
    - group: root
    - dir_mode: 755
    - file_mode: 644
{% endif %}

{% if pillar.get('enable_cluster_monitoring', '').lower() == 'standalone' %}
/etc/kubernetes/addons/cluster-monitoring/standalone:
  file.recurse:
    - source: salt://kube-addons/cluster-monitoring/standalone
    - include_pat: E@(^.+\.yaml$|^.+\.json$)
    - template: jinja
    - user: root
    - group: root
    - dir_mode: 755
    - file_mode: 644
{% endif %}

{% if pillar.get('enable_cluster_monitoring', '').lower() == 'googleinfluxdb' %}
/etc/kubernetes/addons/cluster-monitoring/googleinfluxdb:
  file.recurse:
    - source: salt://kube-addons/cluster-monitoring
    - include_pat: E@(^.+\.yaml$|^.+\.json$)
    - exclude_pat: E@(^.+heapster-controller\.yaml$|^.+heapster-controller\.json$)
    - template: jinja
    - user: root
    - group: root
    - dir_mode: 755
    - file_mode: 644
{% endif %}

{% if pillar.get('enable_cluster_dns', '').lower() == 'true' %}
/etc/kubernetes/addons/dns/kube-dns.yaml:
  file.managed:
    - source: salt://kube-addons/dns/kube-dns.yaml.in
    - template: jinja
    - group: root
    - dir_mode: 755
    - makedirs: True
{% endif %}

{% if pillar.get('enable_dns_horizontal_autoscaler', '').lower() == 'true'
   and pillar.get('enable_cluster_dns', '').lower() == 'true' %}
/etc/kubernetes/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml:
  file.managed:
    - source: salt://kube-addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
    - user: root
    - group: root
    - file_mode: 644
    - makedirs: True
{% endif %}

{% if pillar.get('enable_cluster_registry', '').lower() == 'true' %}
/etc/kubernetes/addons/registry/registry-svc.yaml:
  file.managed:
    - source: salt://kube-addons/registry/registry-svc.yaml
    - user: root
    - group: root
    - file_mode: 644
    - makedirs: True

/etc/kubernetes/addons/registry/registry-rc.yaml:
  file.managed:
    - source: salt://kube-addons/registry/registry-rc.yaml
    - user: root
    - group: root
    - file_mode: 644
    - makedirs: True

/etc/kubernetes/addons/registry/registry-pv.yaml:
  file.managed:
    - source: salt://kube-addons/registry/registry-pv.yaml.in
    - template: jinja
    - user: root
    - group: root
    - file_mode: 644
    - makedirs: True

/etc/kubernetes/addons/registry/registry-pvc.yaml:
  file.managed:
    - source: salt://kube-addons/registry/registry-pvc.yaml.in
    - template: jinja
    - user: root
    - group: root
    - file_mode: 644
    - makedirs: True
{% endif %}

{% if pillar.get('enable_node_logging', '').lower() == 'true'
   and 'logging_destination' in pillar
   and pillar.get('enable_cluster_logging', '').lower() == 'true' %}
/etc/kubernetes/addons/fluentd-{{ pillar.get('logging_destination') }}:
  file.recurse:
    - source: salt://kube-addons/fluentd-{{ pillar.get('logging_destination') }}
    - include_pat: E@^.+\.yaml$
    - user: root
    - group: root
    - dir_mode: 755
    - file_mode: 644
{% endif %}

{% if pillar.get('enable_metadata_proxy', '').lower() == 'true' %}
/etc/kubernetes/addons/metadata-proxy/gce:
  file.recurse:
    - source: salt://kube-addons/metadata-proxy/gce
    - include_pat: E@^.+\.yaml$
    - user: root
    - group: root
    - dir_mode: 755
    - file_mode: 644
{% endif %}

{% if pillar.get('enable_pod_security_policy', '').lower() == 'true' %}
/etc/kubernetes/addons/podsecuritypolicies:
  file.recurse:
    - source: salt://kube-addons/podsecuritypolicies
    - include_pat: E@^.+\.yaml$
    - user: root
    - group: root
    - dir_mode: 755
    - file_mode: 644
{% endif %}

{% if pillar.get('enable_cluster_ui', '').lower() == 'true' %}
/etc/kubernetes/addons/dashboard:
  file.recurse:
    - source: salt://kube-addons/dashboard
    - include_pat: E@^.+\.yaml$
    - user: root
    - group: root
    - dir_mode: 755
    - file_mode: 644
{% endif %}

{% if pillar.get('enable_node_problem_detector', '').lower() == 'daemonset' %}
/etc/kubernetes/addons/node-problem-detector/npd.yaml:
  file.managed:
    - source: salt://kube-addons/node-problem-detector/npd.yaml
    - user: root
    - group: root
    - file_mode: 644
    - makedirs: True
{% endif %}

/etc/kubernetes/manifests/kube-addon-manager.yaml:
  file.managed:
    - source: salt://kube-addons/kube-addon-manager.yaml
    - user: root
    - group: root
    - mode: 755

{% if pillar.get('enable_default_storage_class', '').lower() == 'true' and grains['cloud'] is defined and grains['cloud'] == 'gce' %}
/etc/kubernetes/addons/storage-class/default.yaml:
  file.managed:
    - source: salt://kube-addons/storage-class/{{ grains['cloud'] }}/default.yaml
    - user: root
    - group: root
    - mode: 644
    - makedirs: True
{% endif %}
@ -1,10 +0,0 @@
{% if 'LimitRanger' in pillar.get('admission_control', '') %}
/etc/kubernetes/admission-controls/limit-range:
  file.recurse:
    - source: salt://kube-admission-controls/limit-range
    - include_pat: E@(^.+\.yaml$|^.+\.json$)
    - user: root
    - group: root
    - dir_mode: 755
    - file_mode: 644
{% endif %}
@ -1,71 +0,0 @@
{% if grains['cloud'] is defined and grains.cloud == 'gce' %}
# TODO: generate and distribute tokens on other cloud providers.
/srv/kubernetes/known_tokens.csv:
  file.managed:
    - source: salt://kube-apiserver/known_tokens.csv
    - user: root
    - group: root
    - mode: 600
    # - watch_in:
    #   - service: kube-apiserver

/srv/kubernetes/basic_auth.csv:
  file.managed:
    - source: salt://kube-apiserver/basic_auth.csv
    - user: root
    - group: root
    - mode: 600

/srv/kubernetes/abac-authz-policy.jsonl:
  file.managed:
    - source: salt://kube-apiserver/abac-authz-policy.jsonl
    - template: jinja
    - user: root
    - group: root
    - mode: 600
{% endif %}

/var/log/kube-apiserver.log:
  file.managed:
    - user: root
    - group: root
    - mode: 644

/var/log/kube-apiserver-audit.log:
  file.managed:
    - user: root
    - group: root
    - mode: 644

# Copy kube-apiserver manifest to manifests folder for kubelet.
# The current containervm image by default has both docker and kubelet
# running. But during the cluster creation stage, docker and kubelet
# could be overwritten completely, or restarted due to flag changes.
# The ordering of salt states for service docker, kubelet and
# master-addon below is very important to avoid the race between
# salt restart docker or kubelet and kubelet start master components.
# Without the ordering of salt states, when a gce instance boots up,
# configure-vm.sh will run and download the release. At the end of
# boot, run-salt will install kube-apiserver.manifest files to the
# kubelet config directory before the installation of the proper
# kubelet version. Please see
# http://issue.k8s.io/10122#issuecomment-114566063
# for a detailed explanation of this very issue.
/etc/kubernetes/manifests/kube-apiserver.manifest:
  file.managed:
    - source: salt://kube-apiserver/kube-apiserver.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - require:
      - service: docker
      - service: kubelet

#stop legacy kube-apiserver service
stop_kube-apiserver:
  service.dead:
    - name: kube-apiserver
    - enable: None
@ -1,6 +0,0 @@
/usr/local/bin/kubectl:
  file.managed:
    - source: salt://kube-bins/kubectl
    - user: root
    - group: root
    - mode: 755
@ -1,30 +0,0 @@
# Copy kube-controller-manager manifest to manifests folder for kubelet.
# The ordering of salt states for service docker, kubelet and
# master-addon below is very important to avoid the race between
# salt restart docker or kubelet and kubelet start master components.
# Please see http://issue.k8s.io/10122#issuecomment-114566063
# for a detailed explanation of this very issue.
/etc/kubernetes/manifests/kube-controller-manager.manifest:
  file.managed:
    - source: salt://kube-controller-manager/kube-controller-manager.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - require:
      - service: docker
      - service: kubelet

/var/log/kube-controller-manager.log:
  file.managed:
    - user: root
    - group: root
    - mode: 644

stop-legacy-kube_controller_manager:
  service.dead:
    - name: kube-controller-manager
    - enable: None
@ -1,52 +0,0 @@
/etc/kubernetes/kube-master-addons.sh:
  file.managed:
    - source: salt://kube-master-addons/kube-master-addons.sh
    - user: root
    - group: root
    - mode: 755

# Used to restart the kube-master-addons service each time salt is run.
# Actually, it does not work (the service is not restarted),
# but the master-addon service always terminates after it does its job,
# so it is (usually) not running and it will be started when
# salt is run.
# This salt state is not removed because there is a risk
# of introducing a regression in 1.0. Please remove it afterwards.
# See also the salt config for kube-addons to see how to restart
# a service on demand.
master-docker-image-tags:
  file.touch:
    - name: /srv/pillar/docker-images.sls

{% if pillar.get('is_systemd') %}

{{ pillar.get('systemd_system_path') }}/kube-master-addons.service:
  file.managed:
    - source: salt://kube-master-addons/kube-master-addons.service
    - user: root
    - group: root
  cmd.wait:
    - name: /opt/kubernetes/helpers/services bounce kube-master-addons
    - watch:
      - file: master-docker-image-tags
      - file: /etc/kubernetes/kube-master-addons.sh
      - file: {{ pillar.get('systemd_system_path') }}/kube-master-addons.service

{% else %}

/etc/init.d/kube-master-addons:
  file.managed:
    - source: salt://kube-master-addons/initd
    - user: root
    - group: root
    - mode: 755

kube-master-addons:
  service.running:
    - enable: True
    - restart: True
    - watch:
      - file: master-docker-image-tags
      - file: /etc/kubernetes/kube-master-addons.sh

{% endif %}
@ -1,95 +0,0 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides:          kube-master-addons
# Required-Start:    $local_fs $network $syslog docker
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Kubernetes Master Addon Object Manager
# Description:
#   Enforces installation of Kubernetes Master Addon Objects
### END INIT INFO


# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="Kubernetes Master Addon Object Manager"
NAME=kube-master-addons
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
KUBE_MASTER_ADDONS_SH=/etc/kubernetes/kube-master-addons.sh

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions


#
# Function that starts the daemon/service
#
do_start()
{
  ${KUBE_MASTER_ADDONS_SH} </dev/null >>${DAEMON_LOG_FILE} 2>&1 &
  echo $! > ${PIDFILE}
  disown
}

#
# Function that stops the daemon/service
#
do_stop()
{
  kill $(cat ${PIDFILE})
  rm ${PIDFILE}
  return
}

case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) log_end_msg 0 || exit 0 ;;
      2) log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1) log_end_msg 0 ;;
      2) exit 1 ;;
    esac
    ;;
  status)
    status_of_proc -p $PIDFILE $KUBE_MASTER_ADDONS_SH $NAME
    ;;

  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1)
        do_start
        case "$?" in
          0) log_end_msg 0 ;;
          1) log_end_msg 1 ;; # Old process is still running
          *) log_end_msg 1 ;; # Failed to start
        esac
        ;;
      *)
        # Failed to stop
        log_end_msg 1
        ;;
    esac
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac
@ -1,9 +0,0 @@
[Unit]
Description=Kubernetes-Master Addon Object Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/etc/kubernetes/kube-master-addons.sh

[Install]
WantedBy=multi-user.target
@ -1,90 +0,0 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# loadedImageFlags is a bit-flag to track which docker images loaded successfully.

function load-docker-images() {
  let loadedImageFlags=0
|
||||
while true; do
|
||||
restart_docker=false
|
||||
|
||||
if which docker 1>/dev/null 2>&1; then
|
||||
|
||||
timeout 120 docker load -i /srv/salt/kube-bins/kube-apiserver.tar 1>/dev/null 2>&1
|
||||
rc=$?
|
||||
if [[ $rc == 0 ]]; then
|
||||
let loadedImageFlags="$loadedImageFlags|1"
|
||||
elif [[ $rc == 124 ]]; then
|
||||
restart_docker=true
|
||||
fi
|
||||
|
||||
timeout 120 docker load -i /srv/salt/kube-bins/kube-scheduler.tar 1>/dev/null 2>&1
|
||||
rc=$?
|
||||
if [[ $rc == 0 ]]; then
|
||||
let loadedImageFlags="$loadedImageFlags|2"
|
||||
elif [[ $rc == 124 ]]; then
|
||||
restart_docker=true
|
||||
fi
|
||||
|
||||
timeout 120 docker load -i /srv/salt/kube-bins/kube-controller-manager.tar 1>/dev/null 2>&1
|
||||
rc=$?
|
||||
if [[ $rc == 0 ]]; then
|
||||
let loadedImageFlags="$loadedImageFlags|4"
|
||||
elif [[ $rc == 124 ]]; then
|
||||
restart_docker=true
|
||||
fi
|
||||
fi
|
||||
|
||||
# required docker images got installed. exit while loop.
|
||||
if [[ $loadedImageFlags == 7 ]]; then break; fi
|
||||
|
||||
# Sometimes docker load hang, restart docker daemon resolve the issue
|
||||
if [[ $restart_docker ]]; then
|
||||
if ! service docker restart; then # Try systemctl if there's no service command.
|
||||
systemctl restart docker
|
||||
fi
|
||||
fi
|
||||
|
||||
# sleep for 15 seconds before attempting to load docker images again
|
||||
sleep 15
|
||||
|
||||
done
|
||||
}
|
||||
|
||||
function convert-rkt-image() {
|
||||
(cd /tmp; ${DOCKER2ACI_BIN} $1)
|
||||
}
|
||||
|
||||
function load-rkt-images() {
|
||||
convert-rkt-image /srv/salt/kube-bins/kube-apiserver.tar
|
||||
convert-rkt-image /srv/salt/kube-bins/kube-scheduler.tar
|
||||
convert-rkt-image /srv/salt/kube-bins/kube-controller-manager.tar
|
||||
|
||||
# Currently, we can't run docker image tarballs directly,
|
||||
# So we use 'rkt fetch' to load the docker images into rkt image stores.
|
||||
# see https://github.com/coreos/rkt/issues/2392.
|
||||
${RKT_BIN} fetch /tmp/*.aci --insecure-options=image
|
||||
}
|
||||
|
||||
if [[ "${KUBERNETES_CONTAINER_RUNTIME}" == "rkt" ]]; then
|
||||
load-rkt-images
|
||||
else
|
||||
load-docker-images
|
||||
fi
|
||||
|
||||
# Now exit. After kube-push, salt will notice that the service is down and it
|
||||
# will start it and new docker images will be loaded.
|
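
The loadedImageFlags bookkeeping above is a plain bit-flag accumulator: each image owns one bit (1, 2, 4), and 7 means all three loaded. A self-contained sketch:

    let flags=0
    let flags="$flags|1"    # kube-apiserver image loaded
    let flags="$flags|4"    # kube-controller-manager image loaded
    if [[ $flags == 7 ]]; then
      echo "all images loaded"
    else
      echo "still waiting (flags=$flags)"   # prints flags=5 here
    fi
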
@ -1,67 +0,0 @@
/etc/kubernetes/kube-node-unpacker.sh:
  file.managed:
    - source: salt://kube-node-unpacker/kube-node-unpacker.sh
    - makedirs: True
    - user: root
    - group: root
    - mode: 755

{% if grains.cloud is defined and grains.cloud == 'gce' %}
node-docker-image-tags:
  file.touch:
    - name: /srv/pillar/docker-images.sls
{% else %}
kube-proxy-tar:
  file.managed:
    - name: /srv/salt/kube-bins/kube-proxy.tar
    - source: salt://kube-bins/kube-proxy.tar
    - makedirs: True
    - user: root
    - group: root
    - mode: 644
{% endif %}

{% set is_helium = '0' %}
# Super annoying, the salt version on GCE is old enough that 'salt.cmd.run'
# isn't supported
# Salt Helium doesn't support systemd modules for service running
{% if pillar.get('is_systemd') and is_helium == '0' %}

{{ pillar.get('systemd_system_path') }}/kube-node-unpacker.service:
  file.managed:
    - source: salt://kube-node-unpacker/kube-node-unpacker.service
    - user: root
    - group: root
  cmd.wait:
    - name: /opt/kubernetes/helpers/services bounce kube-node-unpacker
    - watch:
{% if grains.cloud is defined and grains.cloud == 'gce' %}
      - file: node-docker-image-tags
{% else %}
      - file: kube-proxy-tar
{% endif %}
      - file: /etc/kubernetes/kube-node-unpacker.sh
      - file: {{ pillar.get('systemd_system_path') }}/kube-node-unpacker.service

{% else %}

/etc/init.d/kube-node-unpacker:
  file.managed:
    - source: salt://kube-node-unpacker/initd
    - user: root
    - group: root
    - mode: 755

kube-node-unpacker:
  service.running:
    - enable: True
    - restart: True
    - watch:
{% if grains.cloud is defined and grains.cloud == 'gce' %}
      - file: node-docker-image-tags
{% else %}
      - file: kube-proxy-tar
{% endif %}
      - file: /etc/kubernetes/kube-node-unpacker.sh

{% endif %}
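
When the cmd.wait above fires, it shells out to the bounce helper named in the state; a hand-run equivalent (anything the helper does beyond what the state shows is an assumption, as the helper itself is not in this diff):

    /opt/kubernetes/helpers/services bounce kube-node-unpacker
    systemctl status kube-node-unpacker    # on systemd hosts
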
@ -1,95 +0,0 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides:          kube-node-unpacker
# Required-Start:    $local_fs $network $syslog docker
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Kubernetes Node Unpacker
# Description:
#   Unpacks docker images on Kubernetes nodes
### END INIT INFO


# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="Kubernetes Node Unpacker"
NAME=kube-node-unpacker
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
KUBE_NODE_UNPACKER_SH=/etc/kubernetes/kube-node-unpacker.sh

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions


#
# Function that starts the daemon/service
#
do_start()
{
    ${KUBE_NODE_UNPACKER_SH} </dev/null >>${DAEMON_LOG_FILE} 2>&1 &
    echo $! > ${PIDFILE}
    disown
}

#
# Function that stops the daemon/service
#
do_stop()
{
    kill $(cat ${PIDFILE})
    rm ${PIDFILE}
    return
}

case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) log_end_msg 0 || exit 0 ;;
      2) log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1) log_end_msg 0 ;;
      2) exit 1 ;;
    esac
    ;;
  status)
    status_of_proc -p $PIDFILE $KUBE_NODE_UNPACKER_SH $NAME
    ;;

  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1)
        do_start
        case "$?" in
          0) log_end_msg 0 ;;
          1) log_end_msg 1 ;; # Old process is still running
          *) log_end_msg 1 ;; # Failed to start
        esac
        ;;
      *)
        # Failed to stop
        log_end_msg 1
        ;;
    esac
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac
@ -1,9 +0,0 @@
[Unit]
Description=Kubernetes Node Unpacker
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/etc/kubernetes/kube-node-unpacker.sh

[Install]
WantedBy=multi-user.target
@ -1,46 +0,0 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# loadedImageFlags is a bit-flag to track which docker images loaded successfully.
let loadedImageFlags=0

while true; do
  restart_docker=false

  if which docker 1>/dev/null 2>&1; then

    timeout 120 docker load -i /srv/salt/kube-bins/kube-proxy.tar 1>/dev/null 2>&1
    rc=$?
    if [[ "${rc}" == 0 ]]; then
      let loadedImageFlags="${loadedImageFlags}|1"
    elif [[ "${rc}" == 124 ]]; then
      restart_docker=true
    fi
  fi

  # All required docker images are loaded; exit the while loop.
  if [[ "${loadedImageFlags}" == 1 ]]; then break; fi

  # Sometimes `docker load` hangs; restarting the docker daemon resolves the issue.
  if [[ "${restart_docker}" == "true" ]]; then service docker restart; fi

  # Sleep for 15 seconds before attempting to load the docker images again.
  sleep 15

done

# Now exit. After kube-push, salt will notice that the service is down and it
# will start it and new docker images will be loaded.
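
The rc == 124 checks above rely on a documented timeout(1) convention: exit status 124 means the command was killed for exceeding the time limit. A standalone sketch:

    timeout 2 sleep 10
    rc=$?
    if [[ "${rc}" == 124 ]]; then
      echo "docker load would be considered hung; a daemon restart would be scheduled here"
    fi
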
@ -1,12 +0,0 @@
approvers:
- bowei
- dnardo
- freehan
- nicksardo
- mrhohn
reviewers:
- bowei
- dnardo
- freehan
- nicksardo
- mrhohn
@ -1,40 +0,0 @@
/var/lib/kube-proxy/kubeconfig:
  file.managed:
    - source: salt://kube-proxy/kubeconfig
    - user: root
    - group: root
    - mode: 400
    - makedirs: true

# kube-proxy in a static pod
{% if pillar.get('kube_proxy_daemonset', '').lower() != 'true' %}
/etc/kubernetes/manifests/kube-proxy.manifest:
  file.managed:
    - source: salt://kube-proxy/kube-proxy.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - context:
        # Increasing to 100m to avoid CPU starvation on full nodes.
        # Any change here should be accompanied by a proportional change in CPU
        # requests of other per-node add-ons (e.g. fluentd).
        cpurequest: '100m'
    - require:
      - service: docker
      - service: kubelet
{% endif %}

/var/log/kube-proxy.log:
  file.managed:
    - user: root
    - group: root
    - mode: 644

# Stop the legacy kube-proxy service.
stop_kube-proxy:
  service.dead:
    - name: kube-proxy
    - enable: None
|
/etc/kubernetes/manifests/kube-registry-proxy.yaml:
  file.managed:
    - source: salt://kube-registry-proxy/kube-registry-proxy.yaml
    - user: root
    - group: root
    - mode: 644
    - makedirs: True
    - dir_mode: 755
@ -1,30 +0,0 @@
# Copy kube-scheduler manifest to manifests folder for kubelet.
# The ordering of salt states for service docker, kubelet and
# master-addon below is very important to avoid the race between
# salt restarting docker or kubelet and kubelet starting master components.
# Please see http://issue.k8s.io/10122#issuecomment-114566063
# for a detailed explanation of this very issue.
/etc/kubernetes/manifests/kube-scheduler.manifest:
  file.managed:
    - source: salt://kube-scheduler/kube-scheduler.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755
    - require:
      - service: docker
      - service: kubelet

/var/log/kube-scheduler.log:
  file.managed:
    - user: root
    - group: root
    - mode: 644

# Stop the legacy kube-scheduler service.
stop_kube-scheduler:
  service.dead:
    - name: kube-scheduler
    - enable: None
@ -1,192 +0,0 @@
{% set daemon_args = "$DAEMON_ARGS" -%}
{% if grains['os_family'] == 'RedHat' -%}
  {% set daemon_args = "" -%}
{% endif -%}

# kubeconfig file
{% set require_kubeconfig = "" %}
{% if grains.kubelet_bootstrap_kubeconfig is defined -%}
  {% set bootstrap_kubeconfig = "--bootstrap-kubeconfig=" + grains.kubelet_bootstrap_kubeconfig -%}
{% else -%}
  {% set bootstrap_kubeconfig = "" -%}
{% endif -%}
{% if grains.kubelet_kubeconfig is defined -%}
  {% set kubeconfig = "--kubeconfig=" + grains.kubelet_kubeconfig -%}
{% else -%}
  {% set kubeconfig = "" -%}
{% endif -%}

{% set master_kubelet_args = "" %}

{% set debugging_handlers = "--enable-debugging-handlers=true" -%}

{% if grains['roles'][0] == 'kubernetes-master' -%}
  {% if grains.cloud == 'gce' -%}
    # Unless given a specific directive, disable registration for the kubelet
    # running on the master.
    {% if kubeconfig != "" -%}
      {% set master_kubelet_args = master_kubelet_args + "--register-schedulable=false" -%}
    {% endif -%}

    # Disable the debugging handlers (/run and /exec) to prevent arbitrary
    # code execution on the master.
    # TODO(roberthbailey): Relax this constraint once the master is self-hosted.
    {% set debugging_handlers = "--enable-debugging-handlers=false" -%}
  {% endif -%}
{% endif -%}

{% set cloud_provider = "" -%}
{% if grains.cloud is defined -%}
  {% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}

{% set cloud_config = "" -%}

{% set config = "--pod-manifest-path=/etc/kubernetes/manifests" -%}

{% set manifest_url = "" -%}
{% set manifest_url_header = "" -%}
{% if pillar.get('enable_manifest_url', '').lower() == 'true' %}
  {% set manifest_url = "--manifest-url=" + pillar['manifest_url'] + " --manifest-url-header=" + pillar['manifest_url_header'] -%}
{% endif -%}

{% set hostname_override = "" -%}
{% if grains.hostname_override is defined -%}
  {% set hostname_override = " --hostname-override=" + grains.hostname_override -%}
{% endif -%}

{% set cluster_dns = "" %}
{% set cluster_domain = "" %}
{% if pillar.get('enable_cluster_dns', '').lower() == 'true' %}
  {% set cluster_dns = "--cluster-dns=" + pillar['dns_server'] %}
  {% set cluster_domain = "--cluster-domain=" + pillar['dns_domain'] %}
{% endif %}

{% set docker_root = "" -%}
{% if grains.docker_root is defined -%}
  {% set docker_root = " --docker-root=" + grains.docker_root -%}
{% endif -%}

{% set kubelet_root = "" -%}
{% if grains.kubelet_root is defined -%}
  {% set kubelet_root = " --root-dir=" + grains.kubelet_root -%}
{% endif -%}

{% set non_masquerade_cidr = "" -%}
{% if pillar.get('non_masquerade_cidr','') -%}
  {% set non_masquerade_cidr = "--non-masquerade-cidr=" + pillar.non_masquerade_cidr -%}
{% endif -%}

# Setup cgroups hierarchies.
{% set cgroup_root = "" -%}
{% set system_container = "" -%}
{% set kubelet_container = "" -%}
{% set runtime_container = "" -%}
{% if grains['os_family'] == 'Debian' -%}
  {% if pillar.get('is_systemd') %}
    {% set cgroup_root = "--cgroup-root=docker" -%}
  {% else %}
    {% set cgroup_root = "--cgroup-root=/" -%}
    {% set system_container = "--system-cgroups=/system" -%}
    {% set runtime_container = "--runtime-cgroups=/docker-daemon" -%}
    {% set kubelet_container = "--kubelet-cgroups=/kubelet" -%}
  {% endif %}
{% endif -%}
{% if grains['oscodename'] in ['vivid','wily'] -%}
  {% set cgroup_root = "--cgroup-root=docker" -%}
{% endif -%}

{% set pod_cidr = "" %}
{% if grains['roles'][0] == 'kubernetes-master' %}
  {% if grains.get('cbr-cidr') %}
    {% set pod_cidr = "--pod-cidr=" + grains['cbr-cidr'] %}
  {% elif kubeconfig == "" and pillar.get('network_provider', '').lower() == 'kubenet' %}
    # Kubelet standalone mode needs a PodCIDR since there is no controller-manager
    {% set pod_cidr = "--pod-cidr=10.76.0.0/16" %}
  {% endif -%}
{% endif %}

{% set cpu_cfs_quota = "" %}
{% if pillar['enable_cpu_cfs_quota'] is defined -%}
  {% set cpu_cfs_quota = "--cpu-cfs-quota=" + pillar['enable_cpu_cfs_quota'] -%}
{% endif -%}

{% set feature_gates = "" -%}
{% if grains['feature_gates'] is defined -%}
  {% set feature_gates = "--feature-gates=" + grains['feature_gates'] -%}
{% endif %}

{% set test_args = "" -%}
{% if pillar['kubelet_test_args'] is defined -%}
  {% set test_args = pillar['kubelet_test_args'] %}
{% endif -%}

{% set network_plugin = "" -%}
{% if pillar.get('network_provider', '').lower() == 'opencontrail' %}
  {% set network_plugin = "--network-plugin=opencontrail" %}
{% elif pillar.get('network_provider', '').lower() == 'cni' %}
  {% set network_plugin = "--network-plugin=cni --cni-conf-dir=/etc/cni/net.d/ --cni-bin-dir=/home/kubernetes/bin/" %}
{% elif pillar.get('network_policy_provider', '').lower() == 'calico' and grains['roles'][0] != 'kubernetes-master' %}
  {% set network_plugin = "--network-plugin=cni --cni-conf-dir=/etc/cni/net.d/ --cni-bin-dir=/home/kubernetes/bin/" %}
{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
  {% set network_plugin = "--network-plugin=kubenet --cni-bin-dir=/home/kubernetes/bin/" -%}
{% endif -%}

# Don't pipe the --hairpin-mode flag by default. This allows the kubelet to pick
# an appropriate value.
{% set hairpin_mode = "" -%}
# The master cannot see Services because it doesn't run kube-proxy, so we don't
# need to make its container bridge promiscuous. We also don't want to set
# the hairpin-veth flag on the master because it increases the chances of
# running into the kernel bug described in #20096.
{% if grains['roles'][0] == 'kubernetes-master' -%}
  {% set hairpin_mode = "--hairpin-mode=none" -%}
{% elif pillar['hairpin_mode'] is defined and pillar['hairpin_mode'] in ['promiscuous-bridge', 'hairpin-veth', 'none'] -%}
  {% set hairpin_mode = "--hairpin-mode=" + pillar['hairpin_mode'] -%}
{% endif -%}

{% set kubelet_port = "" -%}
{% if pillar['kubelet_port'] is defined -%}
  {% set kubelet_port = "--port=" + pillar['kubelet_port'] %}
{% endif -%}

{% set log_level = pillar['log_level'] -%}
{% if pillar['kubelet_test_log_level'] is defined -%}
  {% set log_level = pillar['kubelet_test_log_level'] -%}
{% endif -%}

{% set enable_custom_metrics = "" -%}
{% if pillar['enable_custom_metrics'] is defined -%}
  {% set enable_custom_metrics = "--enable-custom-metrics=" + pillar['enable_custom_metrics'] %}
{% endif -%}

{% set kube_proxy_ds_label = "" %}
{% if grains['roles'][0] != 'kubernetes-master' and pillar.get('kube_proxy_daemonset', '').lower() == 'true' %}
  # Add the kube-proxy daemonset label to the node, to avoid ending up with two
  # instances of kube-proxy on a node during a cluster upgrade/downgrade.
  {% set kube_proxy_ds_label = "beta.kubernetes.io/kube-proxy-ds-ready=true," %}
{% endif %}
{% set node_labels = kube_proxy_ds_label + pillar['node_labels'] %}
{% if grains['roles'][0] != 'kubernetes-master' and pillar['non_master_node_labels'] is defined -%}
  {% set node_labels = pillar['non_master_node_labels'] + "," + node_labels %}
{% endif %}
{% if node_labels != "" %}
  {% set node_labels = "--node-labels=" + node_labels %}
{% endif %}

{% set node_taints = "" %}
{% if pillar['node_taints'] is defined -%}
  {% set node_taints = "--register-with-taints=" + pillar['node_taints'] %}
{% endif -%}

{% set eviction_hard = "" %}
{% if pillar['eviction_hard'] is defined -%}
  {% set eviction_hard = "--eviction-hard=" + pillar['eviction_hard'] %}
{% endif -%}

{% set kubelet_auth = "--anonymous-auth=false --authorization-mode=Webhook --client-ca-file=" + pillar.get('ca_cert_bundle_path', '/var/lib/kubelet/ca.crt') %}
{% set pki = " --cert-dir=/var/lib/kubelet/pki" -%}

# test_args has to be kept at the end, so it will overwrite any prior configuration
DAEMON_ARGS="{{daemon_args}} {{bootstrap_kubeconfig}} {{kubeconfig}} {{require_kubeconfig}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{cloud_config}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{node_taints}} {{eviction_hard}} {{kubelet_auth}} {{pki}} {{feature_gates}} {{test_args}}"
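
Putting test_args at the end only helps because the kubelet's flag parsing lets a later occurrence of a flag override an earlier one; a stand-in parser (not the kubelet itself) demonstrating that assumption:

    parse() {
      local v=""
      for a in "$@"; do
        case "$a" in --v=*) v="${a#--v=}" ;; esac   # last --v=... wins
      done
      echo "effective --v=$v"
    }
    parse --v=2 --v=4   # prints: effective --v=4
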
@ -1,106 +0,0 @@
{% if pillar.get('is_systemd') %}
  {% set environment_file = '/etc/sysconfig/kubelet' %}
{% else %}
  {% set environment_file = '/etc/default/kubelet' %}
{% endif %}

{{ environment_file }}:
  file.managed:
    - source: salt://kubelet/default
    - template: jinja
    - user: root
    - group: root
    - mode: 644

/usr/local/bin/kubelet:
  file.managed:
    - source: salt://kube-bins/kubelet
    - user: root
    - group: root
    - mode: 755

/var/lib/kubelet/pki:
  file.directory:
    - mode: 755
    - makedirs: True

# The default here is that this file is blank. If this is the case, the kubelet
# won't be able to parse it as JSON and it will not be able to publish events
# to the apiserver. You'll see a single error line in the kubelet startup log
# about this.
/var/lib/kubelet/bootstrap-kubeconfig:
  file.managed:
    - source: salt://kubelet/bootstrap-kubeconfig
    - user: root
    - group: root
    - mode: 400
    - makedirs: true

{% if grains.cloud != 'gce' %}
/var/lib/kubelet/ca.crt:
  file.managed:
    - source: salt://kubelet/ca.crt
    - user: root
    - group: root
    - mode: 400
    - makedirs: true
{% endif %}

{% if pillar.get('is_systemd') %}

{{ pillar.get('systemd_system_path') }}/kubelet.service:
  file.managed:
    - source: salt://kubelet/kubelet.service
    - user: root
    - group: root

# The service.running block below doesn't work reliably.
# Instead we run our script, which e.g. does a systemd daemon-reload.
# But we keep the service block below, so it can be used by dependencies.
# TODO: Fix this
fix-service-kubelet:
  cmd.wait:
    - name: /opt/kubernetes/helpers/services bounce kubelet
    - watch:
      - file: /var/lib/kubelet/pki
      - file: /usr/local/bin/kubelet
      - file: {{ pillar.get('systemd_system_path') }}/kubelet.service
      - file: {{ environment_file }}
      - file: /var/lib/kubelet/bootstrap-kubeconfig
{% if grains.cloud != 'gce' %}
      - file: /var/lib/kubelet/ca.crt
{% endif %}

{% else %}

/etc/init.d/kubelet:
  file.managed:
    - source: salt://kubelet/initd
    - user: root
    - group: root
    - mode: 755

{% endif %}

kubelet:
  service.running:
    - enable: True
    - watch:
      - file: /usr/local/bin/kubelet
{% if pillar.get('is_systemd') %}
      - file: {{ pillar.get('systemd_system_path') }}/kubelet.service
{% else %}
      - file: /etc/init.d/kubelet
{% endif %}
{% if grains['os_family'] == 'RedHat' %}
      - file: /usr/lib/systemd/system/kubelet.service
{% endif %}
      - file: {{ environment_file }}
      - file: /var/lib/kubelet/bootstrap-kubeconfig
{% if grains.cloud != 'gce' %}
      - file: /var/lib/kubelet/ca.crt
{% endif %}
{% if pillar.get('is_systemd') %}
    - provider:
      - service: systemd
{%- endif %}
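
Per the comment in the state above, the bounce helper stands in for service.running by doing a systemd daemon-reload before restarting; a plausible hand-run equivalent (the helper itself is not shown in this diff, so this is an assumption about its behavior):

    systemctl daemon-reload
    systemctl restart kubelet
    systemctl is-active kubelet
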
@ -1,126 +0,0 @@
#!/bin/bash
#
### BEGIN INIT INFO
# Provides:          kubelet
# Required-Start:    $local_fs $network $syslog
# Required-Stop:
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: The Kubernetes node container manager
# Description:
#   The Kubernetes container manager maintains docker state against a state file.
### END INIT INFO


# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="The Kubernetes container manager"
NAME=kubelet
DAEMON=/usr/local/bin/kubelet
DAEMON_ARGS=""
DAEMON_LOG_FILE=/var/log/$NAME.log
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root

# Exit if the package is not installed
[ -x "$DAEMON" ] || exit 0

# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions

#
# Function that starts the daemon/service
#
do_start()
{
    # Avoid a potential race at boot time when both monit and init.d start
    # the same service
    PIDS=$(pidof $DAEMON)
    for PID in ${PIDS}; do
        kill -9 $PID
    done

    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
    # The first start-stop-daemon call runs with --test as a dry run, so an
    # already-running daemon is detected before anything is actually launched.
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER --test > /dev/null \
        || return 1
    start-stop-daemon --start --quiet --background --no-close \
        --make-pidfile --pidfile $PIDFILE \
        --exec $DAEMON -c $DAEMON_USER -- \
        $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \
        || return 2
}

#
# Function that stops the daemon/service
#
do_stop()
{
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
    RETVAL="$?"
    [ "$RETVAL" = 2 ] && return 2
    # Many daemons don't delete their pidfiles when they exit.
    rm -f $PIDFILE
    return "$RETVAL"
}


case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) log_end_msg 0 || exit 0 ;;
      2) log_end_msg 1 || exit 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1) log_end_msg 0 ;;
      2) exit 1 ;;
    esac
    ;;
  status)
    status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $?
    ;;

  restart|force-reload)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1)
        do_start
        case "$?" in
          0) log_end_msg 0 ;;
          1) log_end_msg 1 ;; # Old process is still running
          *) log_end_msg 1 ;; # Failed to start
        esac
        ;;
      *)
        # Failed to stop
        log_end_msg 1
        ;;
    esac
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac
@ -1,14 +0,0 @@
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/etc/sysconfig/kubelet
ExecStart=/usr/local/bin/kubelet "$DAEMON_ARGS"
Restart=always
RestartSec=2s
StartLimitInterval=0
KillMode=process

[Install]
WantedBy=multi-user.target
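
A sketch for checking what the unit above actually executes once /etc/sysconfig/kubelet has been rendered (standard systemctl subcommands, nothing specific to this repo):

    systemctl cat kubelet.service        # show the installed unit text
    systemctl show kubelet -p ExecStart  # show the resolved ExecStart
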
@ -1,6 +0,0 @@
approvers:
- bowei
- nicksardo
reviewers:
- bowei
- nicksardo
@ -1,17 +0,0 @@
/etc/kubernetes/manifests/glbc.manifest:
  file.managed:
    - source: salt://l7-gcp/glbc.manifest
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - makedirs: true
    - dir_mode: 755

/var/log/glbc.log:
  file.managed:
    - user: root
    - group: root
    - mode: 644