diff --git a/CHANGELOG-1.10.md b/CHANGELOG-1.10.md index 5ee0bce858d..b3a09c7ce88 100644 --- a/CHANGELOG-1.10.md +++ b/CHANGELOG-1.10.md @@ -1,61 +1,68 @@ -- [v1.10.7](#v1107) - - [Downloads for v1.10.7](#downloads-for-v1107) +- [v1.10.8](#v1108) + - [Downloads for v1.10.8](#downloads-for-v1108) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.10.6](#changelog-since-v1106) - - [Action Required](#action-required) + - [Changelog since v1.10.7](#changelog-since-v1107) - [Other notable changes](#other-notable-changes) -- [v1.10.6](#v1106) - - [Downloads for v1.10.6](#downloads-for-v1106) +- [v1.10.7](#v1107) + - [Downloads for v1.10.7](#downloads-for-v1107) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.10.5](#changelog-since-v1105) - - [Action Required](#action-required-1) + - [Changelog since v1.10.6](#changelog-since-v1106) + - [Action Required](#action-required) - [Other notable changes](#other-notable-changes-1) -- [v1.10.5](#v1105) - - [Downloads for v1.10.5](#downloads-for-v1105) +- [v1.10.6](#v1106) + - [Downloads for v1.10.6](#downloads-for-v1106) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.10.4](#changelog-since-v1104) - - [Action Required](#action-required-2) + - [Changelog since v1.10.5](#changelog-since-v1105) + - [Action Required](#action-required-1) - [Other notable changes](#other-notable-changes-2) -- [v1.10.4](#v1104) - - [Downloads for v1.10.4](#downloads-for-v1104) +- [v1.10.5](#v1105) + - [Downloads for v1.10.5](#downloads-for-v1105) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.10.3](#changelog-since-v1103) + - [Changelog since v1.10.4](#changelog-since-v1104) + - [Action 
Required](#action-required-2) - [Other notable changes](#other-notable-changes-3) -- [v1.10.3](#v1103) - - [Downloads for v1.10.3](#downloads-for-v1103) +- [v1.10.4](#v1104) + - [Downloads for v1.10.4](#downloads-for-v1104) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - - [Changelog since v1.10.2](#changelog-since-v1102) + - [Changelog since v1.10.3](#changelog-since-v1103) - [Other notable changes](#other-notable-changes-4) -- [v1.10.2](#v1102) - - [Downloads for v1.10.2](#downloads-for-v1102) +- [v1.10.3](#v1103) + - [Downloads for v1.10.3](#downloads-for-v1103) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) - - [Changelog since v1.10.1](#changelog-since-v1101) + - [Changelog since v1.10.2](#changelog-since-v1102) - [Other notable changes](#other-notable-changes-5) -- [v1.10.1](#v1101) - - [Downloads for v1.10.1](#downloads-for-v1101) +- [v1.10.2](#v1102) + - [Downloads for v1.10.2](#downloads-for-v1102) - [Client Binaries](#client-binaries-6) - [Server Binaries](#server-binaries-6) - [Node Binaries](#node-binaries-6) - - [Changelog since v1.10.0](#changelog-since-v1100) + - [Changelog since v1.10.1](#changelog-since-v1101) - [Other notable changes](#other-notable-changes-6) -- [v1.10.0](#v1100) - - [Downloads for v1.10.0](#downloads-for-v1100) +- [v1.10.1](#v1101) + - [Downloads for v1.10.1](#downloads-for-v1101) - [Client Binaries](#client-binaries-7) - [Server Binaries](#server-binaries-7) - [Node Binaries](#node-binaries-7) + - [Changelog since v1.10.0](#changelog-since-v1100) + - [Other notable changes](#other-notable-changes-7) +- [v1.10.0](#v1100) + - [Downloads for v1.10.0](#downloads-for-v1100) + - [Client Binaries](#client-binaries-8) + - [Server Binaries](#server-binaries-8) + - [Node Binaries](#node-binaries-8) - [Major Themes](#major-themes) - [Node](#node) - [Storage](#storage) @@ -69,7 +76,7 @@ - [Before 
Upgrading](#before-upgrading) - [Known Issues](#known-issues) - [Deprecations](#deprecations) - - [Other Notable Changes](#other-notable-changes-7) + - [Other Notable Changes](#other-notable-changes-8) - [Apps](#apps) - [AWS](#aws) - [Auth](#auth-1) @@ -92,69 +99,141 @@ - [External Dependencies](#external-dependencies) - [v1.10.0-rc.1](#v1100-rc1) - [Downloads for v1.10.0-rc.1](#downloads-for-v1100-rc1) - - [Client Binaries](#client-binaries-8) - - [Server Binaries](#server-binaries-8) - - [Node Binaries](#node-binaries-8) - - [Changelog since v1.10.0-beta.4](#changelog-since-v1100-beta4) - - [Other notable changes](#other-notable-changes-8) -- [v1.10.0-beta.4](#v1100-beta4) - - [Downloads for v1.10.0-beta.4](#downloads-for-v1100-beta4) - [Client Binaries](#client-binaries-9) - [Server Binaries](#server-binaries-9) - [Node Binaries](#node-binaries-9) - - [Changelog since v1.10.0-beta.3](#changelog-since-v1100-beta3) + - [Changelog since v1.10.0-beta.4](#changelog-since-v1100-beta4) - [Other notable changes](#other-notable-changes-9) -- [v1.10.0-beta.3](#v1100-beta3) - - [Downloads for v1.10.0-beta.3](#downloads-for-v1100-beta3) +- [v1.10.0-beta.4](#v1100-beta4) + - [Downloads for v1.10.0-beta.4](#downloads-for-v1100-beta4) - [Client Binaries](#client-binaries-10) - [Server Binaries](#server-binaries-10) - [Node Binaries](#node-binaries-10) - - [Changelog since v1.10.0-beta.2](#changelog-since-v1100-beta2) + - [Changelog since v1.10.0-beta.3](#changelog-since-v1100-beta3) - [Other notable changes](#other-notable-changes-10) -- [v1.10.0-beta.2](#v1100-beta2) - - [Downloads for v1.10.0-beta.2](#downloads-for-v1100-beta2) +- [v1.10.0-beta.3](#v1100-beta3) + - [Downloads for v1.10.0-beta.3](#downloads-for-v1100-beta3) - [Client Binaries](#client-binaries-11) - [Server Binaries](#server-binaries-11) - [Node Binaries](#node-binaries-11) - - [Changelog since v1.10.0-beta.1](#changelog-since-v1100-beta1) - - [Action Required](#action-required-3) + - [Changelog since 
v1.10.0-beta.2](#changelog-since-v1100-beta2) - [Other notable changes](#other-notable-changes-11) -- [v1.10.0-beta.1](#v1100-beta1) - - [Downloads for v1.10.0-beta.1](#downloads-for-v1100-beta1) +- [v1.10.0-beta.2](#v1100-beta2) + - [Downloads for v1.10.0-beta.2](#downloads-for-v1100-beta2) - [Client Binaries](#client-binaries-12) - [Server Binaries](#server-binaries-12) - [Node Binaries](#node-binaries-12) - - [Changelog since v1.10.0-alpha.3](#changelog-since-v1100-alpha3) - - [Action Required](#action-required-4) + - [Changelog since v1.10.0-beta.1](#changelog-since-v1100-beta1) + - [Action Required](#action-required-3) - [Other notable changes](#other-notable-changes-12) -- [v1.10.0-alpha.3](#v1100-alpha3) - - [Downloads for v1.10.0-alpha.3](#downloads-for-v1100-alpha3) +- [v1.10.0-beta.1](#v1100-beta1) + - [Downloads for v1.10.0-beta.1](#downloads-for-v1100-beta1) - [Client Binaries](#client-binaries-13) - [Server Binaries](#server-binaries-13) - [Node Binaries](#node-binaries-13) - - [Changelog since v1.10.0-alpha.2](#changelog-since-v1100-alpha2) + - [Changelog since v1.10.0-alpha.3](#changelog-since-v1100-alpha3) + - [Action Required](#action-required-4) - [Other notable changes](#other-notable-changes-13) -- [v1.10.0-alpha.2](#v1100-alpha2) - - [Downloads for v1.10.0-alpha.2](#downloads-for-v1100-alpha2) +- [v1.10.0-alpha.3](#v1100-alpha3) + - [Downloads for v1.10.0-alpha.3](#downloads-for-v1100-alpha3) - [Client Binaries](#client-binaries-14) - [Server Binaries](#server-binaries-14) - [Node Binaries](#node-binaries-14) - - [Changelog since v1.10.0-alpha.1](#changelog-since-v1100-alpha1) - - [Action Required](#action-required-5) + - [Changelog since v1.10.0-alpha.2](#changelog-since-v1100-alpha2) - [Other notable changes](#other-notable-changes-14) -- [v1.10.0-alpha.1](#v1100-alpha1) - - [Downloads for v1.10.0-alpha.1](#downloads-for-v1100-alpha1) +- [v1.10.0-alpha.2](#v1100-alpha2) + - [Downloads for v1.10.0-alpha.2](#downloads-for-v1100-alpha2) - 
[Client Binaries](#client-binaries-15) - [Server Binaries](#server-binaries-15) - [Node Binaries](#node-binaries-15) + - [Changelog since v1.10.0-alpha.1](#changelog-since-v1100-alpha1) + - [Action Required](#action-required-5) + - [Other notable changes](#other-notable-changes-15) +- [v1.10.0-alpha.1](#v1100-alpha1) + - [Downloads for v1.10.0-alpha.1](#downloads-for-v1100-alpha1) + - [Client Binaries](#client-binaries-16) + - [Server Binaries](#server-binaries-16) + - [Node Binaries](#node-binaries-16) - [Changelog since v1.9.0](#changelog-since-v190) - [Action Required](#action-required-6) - - [Other notable changes](#other-notable-changes-15) + - [Other notable changes](#other-notable-changes-16) +# v1.10.8 + +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.10/examples) + +## Downloads for v1.10.8 + + +filename | sha256 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes.tar.gz) | `8af88c2aa340fd4801e39374572e4e4b876b0c723327fe32286bb4a0b3f5c1fd` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-src.tar.gz) | `23e5e78b3d96c7acf955bedb38d0d2f03e5d70f9a263d3e571689b12fc90fd5b` + +### Client Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-client-darwin-386.tar.gz) | `c93ce57df402dfcdb5887a3d32ba8604c63df9ab17c8ec7bf29d9c0bd783949c` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-client-darwin-amd64.tar.gz) | `d04ef3fb961421f49015f4b9236d2ab43a2f2f1504640bb363482bebed7b63bf` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-client-linux-386.tar.gz) | `aa4710010d16287335f90e58e417748238cd6a24c7e3ec46acb04b49582e8089` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-client-linux-amd64.tar.gz) | `925594f5e3b062323701e3d751b2a4fbd5aa7f48f5e6271b6b9aa8280dee7e7b` 
+[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-client-linux-arm.tar.gz) | `a399dc143942ddad294eeeda3fdcd7ed2c8a1b3616d046aea310b68e01b46a6b` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-client-linux-arm64.tar.gz) | `6e68974fec812f7f4470c4d42145719e81d703fe7c8c2b757b26fee660bbc42d` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-client-linux-ppc64le.tar.gz) | `8d99975699753d95439643f835f125c098bbe5617f221e1dff9ee4c89cf2f488` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-client-linux-s390x.tar.gz) | `76990333dc565ba0c75e0e1bc6544c3c60439339ec3e450d98c43e706b4f1dbe` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-client-windows-386.tar.gz) | `13d6c632602f933fa742d6feb2b503e30b42cf5adcd50b63fdb89fea08cc5a0a` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-client-windows-amd64.tar.gz) | `883b4b04a5afde5b624499d25d768160105257697212b3fe58229f5f84db6350` + +### Server Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-server-linux-amd64.tar.gz) | `e09463d6336099a20509387b29dd6e2cd510636cdc1d5a925a0543152bbab531` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-server-linux-arm.tar.gz) | `2b2da5e3e436ca7bb9c30cc018ed094478bafb441370c09689ca0269a3b36fcc` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-server-linux-arm64.tar.gz) | `1d21cb8aac0431bba97d7705c4938311021b1aa686e017b0e797d0045ce4957b` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-server-linux-ppc64le.tar.gz) | `234d27c6ae5434ce9238bd5b117a49eae044171ae303b258e54839d9e678dc93` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-server-linux-s390x.tar.gz) | `6765a12700e4c14ddda36659fb78480d189ef2283806349d9ea15fe2b19181fe` + +### Node 
Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-node-linux-amd64.tar.gz) | `6894cf0d0b1d9dbd156c15168a1e1f1232497b891cbb8d23c701f59e42f551fe` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-node-linux-arm.tar.gz) | `ee4d65b6f717604f4b14ed04e1ff8c0850b53e36ff2999db19d5754067f8871a` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-node-linux-arm64.tar.gz) | `65c9d79ed0b460191df5956f7bac64212839a0296b40b32331e0f8d0a7a32d84` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-node-linux-ppc64le.tar.gz) | `ea887de37de4681472878b9fb3d5cf7294d92660195b20c3282de54389a7becb` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-node-linux-s390x.tar.gz) | `b387e375370e4ec2a7b48b8126b39503906c5a094e769b5fd2847910a6fbdc61` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.10.8/kubernetes-node-windows-amd64.tar.gz) | `c79849df2946e12ab4e51161d2fdffceaae49d3d26268c2afff69aa028ac2411` + +## Changelog since v1.10.7 + +### Other notable changes + +* [fluentd-gcp-scaler addon] Bump fluentd-gcp-scaler to 0.4 to pick up security fixes. ([#67691](https://github.com/kubernetes/kubernetes/pull/67691), [@loburm](https://github.com/loburm)) + * [prometheus-to-sd addon] Bump prometheus-to-sd to 0.3.1 to pick up security fixes, bug fixes and new features. + * [event-exporter addon] Bump event-exporter to 0.2.3 to pick up security fixes. +* Fix validation for HealthzBindAddress in kube-proxy when --healthz-port is set to 0 ([#66138](https://github.com/kubernetes/kubernetes/pull/66138), [@wsong](https://github.com/wsong)) +* Role, ClusterRole and their bindings for cloud-provider is put under system namespace. Their addonmanager mode switches to EnsureExists. 
([#67224](https://github.com/kubernetes/kubernetes/pull/67224), [@grayluck](https://github.com/grayluck)) +* Bump ip-masq-agent to v2.1.1 ([#67916](https://github.com/kubernetes/kubernetes/pull/67916), [@MrHohn](https://github.com/MrHohn)) + * - Update debian-iptables image for CVEs. + * - Change chain name to IP-MASQ to be compatible with the + * pre-injected masquerade rules. +* Fix VMWare VM freezing bug by reverting [#51066](https://github.com/kubernetes/kubernetes/pull/51066) ([#67825](https://github.com/kubernetes/kubernetes/pull/67825), [@nikopen](https://github.com/nikopen)) +* support cross resource group for azure file ([#68117](https://github.com/kubernetes/kubernetes/pull/68117), [@andyzhangx](https://github.com/andyzhangx)) +* Return apiserver panics as 500 errors instead of terminating the apiserver process. ([#68001](https://github.com/kubernetes/kubernetes/pull/68001), [@sttts](https://github.com/sttts)) +* Cluster Autoscaler 1.2.3 (release notes: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.2.3) ([#68348](https://github.com/kubernetes/kubernetes/pull/68348), [@losipiuk](https://github.com/losipiuk)) +* Fix scheduler informers to receive events for all the pods in the cluster. ([#63003](https://github.com/kubernetes/kubernetes/pull/63003), [@bsalamat](https://github.com/bsalamat)) +* attachdetach controller attaches volumes immediately when Pod's PVCs are bound ([#66863](https://github.com/kubernetes/kubernetes/pull/66863), [@cofyc](https://github.com/cofyc)) +* PVC may not be synced to controller local cache in time if PV is bound by external PV binder (e.g. kube-scheduler), double check if PVC is not found to prevent reclaiming PV wrongly. ([#67062](https://github.com/kubernetes/kubernetes/pull/67062), [@cofyc](https://github.com/cofyc)) +* kube-apiserver now includes all registered API groups in discovery, including registered extension API group/versions for unavailable extension API servers. 
([#66932](https://github.com/kubernetes/kubernetes/pull/66932), [@nilebox](https://github.com/nilebox)) + + + # v1.10.7 [Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.10/examples) @@ -2325,7 +2404,7 @@ filename | sha256 hash # v1.10.0-alpha.3 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.10/examples) ## Downloads for v1.10.0-alpha.3 @@ -2410,7 +2489,7 @@ filename | sha256 hash # v1.10.0-alpha.2 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.10/examples) ## Downloads for v1.10.0-alpha.2 @@ -2607,7 +2686,7 @@ filename | sha256 hash # v1.10.0-alpha.1 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.10/examples) ## Downloads for v1.10.0-alpha.1 diff --git a/CHANGELOG-1.11.md b/CHANGELOG-1.11.md index 1c2227b079c..fb9f5c4039a 100644 --- a/CHANGELOG-1.11.md +++ b/CHANGELOG-1.11.md @@ -207,7 +207,7 @@ filename | sha256 hash # v1.11.2 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.11/examples) +[Documentation](https://docs.k8s.io) ## Downloads for v1.11.2 @@ -301,7 +301,7 @@ filename | sha256 hash # v1.11.1 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.11/examples) +[Documentation](https://docs.k8s.io) ## Downloads for v1.11.1 @@ -390,7 +390,7 @@ filename | sha256 hash # v1.11.0 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.11/examples) +[Documentation](https://docs.k8s.io) ## Downloads for v1.11.0 @@ -1106,7 +1106,7 @@ You can now bind tokens to service requests. 
([ref](https://github.com/kubernete # v1.11.0-rc.3 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.11/examples) +[Documentation](https://docs.k8s.io) ## Downloads for v1.11.0-rc.3 @@ -1167,7 +1167,7 @@ filename | sha256 hash # v1.11.0-rc.2 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.11/examples) +[Documentation](https://docs.k8s.io) ## Downloads for v1.11.0-rc.2 @@ -1227,7 +1227,7 @@ filename | sha256 hash # v1.11.0-rc.1 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.11/examples) +[Documentation](https://docs.k8s.io) ## Downloads for v1.11.0-rc.1 @@ -1310,7 +1310,7 @@ filename | sha256 hash # v1.11.0-beta.2 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.11/examples) +[Documentation](https://docs.k8s.io) ## Downloads for v1.11.0-beta.2 @@ -1457,7 +1457,7 @@ filename | sha256 hash # v1.11.0-beta.1 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.11/examples) +[Documentation](https://docs.k8s.io) ## Downloads for v1.11.0-beta.1 @@ -1676,7 +1676,7 @@ controllerManagerExtraVolumes: # v1.11.0-alpha.2 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) ## Downloads for v1.11.0-alpha.2 @@ -1782,7 +1782,7 @@ filename | sha256 hash # v1.11.0-alpha.1 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) ## Downloads for v1.11.0-alpha.1 diff --git a/CHANGELOG-1.12.md b/CHANGELOG-1.12.md index 8dfb8a14720..c9cfaf0ecc8 100644 --- a/CHANGELOG-1.12.md +++ b/CHANGELOG-1.12.md @@ -1,33 +1,953 @@ -- [v1.12.0-beta.2](#v1120-beta2) - - [Downloads for v1.12.0-beta.2](#downloads-for-v1120-beta2) +- [v1.12.0](#v1120) + - [Downloads for v1.12.0](#downloads-for-v1120) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node 
Binaries](#node-binaries) - - [Changelog since v1.12.0-beta.1](#changelog-since-v1120-beta1) - - [Action Required](#action-required) - - [Other notable changes](#other-notable-changes) -- [v1.12.0-beta.1](#v1120-beta1) - - [Downloads for v1.12.0-beta.1](#downloads-for-v1120-beta1) + - [Known Issues](#known-issues) + - [Major Themes](#major-themes) + - [SIG API Machinery](#sig-api-machinery) + - [SIG-autoscaling](#sig-autoscaling) + - [SIG-Azure](#sig-azure) +- [Adding Azure Availability Zones support to cloud provider.](#adding-azure-availability-zones-support-to-cloud-provider) +- [Supporting Cross RG resources (disks, Azure File and node [Experimental]](#supporting-cross-rg-resources-disks-azure-file-and-node-experimental) + - [SIG-cli](#sig-cli) + - [SIG-cloud-provider](#sig-cloud-provider) + - [SIG-cluster-lifecycle](#sig-cluster-lifecycle) + - [SIG-ibmcloud](#sig-ibmcloud) + - [SIG-instrumentation](#sig-instrumentation) + - [SIG-node](#sig-node) + - [SIG-OpenStack](#sig-openstack) + - [SIG-scheduling](#sig-scheduling) + - [SIG-service-catalog](#sig-service-catalog) + - [SIG-storage](#sig-storage) + - [SIG-vmware](#sig-vmware) + - [SIG-windows](#sig-windows) + - [Action Required](#action-required) + - [Deprecations and removals](#deprecations-and-removals) + - [New Features](#new-features) + - [API Changes](#api-changes) + - [Other Notable Changes](#other-notable-changes) + - [SIG API Machinery](#sig-api-machinery-1) + - [SIG Apps](#sig-apps) + - [SIG Auth](#sig-auth) + - [SIG Autoscaling](#sig-autoscaling-1) + - [SIG AWS](#sig-aws) + - [SIG Azure](#sig-azure-1) + - [SIG CLI](#sig-cli-1) + - [SIG Cloud Provider](#sig-cloud-provider-1) + - [SIG Cluster Lifecycle](#sig-cluster-lifecycle-1) + - [SIG GCP](#sig-gcp) + - [SIG Instrumentation](#sig-instrumentation-1) + - [SIG Network](#sig-network) + - [SIG Node](#sig-node-1) + - [SIG OpenStack](#sig-openstack-1) + - [SIG Scheduling](#sig-scheduling-1) + - [SIG Storage](#sig-storage-1) + - [SIG VMWare](#sig-vmware-1) 
+ - [SIG Windows](#sig-windows-1) + - [Other Notable Changes](#other-notable-changes-1) + - [Bug Fixes](#bug-fixes) + - [Not Very Notable (that is, non-user-facing)](#not-very-notable-that-is-non-user-facing) + - [External Dependencies](#external-dependencies) +- [v1.12.0-rc.2](#v1120-rc2) + - [Downloads for v1.12.0-rc.2](#downloads-for-v1120-rc2) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1) - - [Action Required](#action-required-1) - - [Other notable changes](#other-notable-changes-1) -- [v1.12.0-alpha.1](#v1120-alpha1) - - [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1) + - [Changelog since v1.12.0-rc.1](#changelog-since-v1120-rc1) + - [Other notable changes](#other-notable-changes-2) +- [v1.12.0-rc.1](#v1120-rc1) + - [Downloads for v1.12.0-rc.1](#downloads-for-v1120-rc1) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.11.0](#changelog-since-v1110) + - [Changelog since v1.12.0-beta.2](#changelog-since-v1120-beta2) + - [Action Required](#action-required-1) + - [Other notable changes](#other-notable-changes-3) +- [v1.12.0-beta.2](#v1120-beta2) + - [Downloads for v1.12.0-beta.2](#downloads-for-v1120-beta2) + - [Client Binaries](#client-binaries-3) + - [Server Binaries](#server-binaries-3) + - [Node Binaries](#node-binaries-3) + - [Changelog since v1.12.0-beta.1](#changelog-since-v1120-beta1) - [Action Required](#action-required-2) - - [Other notable changes](#other-notable-changes-2) + - [Other notable changes](#other-notable-changes-4) +- [v1.12.0-beta.1](#v1120-beta1) + - [Downloads for v1.12.0-beta.1](#downloads-for-v1120-beta1) + - [Client Binaries](#client-binaries-4) + - [Server Binaries](#server-binaries-4) + - [Node Binaries](#node-binaries-4) + - [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1) + - 
[Action Required](#action-required-3) + - [Other notable changes](#other-notable-changes-5) +- [v1.12.0-alpha.1](#v1120-alpha1) + - [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1) + - [Client Binaries](#client-binaries-5) + - [Server Binaries](#server-binaries-5) + - [Node Binaries](#node-binaries-5) + - [Changelog since v1.11.0](#changelog-since-v1110) + - [Action Required](#action-required-4) + - [Other notable changes](#other-notable-changes-6) +# v1.12.0 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.12.0 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes.tar.gz) | `a3db4289ed722db75e51b50f6070d9ec4237c6da0c15e306846d88f4ac5d23c632e1e91c356f54be8abbaa8826c2e416adcc688612dfcb3dd9b92724e45dbefe` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-src.tar.gz) | `d7c1b837095eb1c0accdbe56020a4f9e64ecc8856fb95f872ff1eacc932948630f62df1d848320cf29f380ce8683c0e150b1a8ac815f1a00e29c5bd33061c1eb` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-client-darwin-386.tar.gz) | `a78608d8a1a88219425d9c6266acbf3d93bf1541862cef4c84a6b0bf4741d80f34c91eb1997587d370f69df2df07af261b724bb8ab6080528df7a65c73239471` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-client-darwin-amd64.tar.gz) | `eea9201e28dff246730cf43134584df0f94a3de05d1a88191ed62c20ebdab40ce9eae97852571fbc991e9b26f5e0f7042578a5113a75cec1773233e800408fd6` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-client-linux-386.tar.gz) | `11c5d6629cd8cbcf9ca241043774ca93085edc642b878afb77b3cef2ef26f8b018af1ade362ed742d3781975ed3b4c227b7364e44e5de4d0d96382ddeac3d764` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-client-linux-amd64.tar.gz) | 
`41d976898cd56a2899bfdcac028a54f2ea5b729320908004bdb3ea33576a1d0f25baa61e12a14c9eb011d876db56b4be91221a1f0898b471f0908b38a2fdf280` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-client-linux-arm.tar.gz) | `c7f363effbbbaddc85d933d4b86f5b56ce6e6472e763ae59ff6888084280a4efda21c4447afba80a479ac6b021094cb31a02c9bd522da866643c084bc03515df` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-client-linux-arm64.tar.gz) | `8dd0ef808d75e4456aa3fd3d109248280f7436be9c72790d99a8cd7643561160569e9ad466c75240d1b195be33241b8020047f78c83b8671b210e9eff201a644` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-client-linux-ppc64le.tar.gz) | `eff7b0cab10adad04558a24be283c990466380b0dcd0f71be25ac4421c88fec7291e895503308539058cfe178a7b6d4e7b1974c6cb57e2e59853e04ae626d2c3` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-client-linux-s390x.tar.gz) | `535fb787c8b26f4dcf9b159a7cd00ea482c4e14d5fc2cd150402ba8ea2ccfb28c2cdae73843b31b689ad8c20ccd18a6caf82935e2bdf0a7778aa2ce6aa94b17c` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-client-windows-386.tar.gz) | `11036a56d60c5e9ee12f02147ca9f233498a008c901e1e68196444be961440f5d544e1ca180930183f01e2a486a17e4634324e2453a5d0239504680089075aa7` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-client-windows-amd64.tar.gz) | `e560abcb8fbe733ec7d945d9e12f6e7a873dd3c0fd1cbe1ecd369775f9374f289242778deea80c47d46d62a0e392b5b64d8dc3bd1258cec088c20508b3af2c4d` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-server-linux-amd64.tar.gz) | `093d44afc221c9bdf6d5d825726404efbb07b882ca4f69186ec681273f24875f8b8b0065bceba27b1ec1727bf08ba2d0d73649ec48d5e48872b2635c21b5313c` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-server-linux-arm.tar.gz) | 
`a3178ed50562d24b63e27fa9bd99ccd1b244dea508b537ad08c49ce78bb4ba0fea606216135aea67b89329a0185cc27abfc36513ff186adca8ec39bb72cef9ae` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-server-linux-arm64.tar.gz) | `b8bf707dabd0710fbc4590ce75a63773339e00f32779a4b59c5039b94888acfe96689ef76a1599a870d51bd56db62d60e1c22b08b163717b3581dea7c82ad293` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-server-linux-ppc64le.tar.gz) | `a9d8e1eef7f3a548b44ebb9df3f9f6b5592773d4b89bbe17842242b8c9bb67331a4513255f54169a602933da8a731f6a8820b88c73f2c1e21f5c9d50f6d0ee07` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-server-linux-s390x.tar.gz) | `e584d42d7059ed917dcc66e328e20ef15487ccc2b0ebffa43f0c466633d8ac49d6e0f6cbdf5f9b3824cd8575acbcca02f7815651ea13616ae1043dd7d518de2d` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-node-linux-amd64.tar.gz) | `6e0d16a21bd0f9a84222838cf75532a32df350b08b5073b3dbbc3338720daf6a1c24927ee191175d2d07a5b9d3d8bf6b5aaf3cfef6dfeb1f010c6a5f442e5e5e` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-node-linux-arm.tar.gz) | `8509894b54a6e0d42aef637ef84443688e2f8ee0942b33842651e5760aad6f8283045a2bd55b8e4f43dcf63aa43a743920be524752d520d50f884dff4dd8d441` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-node-linux-arm64.tar.gz) | `f1555af73cf96d12e632b2cf42f2c4ac962d8da25fb41f36d768428a93544bee0fdcc86237e5d15d513e71795a63f39aa0c192127c3835fc1f89edd3248790a1` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-node-linux-ppc64le.tar.gz) | `fb23f3021350d3f60df4ccab113f927f3521fd1f91851e028eb05e246fe6269c25ebe0dc4257b797c61d36accab6772a3bcced0b5208e61b96756890f09aae55` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-node-linux-s390x.tar.gz) | 
`fbf6cb2273ab4d253693967a5ee111b5177dd23b08a26d33c1e90ec6e5bf2f1d6877858721ecdd7ad583cbfb548020ac025261bf3ebb6184911ce6f0fb1d0b20` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.0/kubernetes-node-windows-amd64.tar.gz) | `fdec44561ef0e4d50c6a256aa6eb7255e5da4f6511e91f08d0e579ff13c91faa42e1e07a7992ad2a03b234d636c5f708c9a08437d837bb24e724caaec90dbf69` + +> - Start SHA: 91e7b4fd31fcd3d5f436da26c980becec37ceefe +> - End Sha: 337e0e18f1aefa199bd0a1786f8eab42e948064c + +## Known Issues + +- Feature [#566](https://github.com/kubernetes/kubernetes/issues/566) enabling CoreDNS as the default for kube-up deployments was dropped from the release due to a scalability memory resource consumption issue observed. If a cluster operator is considering using CoreDNS on a cluster greater than 2000 nodes, it may be necessary to give more consideration to CoreDNS pod memory resource limits and experimentally measure that memory usage versus cluster resource availability. +- kube-controller-manager currently needs a writable `--cert-dir` (default is `/var/run/kubernetes`) for generating self-signed certificates, when no `--tls-cert-file` or `--tls-private-key-file` are provided. +- The `system:kube-controller-manager` ClusterRole lacks permission to `get` the `configmap` extension-apiserver-authentication. kube-controller-manager errors if run with a service account bound to the clusterrole. +- Runtime handler and Windows npipe protocol are not supported yet in crictl v1.11.x. Those features will be supported in crictl [v1.12.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.12.0), together with Kubernetes v1.12.1. + +## Major Themes + +### SIG API Machinery + +SIG API work this cycle involved development of the "dry run" functionality, which enables users to see the results of a particular command without persisting those changes. 
+ +### SIG-autoscaling + +SIG Autoscaling focused on improving the Horizontal Pod Autoscaling API and algorithm: +- We released autoscaling/v2beta2, which cleans up and unifies the API +- We improved readiness detection and smoothing to work well in a larger variety of use cases + +### SIG-Azure + +SIG Azure was focused on two primary new alpha features: +# Adding Azure Availability Zones support to cloud provider. +# Supporting Cross RG resources (disks, Azure File and node [Experimental] + +Besides the above new features, support for Azure Virtual Machine Scale Sets (VMSS) and Cluster-Autoscaler is now stable and considered GA: + +- Azure virtual machine scale sets (VMSS) allow you to create and manage identical load balanced +VMs that automatically increase or decrease based on demand or a set schedule. +- With this new stable feature, Kubernetes supports the scaling of containerized applications +with Azure VMSS, including the ability to integrate it with cluster-autoscaler to automatically +adjust the size of the Kubernetes clusters based on the same conditions. + +### SIG-cli + +SIG CLI focused on implementing the new plugin mechanism, providing a library with common CLI tooling for plugin authors and further refactorings of the code. + +### SIG-cloud-provider + +This is the first Kubernetes release for this SIG! In v1.12, SIG Cloud Provider focused on building the processes and infrastructure to better support existing and new cloud providers. 
Some of these initiatives (many of which are still in progress) are: + +- Reporting E2E conformance test results to TestGrid from every cloud provider (in collaboration with SIG Testing & SIG Release) +- Defining minimum required documentation from each cloud provider which includes (in collaboration with SIG Docs): + - example manifests for the kube-apiserver, kube-controller-manager, kube-schedule, kubelet, and the cloud-controller-manager + - labels/annotations that are consumed by any cloud specific controllers + +In addition to the above, SIG Cloud Provider has been focusing on a long running effort to remove cloud provider code from kubernetes/kubernetes. + +### SIG-cluster-lifecycle + +In 1.12, SIG Cluster lifecycle has focused on improving the user experience in kubeadm, by fixing a number of bugs and adding some new important features. + +Here is a list of some of the changes that have been made to kubeadm: + +- Kubeadm internal config has been promoted to `v1alpha3`: + - `v1alpha1` has been removed. + - `v1alpha3` has split apart `MasterConfiguration` into separate components; `InitConfiguration`, `ClusterConfiguration`, `JoinConfiguration`, `KubeletConfiguration`, and `KubeProxyConfiguration` + - Different configuration types can be supplied all in the same file separated by `---`. +- Improved CRI handling + - crictl is no longer required in docker-only setups. + - Better detection of installed CRI. + - Better output for image pull errors. +- Improved air-gapped and offline support + - kubeadm now handles air-gapped environments by using the local client version as a fallback. + - Some kubeadm commands are now allowed to work in a completely offline mode. +- Certificate handling improvements: + - Renew certs as part of upgrade. + - New `kubeadm alpha phase certs renew` command for renewing certificates. + - Certificates created with kubeadm now have improved uniqueness of Distinguished Name fields. 
+- HA improvements: + - `kubeadm join --experimental-control-plane` can now be used to join control plane instances to an existing cluster. + - `kubeadm upgrade node experimental-control-plane` can now be used for upgrading secondary control plane instances created with `kubeadm join --experimental-control-plane`. +- Multi-arch support (EXPERIMENTAL): + - kubeadm now adds support for docker “schema 2” manifest lists. This is a preliminary part of the process of making kubeadm-based k8s deployments support multiple architectures. +- Deprecating features: + - The Alpha feature-gates HighAvailability, SelfHosting, CertsInSecrets are now deprecated, and will be removed in k8s v1.13.0. + +### SIG-ibmcloud + +As a newly created SIG, the SIG-ibmcloud has mainly focused on SIG set up, sharing IBM Cloud's ongoing Kubernetes work like scalability tests, Kubernetes upgrade strategy etc. with the SIG members and starting work on processes to move cloud provider code to a public GitHub repo. + +### SIG-instrumentation + +No feature work, but a large refactoring of metrics-server as well as a number of bug fixes. + +### SIG-node + +SIG-node graduated the PodShareProcessNamespace feature from alpha to beta. This feature allows a pod spec to request that all containers in a pod share a common process namespace. + +Two alpha features were also added in this release. + +The RuntimeClass alpha feature enables a node to surface multiple runtime options to support a variety of workload types. Examples include native linux containers, and “sandboxed” containers that isolate the container from the host kernel. + +The CustomCFSQuotaPeriod alpha feature enables node administrators to change the default period used to enforce CFS quota on a node. This can improve performance for some workloads that experience latency while using CFS quota with the default measurement period.
Finally, the SIG continues to focus on improving reliability by fixing bugs while working out design and implementation of future features. + +### SIG-OpenStack + +SIG-OpenStack development was primarily focused on fixing bugs and improving feature parity with OpenStack resources. New features were primarily limited to the external provider in an effort to drive adoption of the OpenStack external provider over the in-tree provider. + +In-tree bug fixes and improvements included: +- Fix load balancer status without VIP. +- Fix filtering of server status. +- Fix resizing PVC of Cinder volume. +- Disable load balancer configuration if it is not defined in cloud config. +- Add support for node shutdown taint. + +The external provider includes all of the above with the additional fixes and features: +- Fix bug to prevent allocation of existing floating IP. +- Fix Cinder authentication bug when OS_DOMAIN_NAME not specified. +- Fix Keystone authentication errors by skipping synchronization for unscoped tokens. +- Fix authentication error for client-auth-plugin. +- Fix dependency references from in-tree-provider to point to external provider. +- Add shutdown instance by Provider ID. +- Add annotation to preserve floating IP after service delete. +- Add conformance testing to stable and development branches. +- Add support to Manilla for trustee authentication and supplying custom CAs. +- Add and update documentation. +- Add support to Manilla for provisioning existing shares. +- Add cluster name to load balancer description. +- Add synchronization between Kubernetes and Keystone projects. +- Add use of internal DNS name for 'hostname' of nodes. +- Add support for CSI spec v0.3.0 for both Cinder and Manilla. +- Add 'cascade delete' support for Octavia load balancers to improve performance. +- Add improved load balancer naming. + +### SIG-scheduling + +SIG Scheduling development efforts have been primarily focused on improving performance and reliability of the scheduler.
+- Performance of the inter-pod affinity/anti-affinity feature is improved over 100X via algorithmic optimization. +- DaemonSet pods, which used to be scheduled by the DaemonSet controller, will be scheduled by the default scheduler in 1.12. This change allows DaemonSet pods to enjoy all the scheduling features of the default scheduler. +- The Image Locality priority function of the scheduler has been improved and is now enabled by default. With this feature enabled, nodes that have all or a partial set of images required for running a pod are preferred over other nodes, which improves pod start-up time. +- TaintNodeByCondition has been moved to Beta and is enabled by default. +- Scheduler throughput has been improved by ~50% in large clusters (>2000 nodes). + +### SIG-service-catalog +- The Originating Identity feature, which lets the broker know which user that performed an action, is now GA. +- [Namespaced Brokers](https://svc-cat.io/docs/namespaced-broker-resources/), which enable operators to install a broker into a namespace instead of the cluster level, reached GA. +- The [Service Plan Defaults](https://svc-cat.io/docs/service-plan-defaults/) feature is in alpha and is under active development. This feature gives operators the ability to define defaults for when someone provisions a service. +- We now support [filtering which services are exposed by Service Catalog](https://svc-cat.io/docs/catalog-restrictions/). +- We have also Improved the CLI experience both for kubectl and svcat by improving the output formatting, and by adding more commands. + +### SIG-storage + +SIG Storage promoted the [Kubernetes volume topology feature](https://github.com/kubernetes/features/issues/490) to beta. This enables Kubernetes to understand and act intelligently on volume accessibility information (such as the “zone” a cloud volume is provisioned in, the “rack” that a SAN array is accessible from, and so on). 
+ +The [dynamic maximum volume count](https://github.com/kubernetes/features/issues/554) feature was also moved to beta. This enables a volume plugin to specify the maximum number of a given volume type per node as a function of the node characteristics (for example, a larger limit for larger nodes, a smaller limit for smaller nodes). + +SIG Storage also worked on a number of [Container Storage Interface (CSI) features](https://github.com/kubernetes/features/issues/178) this quarter in anticipation of moving support for CSI from beta to GA in the next Kubernetes release. This includes graduating the dependent “mount namespace propagation” feature to GA, moving the Kubelet plugin registration mechanism to beta, adding alpha support for a new CSI driver registry as well as for topology, and adding a number of alpha features to support the use of CSI for “local ephemeral volumes” (that is, volumes that exist for the lifecycle of a pod and contain some injected information, like a token or secret). + +With Kubernetes v1.12, SIG Storage also introduced alpha support for [volume snapshotting](https://github.com/kubernetes/features/issues/177). This feature introduces the ability to create/delete volume snapshots and create new volumes from a snapshot using the Kubernetes API. + +### SIG-vmware + +SIG-VMware development was primarily focused on fixing bugs for the in-tree cloud provider, starting the development of the external cloud provider and taking ownership of the cluster-api provider for vSphere. + +In-tree cloud provider bug fixes and improvements included: +- Adding initial Zones support to the provider using vSphere Tags +- Improving the testing harness for the cloud provider by introducing vcsim for automated testing +- Fixing a bug that was preventing updates from 1.10 to 1.11 + +The external cloud provider was established and reached feature parity with in-tree, and we expect to stabilize it and have it as preferred deployment model by 1.13. 
We are also getting started on externalizing the vSphere volume functionalities in a CSI plugin to fully reproduce the current in-tree storage functionality. + +The Cluster API effort is currently undergoing a complete rehaul of the existing codebase, moving off Terraform and into using govmomi directly. + +### SIG-windows + +SIG Windows focused on stability and reliability of our existing feature set. We primarily fixed bugs as we march towards a near future stable release. + +## Action Required + +- etcd2 as a backend is deprecated and support will be removed in Kubernetes 1.13. +- The --storage-versions flag of kube-apiserver is now deprecated. This flag should be omitted to ensure the default storage versions are used. Otherwise the cluster is not safe to upgrade to a version newer than 1.12. This flag will be removed in 1.13. ([#68080](https://github.com/kubernetes/kubernetes/pull/68080), [@caesarxuchao](https://github.com/caesarxuchao)) Courtesy of SIG API Machinery +- Volume dynamic provisioning scheduling has been moved to beta, which means that the DynamicProvisioningScheduling alpha feature gate has been removed but the VolumeScheduling beta feature gate is still required for this feature. ([#67432](https://github.com/kubernetes/kubernetes/pull/67432), [@lichuqiang](https://github.com/lichuqiang)) Courtesy of SIG Apps, SIG Architecture, SIG Storage, and SIG Testing +- The API server and client-go libraries have been fixed to support additional non-alpha-numeric characters in UserInfo "extra" data keys. Both should be updated in order to properly support extra data containing "/" characters or other characters disallowed in HTTP headers. 
([#65799](https://github.com/kubernetes/kubernetes/pull/65799), [@dekkagaijin](https://github.com/dekkagaijin)) Courtesy of SIG Auth +- The `NodeConfiguration` kind in the kubeadm v1alpha2 API has been renamed `JoinConfiguration` in v1alpha3 ([#65951](https://github.com/kubernetes/kubernetes/pull/65951), [@luxas](https://github.com/luxas)) Courtesy of SIG Cluster Lifecycle +- The `MasterConfiguration` kind in the kubeadm v1alpha2 API has been renamed `InitConfiguration` in v1alpha3 ([#65945](https://github.com/kubernetes/kubernetes/pull/65945), [@luxas](https://github.com/luxas)) Courtesy of SIG Cluster Lifecycle +- The formerly publicly-available cAdvisor web UI that the kubelet started using `--cadvisor-port` has been entirely removed in 1.12. The recommended way to run cAdvisor if you still need it, is via a DaemonSet. ([#65707](https://github.com/kubernetes/kubernetes/pull/65707), [@dims](https://github.com/dims)) +- Cluster Autoscaler version has been updated to 1.3.1-beta.1. Release notes: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.3.1-beta.1 ([#65857](https://github.com/kubernetes/kubernetes/pull/65857), [@aleksandra-malinowska](https://github.com/aleksandra-malinowska)) Courtesy of SIG Autoscaling +- kubeadm: The `v1alpha1` config API has been removed. ([#65628](https://github.com/kubernetes/kubernetes/pull/65628), [@luxas](https://github.com/luxas)) Courtesy of SIG Cluster Lifecycle +- kube-apiserver: When using `--enable-admission-plugins` the `Priority` admission plugin is now enabled by default (matching changes in 1.11.1+). If using `--admission-control` to fully specify the set of admission plugins, it is now necessary to add the `Priority` admission plugin for the PodPriority feature to work properly. 
([#65739](https://github.com/kubernetes/kubernetes/pull/65739), [@liggitt](https://github.com/liggitt)) Courtesy of SIG Scheduling +- The `system-node-critical` and `system-cluster-critical` priority classes are now limited to the `kube-system` namespace by the `PodPriority` admission plugin (matching changes in 1.11.1+). ([#65593](https://github.com/kubernetes/kubernetes/pull/65593), [@bsalamat](https://github.com/bsalamat)) Courtesy of SIG Scheduling +- kubeadm: Control plane images (etcd, kube-apiserver, kube-proxy, etc.) no longer use arch suffixes. Arch suffixes are kept for kube-dns only. ([#66960](https://github.com/kubernetes/kubernetes/pull/66960), +[@rosti](https://github.com/rosti)) Courtesy of SIG Cluster Lifecycle, SIG Release, and SIG Testing +- kubeadm - Feature-gates HighAvailability, SelfHosting, CertsInSecrets are now deprecated and can no longer be used for new clusters. Cluster updates using the above feature-gates flag are not supported. ([#67786](https://github.com/kubernetes/kubernetes/pull/67786), [@fabriziopandini](https://github.com/fabriziopandini)) Courtesy of SIG Cluster Lifecycle +- 'KubeSchedulerConfiguration' which used to be under GroupVersion 'componentconfig/v1alpha1', +is now under 'kubescheduler.config.k8s.io/v1alpha1'. ([#66916](https://github.com/kubernetes/kubernetes/pull/66916), [@dixudx](https://github.com/dixudx)) Courtesy of SIG Cluster Lifecycle, SIG Scheduling, and SIG Testing +- The flag `--skip-preflight-checks` of kubeadm has been removed. Please use `--ignore-preflight-errors` instead. ([#62727](https://github.com/kubernetes/kubernetes/pull/62727), [@xiangpengzhao](https://github.com/xiangpengzhao)) +- If Openstack LoadBalancer is not defined in cloud config, the load balancer will no longer be initialized. All setups must have some setting under that section for the OpenStack provider.
([#65781](https://github.com/kubernetes/kubernetes/pull/65781), [@zetaab](https://github.com/zetaab)) + +## Deprecations and removals + +- Kubeadm: The Alpha feature-gates HighAvailability, SelfHosting, CertsInSecrets are now deprecated, and will be removed in k8s v1.13.0. +- The cloudstack and ovirt controllers have been deprecated and will be removed in a future version. ([#68199](https://github.com/kubernetes/kubernetes/pull/68199), [@dims](https://github.com/dims)) +- All kubectl run generators have been deprecated except for run-pod/v1. This is part of a move to make `kubectl run` simpler, enabling it to create only pods; if additional resources are needed, you should use `kubectl create` instead. ([#68132](https://github.com/kubernetes/kubernetes/pull/68132), [@soltysh](https://github.com/soltysh)) +- The deprecated --interactive flag has been removed from kubectl logs. ([#65420](https://github.com/kubernetes/kubernetes/pull/65420), [@jsoref](https://github.com/jsoref)) +- The deprecated shorthand flag `-c` has been removed from `kubectl version (--client)`. ([#66817](https://github.com/kubernetes/kubernetes/pull/66817), [@charrywanganthony](https://github.com/charrywanganthony)) +- The `--pod` flag (`-p` shorthand) of the kubectl exec command has been marked as deprecated, and will be removed in a future version. This flag is currently optional. ([#66558](https://github.com/kubernetes/kubernetes/pull/66558), [@quasoft](https://github.com/quasoft)) +- kubectl: `--use-openapi-print-columns` has been deprecated in favor of `--server-print`, and will be removed in a future version. ([#65601](https://github.com/kubernetes/kubernetes/pull/65601), [@liggitt](https://github.com/liggitt)) +- The watch API endpoints prefixed with `/watch` are deprecated and will be removed in a future release. The standard method for watching resources (supported since v1.0) is to use the list API endpoints with a `?watch=true` parameter.
All client-go clients have used the parameter method since v1.6.0. ([#65147](https://github.com/kubernetes/kubernetes/pull/65147), [@liggitt](https://github.com/liggitt)) +- Using the Horizontal Pod Autoscaler with metrics from Heapster is now deprecated and will be disabled in a future version. ([#68089](https://github.com/kubernetes/kubernetes/pull/68089), [@DirectXMan12](https://github.com/DirectXMan12)) +- The watch API endpoints prefixed with `/watch` are deprecated and will be removed in a future release. These standard method for watching resources (supported since v1.0) is to use the list API endpoints with a `?watch=true` parameter. All client-go clients have used the parameter method since v1.6.0. ([#65147](https://github.com/kubernetes/kubernetes/pull/65147), [@liggitt](https://github.com/liggitt)) + +## New Features + +- Kubernetes now registers volume topology information reported by a node-level Container Storage Interface (CSI) driver. This enables Kubernetes support of CSI topology mechanisms. ([#67684](https://github.com/kubernetes/kubernetes/pull/67684), [@verult](https://github.com/verult)) Courtesy of SIG API Machinery, SIG Node, SIG Storage, and SIG Testing +- Addon-manager has been bumped to v8.7 ([#68299](https://github.com/kubernetes/kubernetes/pull/68299), [@MrHohn](https://github.com/MrHohn)) Courtesy of SIG Cluster Lifecycle, and SIG Testing +- The CSI volume plugin no longer needs an external attacher for non-attachable CSI volumes. ([#67955](https://github.com/kubernetes/kubernetes/pull/67955), [@jsafrane](https://github.com/jsafrane)) Courtesy of SIG API Machinery, SIG Node, SIG Storage, and SIG Testing +- KubeletPluginsWatcher feature graduated to beta. ([#68200](https://github.com/kubernetes/kubernetes/pull/68200), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) Courtesy of SIG Node, SIG Storage, and SIG Testing +- A TTL mechanism has been added to clean up Jobs after they finish. 
([#66840](https://github.com/kubernetes/kubernetes/pull/66840), [@janetkuo](https://github.com/janetkuo)) Courtesy of SIG API Machinery, SIG Apps, SIG Architecture, and SIG Testing +- The scheduler is now optimized to throttle computational tasks involved with node selection. ([#67555](https://github.com/kubernetes/kubernetes/pull/67555), [@wgliang](https://github.com/wgliang)) Courtesy of SIG API Machinery, and SIG Scheduling +- The performance of Pod affinity/anti-affinity in the scheduler has been improved. ([#67788](https://github.com/kubernetes/kubernetes/pull/67788), [@ahmad-diaa](https://github.com/ahmad-diaa)) Courtesy of SIG Scalability, and SIG Scheduling +- A kubelet parameter and config option has been added to change the CFS quota period from the default 100ms to some other value between 1µs and 1s. This was done to improve response latencies for workloads running in clusters with guaranteed and burstable QoS classes. ([#63437](https://github.com/kubernetes/kubernetes/pull/63437), [@szuecs](https://github.com/szuecs)) Courtesy of SIG API Machinery, SIG Apps, SIG Architecture, SIG CLI,, SIG Node, and SIG Scheduling +- Secure serving on port 10258 to cloud-controller-manager (configurable via `--secure-port`) is now enabled. Delegated authentication and authorization are to be configured using the same flags as for aggregated API servers. Without configuration, the secure port will only allow access to `/healthz`. ([#67069](https://github.com/kubernetes/kubernetes/pull/67069), [@sttts](https://github.com/sttts)) Courtesy of SIG Auth, and SIG Cloud Provider +- The commands `kubeadm alpha phases renew ` have been added. ([#67910](https://github.com/kubernetes/kubernetes/pull/67910), [@liztio](https://github.com/liztio)) Courtesy of SIG API Machinery, and SIG Cluster Lifecycle +- ProcMount has been added to SecurityContext and AllowedProcMounts has been added to PodSecurityPolicy to allow paths in the container's /proc to not be masked. 
([#64283](https://github.com/kubernetes/kubernetes/pull/64283), [@jessfraz](https://github.com/jessfraz)) Courtesy of SIG API Machinery, SIG Apps, SIG Architecture, and SIG Node +- Secure serving on port 10257 to kube-controller-manager (configurable via `--secure-port`) is now enabled. Delegated authentication and authorization are to be configured using the same flags as for aggregated API servers. Without configuration, the secure port will only allow access to `/healthz`. ([#64149](https://github.com/kubernetes/kubernetes/pull/64149), [@sttts](https://github.com/sttts)) Courtesy of SIG API Machinery, SIG Auth, SIG Cloud Provider, SIG Scheduling, and SIG Testing +- Azure cloud provider now supports unmanaged nodes (such as on-prem) that are labeled with `kubernetes.azure.com/managed=false` and `alpha.service-controller.kubernetes.io/exclude-balancer=true` ([#67984](https://github.com/kubernetes/kubernetes/pull/67984), [@feiskyer](https://github.com/feiskyer)) Courtesy of SIG Azure, and SIG Cloud Provider +- SCTP is now supported as an additional protocol (alpha) alongside TCP and UDP in Pod, Service, Endpoint, and NetworkPolicy. ([#64973](https://github.com/kubernetes/kubernetes/pull/64973), [@janosi](https://github.com/janosi)) Courtesy of SIG API Machinery, SIG Apps, SIG Architecture, SIG CLI, SIG Cloud Provider, SIG Cluster Lifecycle, SIG Network, SIG Node, and SIG Scheduling +- Autoscaling/v2beta2 and custom_metrics/v1beta2 have been introduced, which implement metric selectors for Object and Pods metrics, as well as allowing AverageValue targets on Objects, similar to External metrics. ([#64097](https://github.com/kubernetes/kubernetes/pull/64097), [@damemi](https://github.com/damemi)) Courtesy of SIG API Machinery, SIG Architecture, SIG Autoscaling, SIG CLI, and SIG Testing +- kubelet: Users can now enable the alpha NodeLease feature gate to have the Kubelet create and periodically renew a Lease in the kube-node-lease namespace. 
The lease duration defaults to 40s, and can be configured via the kubelet.config.k8s.io/v1beta1.KubeletConfiguration's NodeLeaseDurationSeconds field. ([#66257](https://github.com/kubernetes/kubernetes/pull/66257), [@mtaufen](https://github.com/mtaufen)) Courtesy of SIG API Machinery, SIG Apps, SIG Architecture, SIG Cluster Lifecycle, SIG Node, and SIG Testing +- PodReadinessGate is now turned on by default. ([#67406](https://github.com/kubernetes/kubernetes/pull/67406), [@freehan](https://github.com/freehan)) Courtesy of SIG Node +- Azure cloud provider now supports cross resource group nodes that are labeled with `kubernetes.azure.com/resource-group=` and `alpha.service-controller.kubernetes.io/exclude-balancer=true` ([#67604](https://github.com/kubernetes/kubernetes/pull/67604), [@feiskyer](https://github.com/feiskyer)) Courtesy of SIG Azure, SIG Cloud Provider, and SIG Storage +- Annotations are now supported for remote admission webhooks. ([#58679](https://github.com/kubernetes/kubernetes/pull/58679), [@CaoShuFeng](https://github.com/CaoShuFeng)) Courtesy of SIG API Machinery, and SIG Auth +- The scheduler now scores fewer than all nodes in every scheduling cycle. This can improve performance of the scheduler in large clusters. ([#66733](https://github.com/kubernetes/kubernetes/pull/66733), [@bsalamat](https://github.com/bsalamat)) Courtesy of SIG Scheduling +- Node affinity for Azure unzoned managed disks has been added. ([#67229](https://github.com/kubernetes/kubernetes/pull/67229), [@feiskyer](https://github.com/feiskyer)) Courtesy of SIG Azure +- The Attacher/Detacher interfaces for local storage have been refactored ([#66884](https://github.com/kubernetes/kubernetes/pull/66884), [@NickrenREN](https://github.com/NickrenREN)) Courtesy of SIG Storage +- DynamicProvisioningScheduling and VolumeScheduling is now supported for Azure managed disks. Feature gates DynamicProvisioningScheduling and VolumeScheduling should be enabled before using this feature. 
([#67121](https://github.com/kubernetes/kubernetes/pull/67121), [@feiskyer](https://github.com/feiskyer)) Courtesy of SIG Azure, and SIG Storage +- The audit.k8s.io api group has been upgraded from v1beta1 to v1. ([#65891](https://github.com/kubernetes/kubernetes/pull/65891), [@CaoShuFeng](https://github.com/CaoShuFeng)) Courtesy of SIG API Machinery +- The quota admission configuration API graduated to v1beta1. ([#66156](https://github.com/kubernetes/kubernetes/pull/66156), [@vikaschoudhary16](https://github.com/vikaschoudhary16)) Courtesy of SIG Node, and SIG Scheduling +- Kube-apiserver --help flag help is now printed in sections. ([#64517](https://github.com/kubernetes/kubernetes/pull/64517), [@sttts](https://github.com/sttts)) +- Azure managed disks now support availability zones and new parameters `zoned`, `zone` and `zones` are added for AzureDisk storage class. ([#66553](https://github.com/kubernetes/kubernetes/pull/66553), [@feiskyer](https://github.com/feiskyer)) Courtesy of SIG Azure +- Kubectl create job command has been added. ([#60316](https://github.com/kubernetes/kubernetes/pull/60316), [@soltysh](https://github.com/soltysh)) Courtesy of SIG CLI +- Kubelet serving certificate bootstrapping and rotation has been promoted to beta status. ([#66726](https://github.com/kubernetes/kubernetes/pull/66726), [@liggitt](https://github.com/liggitt)) Courtesy of SIG Auth, and SIG Node +- Azure nodes with availability zone will now have label `failure-domain.beta.kubernetes.io/zone=-`. ([#66242](https://github.com/kubernetes/kubernetes/pull/66242), [@feiskyer](https://github.com/feiskyer)) Courtesy of SIG Azure +- kubeadm: Default component configs are now printable via kubeadm config print-default ([#66074](https://github.com/kubernetes/kubernetes/pull/66074), [@rosti](https://github.com/rosti)) Courtesy of SIG Cluster Lifecycle +- Mount propagation has been promoted to GA. The `MountPropagation` feature gate is deprecated and will be removed in 1.13. 
([#67255](https://github.com/kubernetes/kubernetes/pull/67255), [@bertinatto](https://github.com/bertinatto)) Courtesy of SIG Apps, SIG Architecture, SIG Node, and SIG Storage +- Ubuntu 18.04 (Bionic) series has been added to Juju charms ([#65644](https://github.com/kubernetes/kubernetes/pull/65644), [@tvansteenburgh](https://github.com/tvansteenburgh)) +- kubeadm: The kubeadm configuration now supports the definition of more than one control plane instances with their own APIEndpoint. The APIEndpoint for the "bootstrap" control plane instance should be defined using `InitConfiguration.APIEndpoint`, while the APIEndpoints for additional control plane instances should be added using `JoinConfiguration.APIEndpoint`. ([#67832](https://github.com/kubernetes/kubernetes/pull/67832), [@fabriziopandini](https://github.com/fabriziopandini)) +- Add new `--server-dry-run` flag to `kubectl apply` so that the request will be sent to the server with the dry-run flag (alpha), which means that changes won't be persisted. ([#68069](https://github.com/kubernetes/kubernetes/pull/68069), [@apelisse](https://github.com/apelisse)) +- Introduce CSI Cluster Registration mechanism to ease CSI plugin discovery and allow CSI drivers to customize Kubernetes' interaction with them. ([#67803](https://github.com/kubernetes/kubernetes/pull/67803), [@saad-ali](https://github.com/saad-ali)) +- The PodShareProcessNamespace feature to configure PID namespace sharing within a pod has been promoted to beta. ([#66507](https://github.com/kubernetes/kubernetes/pull/66507), [@verb](https://github.com/verb)) + +## API Changes + +- kubeadm now supports the phase command "alpha phase kubelet config annotate-cri". ([#68449](https://github.com/kubernetes/kubernetes/pull/68449), [@fabriziopandini](https://github.com/fabriziopandini)) +- kubeadm: --cri-socket now defaults to tcp://localhost:2375 when running on Windows. 
([#67447](https://github.com/kubernetes/kubernetes/pull/67447), [@benmoss](https://github.com/benmoss)) +- kubeadm now includes a new EXPERIMENTAL `--rootfs`, which (if specified) causes kubeadm to chroot before performing any file operations. This is expected to be useful when setting up kubernetes on a different filesystem, such as invoking kubeadm from docker. ([#54935](https://github.com/kubernetes/kubernetes/pull/54935), [@anguslees](https://github.com/anguslees)) +- The command line option --cri-socket-path of the kubeadm subcommand "kubeadm config images pull" has been renamed to --cri-socket to be consistent with the rest of kubeadm subcommands. +- kubeadm: The ControlPlaneEndpoint was moved from the API config struct to ClusterConfiguration ([#67830](https://github.com/kubernetes/kubernetes/pull/67830), [@fabriziopandini](https://github.com/fabriziopandini)) +- kubeadm: InitConfiguration now consists of two structs: InitConfiguration and ClusterConfiguration ([#67441](https://github.com/kubernetes/kubernetes/pull/67441), [@rosti](https://github.com/rosti)) +- The RuntimeClass API has been added. This feature is in alpha, and the RuntimeClass feature gate must be enabled in order to use it. The RuntimeClass API resource defines different classes of runtimes that may be used to run containers in the cluster. Pods can select a RuntimeClass to use via the RuntimeClassName field. ([#67737](https://github.com/kubernetes/kubernetes/pull/67737), [@tallclair](https://github.com/tallclair)) +- To address the possibility of dry-run requests overwhelming admission webhooks that rely on side effects and a reconciliation mechanism, a new field is being added to `admissionregistration.k8s.io/v1beta1.ValidatingWebhookConfiguration` and `admissionregistration.k8s.io/v1beta1.MutatingWebhookConfiguration` so that webhooks can explicitly register as having dry-run support. 
If a dry-run request is made on a resource that triggers a non dry-run supporting webhook, the request will be completely rejected, with "400: Bad Request". Additionally, a new field is being added to the `admission.k8s.io/v1beta1.AdmissionReview` API object, exposing to webhooks whether or not the request being reviewed is a dry-run. ([#66936](https://github.com/kubernetes/kubernetes/pull/66936), [@jennybuckley](https://github.com/jennybuckley)) +- CRI now supports a "runtime_handler" field for RunPodSandboxRequest, used for selecting the runtime configuration to run the sandbox with (alpha feature). ([#67518](https://github.com/kubernetes/kubernetes/pull/67518), [@tallclair](https://github.com/tallclair)) +- More fields are allowed at the root of the CRD validation schema when the status subresource is enabled. ([#65357](https://github.com/kubernetes/kubernetes/pull/65357), [@nikhita](https://github.com/nikhita)) +- The --docker-disable-shared-pid kubelet flag has been removed. PID namespace sharing can instead be enable per-pod using the ShareProcessNamespace option. ([#66506](https://github.com/kubernetes/kubernetes/pull/66506), [@verb](https://github.com/verb)) +- Added the --dns-loop-detect option to dnsmasq, which is run by kube-dns. ([#67302](https://github.com/kubernetes/kubernetes/pull/67302), [@dixudx](https://github.com/dixudx)) +- Kubernetes now supports extra `--prune-whitelist` resources in kube-addon-manager. ([#67743](https://github.com/kubernetes/kubernetes/pull/67743), [@Random-Liu](https://github.com/Random-Liu)) +- Graduate Resource Quota ScopeSelectors to beta, and enable it by default. ([#67077](https://github.com/kubernetes/kubernetes/pull/67077), [@vikaschoudhary16](https://github.com/vikaschoudhary16)) +- The OpenAPI spec and documentation now reflect the 202 Accepted response path for delete requests. Note that this change in the openapi spec may affect some clients that depend on the error paths. 
([#63418](https://github.com/kubernetes/kubernetes/pull/63418), [@roycaihw](https://github.com/roycaihw)) +- The alpha `Initializers` admission plugin is no longer enabled by default. This matches the off-by-default behavior of the alpha API which drives initializer behavior. ([#66039](https://github.com/kubernetes/kubernetes/pull/66039), [@liggitt](https://github.com/liggitt)) +- Adding validation to kube-scheduler at the API level ([#66799](https://github.com/kubernetes/kubernetes/pull/66799), [@noqcks](https://github.com/noqcks)) +- `DisruptedPods` field in `PodDisruptionBudget` is optional instead of required. ([#63757](https://github.com/kubernetes/kubernetes/pull/63757), [@nak3](https://github.com/nak3)) + +## Other Notable Changes + +### SIG API Machinery + +- `kubectl get apiservice` now shows the target service and whether the service is available ([#67747](https://github.com/kubernetes/kubernetes/pull/67747), [@smarterclayton](https://github.com/smarterclayton)) +- Apiserver panics will now be returned as 500 errors rather than terminating the apiserver process. ([#68001](https://github.com/kubernetes/kubernetes/pull/68001), [@sttts](https://github.com/sttts)) +- API paging is now enabled for custom resource definitions, custom resources and APIService objects. ([#67861](https://github.com/kubernetes/kubernetes/pull/67861), [@liggitt](https://github.com/liggitt)) +- To address the possibility of dry-run requests overwhelming admission webhooks that rely on side effects and a reconciliation mechanism, a new field is being added to admissionregistration.k8s.io/v1beta1.ValidatingWebhookConfiguration and admissionregistration.k8s.io/v1beta1.MutatingWebhookConfiguration so that webhooks can explicitly register as having dry-run support. If a dry-run request is made on a resource that triggers a non dry-run supporting webhook, the request will be completely rejected, with "400: Bad Request". 
Additionally, a new field is being added to the admission.k8s.io/v1beta1.AdmissionReview API object, exposing to webhooks whether or not the request being reviewed is a dry-run. ([#66936](https://github.com/kubernetes/kubernetes/pull/66936), [@jennybuckley](https://github.com/jennybuckley)) +- kube-apiserver now includes all registered API groups in discovery, including registered extension API group/versions for unavailable extension API servers. ([#66932](https://github.com/kubernetes/kubernetes/pull/66932), [@nilebox](https://github.com/nilebox)) +- kube-apiserver: setting a `dryRun` query parameter on a CONNECT request will now cause the request to be rejected, consistent with behavior of other mutating API requests. Examples of CONNECT APIs are the `nodes/proxy`, `services/proxy`, `pods/proxy`, `pods/exec`, and `pods/attach` subresources. Note that this prevents sending a `dryRun` parameter to backends via `{nodes,services,pods}/proxy` subresources. ([#66083](https://github.com/kubernetes/kubernetes/pull/66083), [@jennybuckley](https://github.com/jennybuckley)) +- In clusters where the DryRun feature is enabled, dry-run requests will go through the normal admission chain. Because of this, ImagePolicyWebhook authors should especially make sure that their webhooks do not rely on side effects. ([#66391](https://github.com/kubernetes/kubernetes/pull/66391), [@jennybuckley](https://github.com/jennybuckley)) +- Added etcd_object_count metrics for CustomResources. ([#65983](https://github.com/kubernetes/kubernetes/pull/65983), [@sttts](https://github.com/sttts)) +- The OpenAPI version field will now be properly autopopulated without needing other OpenAPI fields present in generic API server code. ([#66411](https://github.com/kubernetes/kubernetes/pull/66411), [@DirectXMan12](https://github.com/DirectXMan12)) +- TLS timeouts have been extended to work around slow arm64 math/big functions. 
([#66264](https://github.com/kubernetes/kubernetes/pull/66264), [@joejulian](https://github.com/joejulian)) +- Kubernetes now checks CREATE admission for create-on-update requests instead of UPDATE admission. ([#65572](https://github.com/kubernetes/kubernetes/pull/65572), [@yue9944882](https://github.com/yue9944882)) +- kube- and cloud-controller-manager can now listen on ports up to 65535 rather than 32768, solving problems with operating systems that request these higher ports.. ([#65860](https://github.com/kubernetes/kubernetes/pull/65860), [@sttts](https://github.com/sttts)) +- LimitRange and Endpoints resources can be created via an update API call if the object does not already exist. When this occurs, an authorization check is now made to ensure the user making the API call is authorized to create the object. In previous releases, only an update authorization check was performed. ([#65150](https://github.com/kubernetes/kubernetes/pull/65150), [@jennybuckley](https://github.com/jennybuckley)) +- More fields are allowed at the root of the CRD validation schema when the status subresource is enabled. ([#65357](https://github.com/kubernetes/kubernetes/pull/65357), [@nikhita](https://github.com/nikhita)) +- api-machinery utility functions `SetTransportDefaults` and `DialerFor` once again respect custom Dial functions set on transports ([#65547](https://github.com/kubernetes/kubernetes/pull/65547), [@liggitt](https://github.com/liggitt)) +- AdvancedAuditing has been promoted to GA, replacing the previous (legacy) audit logging mechanisms. ([#65862](https://github.com/kubernetes/kubernetes/pull/65862), [@loburm](https://github.com/loburm)) +- Added --authorization-always-allow-paths to components doing delegated authorization to exclude certain HTTP paths like /healthz from authorization. 
([#67543](https://github.com/kubernetes/kubernetes/pull/67543), [@sttts](https://github.com/sttts)) +- Allow ImageReview backend to return annotations to be added to the created pod. ([#64597](https://github.com/kubernetes/kubernetes/pull/64597), [@wteiken](https://github.com/wteiken)) +- Upon receiving a LIST request with an expired continue token, the apiserver now returns a continue token together with the 410 "the from parameter is too old" error. If the client does not care about getting a list from a consistent snapshot, the client can use this token to continue listing from the next key, but the returned chunk will be from the latest snapshot. ([#67284](https://github.com/kubernetes/kubernetes/pull/67284), [@caesarxuchao](https://github.com/caesarxuchao)) + +### SIG Apps + +- The service controller will now retry creating the load balancer when `persistUpdate` fails due to conflict. ([#68087](https://github.com/kubernetes/kubernetes/pull/68087), [@grayluck](https://github.com/grayluck)) +- The latent controller caches no longer cause repeating deletion messages for deleted pods. ([#67826](https://github.com/kubernetes/kubernetes/pull/67826), [@deads2k](https://github.com/deads2k)) + +### SIG Auth + +- TokenRequest and TokenRequestProjection are now beta features. To enable these feature, the API server needs to be started with the `--service-account-issuer`, `--service-account-signing-key-file`, and `--service-account-api-audiences` flags. +([#67349](https://github.com/kubernetes/kubernetes/pull/67349), [@mikedanese](https://github.com/mikedanese)) +- The admin RBAC role now aggregates edit and view. The edit RBAC role now aggregates view. ([#66684](https://github.com/kubernetes/kubernetes/pull/66684), [@deads2k](https://github.com/deads2k)) +- UserInfo derived from service account tokens created from the TokenRequest API now include the pod name and UID in the Extra field. 
([#61858](https://github.com/kubernetes/kubernetes/pull/61858), [@mikedanese](https://github.com/mikedanese)) +- The extension API server can now dynamically discover the requestheader CA certificate when the core API server doesn't use certificate based authentication for its clients. ([#66394](https://github.com/kubernetes/kubernetes/pull/66394), [@rtripat](https://github.com/rtripat)) + +### SIG Autoscaling + +- Horizontal Pod Autoscaler default update interval has been decreased from 30s to 15s, improving HPA reaction time for metric changes. ([#68021](https://github.com/kubernetes/kubernetes/pull/68021), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski)) +- To avoid soft-deleted pods incorrectly affecting scale up replica count calculations, the HPA controller will stop counting soft-deleted pods for scaling purposes. ([#67067](https://github.com/kubernetes/kubernetes/pull/67067), [@moonek](https://github.com/moonek)) +- HPA reaction to metric changes has been sped up by removing the scale up forbidden window. ([#66615](https://github.com/kubernetes/kubernetes/pull/66615), [@jbartosik](https://github.com/jbartosik)) + +### SIG AWS + +- AWS LoadBalancer security group ICMP rules now match the documentation of spec.loadBalancerSourceRanges ([#63572](https://github.com/kubernetes/kubernetes/pull/63572), [@haz-mat](https://github.com/haz-mat)) +- The aws cloud provider now reports a `Hostname` address type for nodes based on the `local-hostname` metadata key. ([#67715](https://github.com/kubernetes/kubernetes/pull/67715), [@liggitt](https://github.com/liggitt)) + +### SIG Azure + +- API calls for Azure instance metadata have been reduced to help avoid "too many requests" errors. ([#67478](https://github.com/kubernetes/kubernetes/pull/67478), [@feiskyer](https://github.com/feiskyer)) +- Azure Go SDK has been upgraded to v19.0.0 and VirtualMachineScaleSetVM now supports availability zones. 
([#66648](https://github.com/kubernetes/kubernetes/pull/66648), [@feiskyer](https://github.com/feiskyer)) +- User Assigned MSI (https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview), which provides for managed identities, is now supported for Kubernetes clusters on Azure. ([#66180](https://github.com/kubernetes/kubernetes/pull/66180), [@kkmsft](https://github.com/kkmsft)) +- The Azure load balancer idle connection timeout for services is now configurable. ([#66045](https://github.com/kubernetes/kubernetes/pull/66045), [@cpuguy83](https://github.com/cpuguy83)) +- When provisioning workloads, Kubernetes will now skip nodes that have a primary NIC in a 'Failed' provisioningState. ([#65412](https://github.com/kubernetes/kubernetes/pull/65412), [@yastij](https://github.com/yastij)) +- The NodeShutdown taint is now supported for Azure. ([#68033](https://github.com/kubernetes/kubernetes/pull/68033), [@yastij](https://github.com/yastij)) + +### SIG CLI + +- Added a sample-cli-plugin staging repository and cli-runtime staging repository to help showcase the new kubectl plugins mechanism. ([#67938](https://github.com/kubernetes/kubernetes/pull/67938), [#67658](https://github.com/kubernetes/kubernetes/pull/67658), [@soltysh](https://github.com/soltysh)) +- The plugin mechanism functionality now closely follows the git plugin design ([#66876](https://github.com/kubernetes/kubernetes/pull/66876), [@juanvallejo](https://github.com/juanvallejo)) +- kubectl patch now respects --local ([#67399](https://github.com/kubernetes/kubernetes/pull/67399), [@deads2k](https://github.com/deads2k)) +- kubectl: When an object can't be updated and must be deleted by force, kubectl will now recreate resources for immutable fields. ([#66602](https://github.com/kubernetes/kubernetes/pull/66602), [@dixudx](https://github.com/dixudx)) +- `kubectl create {clusterrole,role}`'s `--resources` flag now supports asterisk to specify all resources. 
([#62945](https://github.com/kubernetes/kubernetes/pull/62945), [@nak3](https://github.com/nak3)) +- kubectl: the wait command now prints an error message and exits with the code 1, if there is no resources matching selectors ([#66692](https://github.com/kubernetes/kubernetes/pull/66692), [@m1kola](https://github.com/m1kola)) +- Kubectl now handles newlines for `command`, `args`, `env`, and `annotations` in `kubectl describe` wrapping. ([#66841](https://github.com/kubernetes/kubernetes/pull/66841), [@smarterclayton](https://github.com/smarterclayton)) +- The `kubectl patch` command no longer exits with exit code 1 when a redundant patch results in a no-op ([#66725](https://github.com/kubernetes/kubernetes/pull/66725), [@juanvallejo](https://github.com/juanvallejo)) +- The output of `kubectl get events` has been improved to prioritize showing the message, and to move some fields to `-o wide`. ([#66643](https://github.com/kubernetes/kubernetes/pull/66643), [@smarterclayton](https://github.com/smarterclayton)) +- `kubectl config set-context` can now set attributes of the current context, such as the current namespace, by passing `--current` instead of a specific context name ([#66140](https://github.com/kubernetes/kubernetes/pull/66140), [@liggitt](https://github.com/liggitt)) +- "kubectl delete" no longer waits for dependent objects to be deleted when removing parent resources ([#65908](https://github.com/kubernetes/kubernetes/pull/65908), [@juanvallejo](https://github.com/juanvallejo)) +- A new flag, `--keepalive`, has been introduced, for kubectl proxy to allow setting keep-alive period for long-running request. 
([#63793](https://github.com/kubernetes/kubernetes/pull/63793), [@hzxuzhonghu](https://github.com/hzxuzhonghu)) +- kubectl: fixed a regression with --use-openapi-print-columns that would not print object contents ([#65600](https://github.com/kubernetes/kubernetes/pull/65600), [@liggitt](https://github.com/liggitt)) +- The display of jobs in `kubectl get` and `kubectl describe` has been improved to emphasize progress and duration. ([#65463](https://github.com/kubernetes/kubernetes/pull/65463), [@smarterclayton](https://github.com/smarterclayton)) +- CSI volume attributes have been added to `kubectl describe pv`. ([#65074](https://github.com/kubernetes/kubernetes/pull/65074), [@wgliang](https://github.com/wgliang)) +- Running `kubectl describe pvc` now shows which pods are mounted to the pvc being described with the `Mounted By` field ([#65837](https://github.com/kubernetes/kubernetes/pull/65837), [@clandry94](https://github.com/clandry94)) +- `kubectl create secret tls` can now read certificate and key files from process substitution arguments ([#67713](https://github.com/kubernetes/kubernetes/pull/67713), [@liggitt](https://github.com/liggitt)) +- `kubectl rollout status` now works for unlimited timeouts. ([#67817](https://github.com/kubernetes/kubernetes/pull/67817), [@tnozicka](https://github.com/tnozicka)) + +### SIG Cloud Provider + +- The cloudstack cloud provider now reports a `Hostname` address type for nodes based on the `local-hostname` metadata key. ([#67719](https://github.com/kubernetes/kubernetes/pull/67719), [@liggitt](https://github.com/liggitt)) +- The OpenStack cloud provider now reports a `Hostname` address type for nodes ([#67748](https://github.com/kubernetes/kubernetes/pull/67748), [@FengyunPan2](https://github.com/FengyunPan2)) +- The vSphere cloud provider now supports zones. 
([#66795](https://github.com/kubernetes/kubernetes/pull/66795), [@jiatongw](https://github.com/jiatongw)) + +### SIG Cluster Lifecycle + +- External CAs can now be used for kubeadm with only a certificate, as long as all required certificates already exist. ([#68296](https://github.com/kubernetes/kubernetes/pull/68296), [@liztio](https://github.com/liztio)) +- kubeadm now works better when not connected to the Internet. In addition, common kubeadm commands will now work without an available networking interface. ([#67397](https://github.com/kubernetes/kubernetes/pull/67397), [@neolit123](https://github.com/neolit123)) +- Scrape frequency of metrics-server has been increased to 30s.([#68127](https://github.com/kubernetes/kubernetes/pull/68127), [@serathius](https://github.com/serathius)) +- Kubernetes juju charms will now use CSI for ceph. ([#66523](https://github.com/kubernetes/kubernetes/pull/66523), [@hyperbolic2346](https://github.com/hyperbolic2346)) +- kubeadm uses audit policy v1 instead of v1beta1 ([#67176](https://github.com/kubernetes/kubernetes/pull/67176), [@charrywanganthony](https://github.com/charrywanganthony)) +- Kubeadm nodes will no longer be able to run with an empty or invalid hostname in /proc/sys/kernel/hostname ([#64815](https://github.com/kubernetes/kubernetes/pull/64815), [@dixudx](https://github.com/dixudx)) +- kubeadm now can join the cluster with pre-existing client certificate if provided ([#66482](https://github.com/kubernetes/kubernetes/pull/66482), [@dixudx](https://github.com/dixudx)) +([#66382](https://github.com/kubernetes/kubernetes/pull/66382), [@bart0sh](https://github.com/bart0sh)) +- kubeadm will no longer hang indefinitely if there is no Internet connection and --kubernetes-version is not specified.([#65676](https://github.com/kubernetes/kubernetes/pull/65676), [@dkoshkin](https://github.com/dkoshkin)) +- kubeadm: kube-proxy will now run on all nodes, and not just master 
nodes.([#65931](https://github.com/kubernetes/kubernetes/pull/65931), [@neolit123](https://github.com/neolit123)) +- kubeadm now uses separate YAML documents for the kubelet and kube-proxy ComponentConfigs. ([#65787](https://github.com/kubernetes/kubernetes/pull/65787), [@luxas](https://github.com/luxas)) +- kubeadm will now print required flags when running `kubeadm upgrade plan`. ([#65802](https://github.com/kubernetes/kubernetes/pull/65802), [@xlgao-zju](https://github.com/xlgao-zju)) +- Unix support for ZFS as a valid graph driver has been added for Docker, enabling users to use Kubeadm with ZFS. ([#65635](https://github.com/kubernetes/kubernetes/pull/65635), [@neolit123](https://github.com/neolit123)) + +### SIG GCP + +- GCE: decrease cpu requests on master node, to allow more components to fit on one core machine. ([#67504](https://github.com/kubernetes/kubernetes/pull/67504), [@loburm](https://github.com/loburm)) +- Kubernetes 1.12 includes a large number of metadata agent improvements, including expanding the metadata agent's access to all API groups and removing metadata agent config maps in favor of command line flags. It also includes improvements to the logging agent, such as multiple fixes and adjustments. + ([#66485](https://github.com/kubernetes/kubernetes/pull/66485), [@bmoyles0117](https://github.com/bmoyles0117)) +- cluster/gce: Kubernetes now generates consistent key sizes in config-default.sh using /dev/urandom instead of /dev/random ([#67139](https://github.com/kubernetes/kubernetes/pull/67139), [@yogi-sagar](https://github.com/yogi-sagar)) + +### SIG Instrumentation + +- The etcdv3 client can now be monitored by Prometheus. ([#64741](https://github.com/kubernetes/kubernetes/pull/64741), [@wgliang](https://github.com/wgliang)) + +### SIG Network + +- The ip-masq-agent will now be scheduled in all nodes except master due to NoSchedule/NoExecute tolerations. 
([#66260](https://github.com/kubernetes/kubernetes/pull/66260), [@tanshanshan](https://github.com/tanshanshan)) +- The CoreDNS service can now be monitored by Prometheus. ([#65589](https://github.com/kubernetes/kubernetes/pull/65589), [@rajansandeep](https://github.com/rajansandeep)) +- Traffic shaping is now supported for the CNI network driver. ([#63194](https://github.com/kubernetes/kubernetes/pull/63194), [@m1093782566](https://github.com/m1093782566)) +- The dockershim now sets the "bandwidth" and "ipRanges" CNI capabilities (dynamic parameters). Plugin authors and administrators can now take advantage of this by updating their CNI configuration file. For more information, see the [CNI docs](https://github.com/containernetworking/cni/blob/master/CONVENTIONS.md#dynamic-plugin-specific-fields-capabilities--runtime-configuration) ([#64445](https://github.com/kubernetes/kubernetes/pull/64445), [@squeed](https://github.com/squeed)) + +### SIG Node + +- RuntimeClass is a new API resource for defining different classes of runtimes that may be used to run containers in the cluster. Pods can select a RuntimeClass to use via the RuntimeClassName field. This feature is in alpha, and the RuntimeClass feature gate must be enabled in order to use it. ([#67737](https://github.com/kubernetes/kubernetes/pull/67737), [@tallclair](https://github.com/tallclair)) +- Sped up kubelet start time by executing an immediate runtime and node status update when the Kubelet sees that it has a CIDR. ([#67031](https://github.com/kubernetes/kubernetes/pull/67031), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski)) +- cpumanager will now rollback state if updateContainerCPUSet failed, indicating that the container start failed. This change will prevent CPU leaks. 
([#67430](https://github.com/kubernetes/kubernetes/pull/67430), [@choury](https://github.com/choury)) +- [CRI] RunPodSandboxRequest now has a runtime_handler field for selecting the runtime configuration to run the sandbox with. This feature is in alpha for 1.12.. ([#67518](https://github.com/kubernetes/kubernetes/pull/67518), [@tallclair](https://github.com/tallclair)) +- If a container's requested device plugin resource hasn't registered after Kubelet restart, the container start will now fail.([#67145](https://github.com/kubernetes/kubernetes/pull/67145), [@jiayingz](https://github.com/jiayingz)) +- Upgraded TaintNodesByCondition to beta. ([#62111](https://github.com/kubernetes/kubernetes/pull/62111), [@k82cn](https://github.com/k82cn)) +- The PodShareProcessNamespace feature to configure PID namespace sharing within a pod has been promoted to beta. ([#66507](https://github.com/kubernetes/kubernetes/pull/66507), [@verb](https://github.com/verb)) +- The CPU Manager will now validate the state of the node, enabling Kubernetes to maintain the CPU topology even if resources change. ([#66718](https://github.com/kubernetes/kubernetes/pull/66718), [@ipuustin](https://github.com/ipuustin)) +- Added support kubelet plugin watcher in device manager, as part of the new plugin system. ([#58755](https://github.com/kubernetes/kubernetes/pull/58755), [@vikaschoudhary16](https://github.com/vikaschoudhary16)) +- Expose docker registry config for addons used in Juju deployments ([#66092](https://github.com/kubernetes/kubernetes/pull/66092), [@kwmonroe](https://github.com/kwmonroe)) +- `RunAsGroup` which has been broken since 1.10, now works. 
([#65926](https://github.com/kubernetes/kubernetes/pull/65926), [@Random-Liu](https://github.com/Random-Liu)) +- The systemd config files are now reloaded before kubelet starts, so changes can take effect([#65702](https://github.com/kubernetes/kubernetes/pull/65702), [@mborsz](https://github.com/mborsz)) +- Hostnames are now converted to lowercase before being used for node lookups in the kubernetes-worker charm. ([#65487](https://github.com/kubernetes/kubernetes/pull/65487), [@dshcherb](https://github.com/dshcherb)) +- kubelets that specify `--cloud-provider` now only report addresses in Node status as determined by the cloud provider (unless `--hostname-override` is used to force reporting of the specified hostname) ([#65594](https://github.com/kubernetes/kubernetes/pull/65594), [@liggitt](https://github.com/liggitt)) +- Kubelet now exposes `/debug/flags/v` to allow dynamically setting glog logging level. For example, to change glog level to 3, you only have to send a PUT request like `curl -X PUT http://127.0.0.1:8080/debug/flags/v -d "3"`. ([#64601](https://github.com/kubernetes/kubernetes/pull/64601), [@hzxuzhonghu](https://github.com/hzxuzhonghu)) + +### SIG OpenStack + +- Openstack now supports the node shutdown taint. The taint is added when an instance is shutdown in openstack. ([#67982](https://github.com/kubernetes/kubernetes/pull/67982), [@zetaab](https://github.com/zetaab)) + +### SIG Scheduling + +- The equivalence class cache has been redesigned to be a two level cache, resulting in a significant increase in scheduling throughput and performance. ([#65714](https://github.com/kubernetes/kubernetes/pull/65714), [@resouer](https://github.com/resouer)) +- kube-scheduler can now listen on ports up to 65535, correcting a problem with certain operating systems that request ports greater than 32768. 
([#65833](https://github.com/kubernetes/kubernetes/pull/65833), [@sttts](https://github.com/sttts)) +- Performance of the anti-affinity predicate of the default scheduler has been improved. ([#66948](https://github.com/kubernetes/kubernetes/pull/66948), [@mohamed-mehany](https://github.com/mohamed-mehany)) +- The unreachable taint gets applied to a node when it loses its network connection. ([#67734](https://github.com/kubernetes/kubernetes/pull/67734), [@Huang-Wei](https://github.com/Huang-Wei)) +- If `TaintNodesByCondition` is enabled, add `node.kubernetes.io/unschedulable` and `node.kubernetes.io/network-unavailable` automatically to DaemonSet pods. ([#64954](https://github.com/kubernetes/kubernetes/pull/64954), [@k82cn](https://github.com/k82cn)) + +### SIG Storage + +- The AllowedTopologies field inside StorageClass is now validated against set and map semantics. Specifically, there cannot be duplicate TopologySelectorTerms, MatchLabelExpressions keys, or TopologySelectorLabelRequirement Values. ([#66843](https://github.com/kubernetes/kubernetes/pull/66843), [@verult](https://github.com/verult)) +- A PersistentVolumeClaim may not have been synced to the controller local cache in time if the PersistentVolume is bound by an external PV binder (such as kube-scheduler), so Kubernetes will now double check if PVC is not found in order to prevent the volume from being incorrectly reclaimed. ([#67062](https://github.com/kubernetes/kubernetes/pull/67062), [@cofyc](https://github.com/cofyc)) +- Filesystems will now be properly unmounted when a backend is not reachable and returns EIO. ([#67097](https://github.com/kubernetes/kubernetes/pull/67097), [@chakri-nelluri](https://github.com/chakri-nelluri)) +- The logic for attaching volumes has been changed so that attachdetach controller attaches volumes immediately when a Pod's PVCs are bound, preventing a problem that caused pods to have extremely long startup times. 
([#66863](https://github.com/kubernetes/kubernetes/pull/66863), [@cofyc](https://github.com/cofyc)) +- Dynamic provisions that create iSCSI PVs can now ensure that multipath is used by specifying 2 or more target portals in the PV, which will cause kubelet to wait up to 10 seconds for the multipath device. PVs with just one portal continue to work as before, with kubelet not waiting for the multipath device and just using the first disk it finds. ([#67140](https://github.com/kubernetes/kubernetes/pull/67140), [@bswartz](https://github.com/bswartz)) +- ScaleIO volumes can now be provisioned without having to first manually create /dev/disk/by-id path on each kubernetes node (if not already present). ([#66174](https://github.com/kubernetes/kubernetes/pull/66174), [@ddebroy](https://github.com/ddebroy)) +- Multi-line annotations injected via downward API files will no longer be sorted, scrambling their information. ([#65992](https://github.com/kubernetes/kubernetes/pull/65992), [@liggitt](https://github.com/liggitt)) +- The constructed volume spec for the CSI plugin now includes a volume mode field. ([#65456](https://github.com/kubernetes/kubernetes/pull/65456), [@wenlxie](https://github.com/wenlxie)) +- Kubernetes now includes a metric that reports the number of PVCs that are in-use, with plugin and node name as dimensions, making it possible to figure out how many PVCs each node is using when troubleshooting attach/detach issues. + ([#64527](https://github.com/kubernetes/kubernetes/pull/64527), [@gnufied](https://github.com/gnufied)) +- Added support to restore a volume from a volume snapshot data source. ([#67087](https://github.com/kubernetes/kubernetes/pull/67087), [@xing-yang](https://github.com/xing-yang)) +- When attaching iSCSI volumes, kubelet now scans only the specific LUNs being attached, and also deletes them after detaching. 
This avoids dangling references to LUNs that no longer exist, which used to be the cause of random I/O errors/timeouts in kernel logs, slowdowns during block-device related operations, and very rare cases of data corruption. +([#63176](https://github.com/kubernetes/kubernetes/pull/63176), [@bswartz](https://github.com/bswartz)) +- Both directory and block devices are now supported for local volume plugin FileSystem VolumeMode. ([#63011](https://github.com/kubernetes/kubernetes/pull/63011), [@NickrenREN](https://github.com/NickrenREN)) +- CSI NodePublish call can optionally contain information about the pod that requested the CSI volume. ([#67945](https://github.com/kubernetes/kubernetes/pull/67945), [@jsafrane](https://github.com/jsafrane)) +- Added support for volume attach limits for CSI volumes. ([#67731](https://github.com/kubernetes/kubernetes/pull/67731), [@gnufied](https://github.com/gnufied)) + +### SIG VMWare + +- The vmUUID is now preserved when renewing nodeinfo in the vSphere cloud provider. ([#66007](https://github.com/kubernetes/kubernetes/pull/66007), [@w-leads](https://github.com/w-leads)) +- You can now configure the vsphere cloud provider with a trusted Root-CA, enabling you to take advantage of TLS certificate rotation. ([#64758](https://github.com/kubernetes/kubernetes/pull/64758), [@mariantalla](https://github.com/mariantalla)) + +### SIG Windows + +- Kubelet no longer attempts to sync iptables on non-Linux systems.. ([#67690](https://github.com/kubernetes/kubernetes/pull/67690), [@feiskyer](https://github.com/feiskyer)) +- Kubelet no longer applies default hard evictions of nodefs.inodesFree on non-Linux systems. ([#67709](https://github.com/kubernetes/kubernetes/pull/67709), [@feiskyer](https://github.com/feiskyer)) +- Windows system container "pods" now support kubelet stats. 
([#66427](https://github.com/kubernetes/kubernetes/pull/66427), [@feiskyer](https://github.com/feiskyer)) + +## Other Notable Changes + +### Bug Fixes + +- Update debian-iptables and hyperkube-base images to include CVE fixes. ([#67365](https://github.com/kubernetes/kubernetes/pull/67365), [@ixdy](https://github.com/ixdy)) +- Fix for resourcepool-path configuration in the vsphere.conf file. ([#66261](https://github.com/kubernetes/kubernetes/pull/66261), [@divyenpatel](https://github.com/divyenpatel)) +- This fix prevents a GCE PD volume from being mounted if the udev device link is stale and tries to correct the link. ([#66832](https://github.com/kubernetes/kubernetes/pull/66832), [@msau42](https://github.com/msau42)) +- Fix controller-manager crashes when flex plugin is removed from flex plugin directory ([#65536](https://github.com/kubernetes/kubernetes/pull/65536), [@gnufied](https://github.com/gnufied)) +- Fix local volume directory can't be deleted because of volumeMode error ([#65310](https://github.com/kubernetes/kubernetes/pull/65310), [@wenlxie](https://github.com/wenlxie)) +- bugfix: Do not print feature gates in the generic apiserver code for glog level 0 ([#65584](https://github.com/kubernetes/kubernetes/pull/65584), [@neolit123](https://github.com/neolit123)) +- Fix an issue that pods using hostNetwork keep increasing. 
([#67456](https://github.com/kubernetes/kubernetes/pull/67456), [@Huang-Wei](https://github.com/Huang-Wei)) +- fixes an out of range panic in the NoExecuteTaintManager controller when running a non-64-bit build ([#65596](https://github.com/kubernetes/kubernetes/pull/65596), [@liggitt](https://github.com/liggitt)) +- Fix kubelet to not leak goroutines/inotify watchers on an inactive connection if it's closed ([#67285](https://github.com/kubernetes/kubernetes/pull/67285), [@yujuhong](https://github.com/yujuhong)) +- Fix pod launch by kubelet when --cgroups-per-qos=false and --cgroup-driver="systemd" ([#66617](https://github.com/kubernetes/kubernetes/pull/66617), [@pravisankar](https://github.com/pravisankar)) +- Fixed a panic in the node status update logic when existing node has nil labels. ([#66307](https://github.com/kubernetes/kubernetes/pull/66307), [@guoshimin](https://github.com/guoshimin)) +- Fix the bug where image garbage collection is disabled by mistake. ([#66051](https://github.com/kubernetes/kubernetes/pull/66051), [@jiaxuanzhou](https://github.com/jiaxuanzhou)) +- Fix a bug that preempting a pod may block forever. ([#65987](https://github.com/kubernetes/kubernetes/pull/65987), [@Random-Liu](https://github.com/Random-Liu)) +- fixes the errors/warnings in fluentd configuration ([#67947](https://github.com/kubernetes/kubernetes/pull/67947), [@saravanan30erd](https://github.com/saravanan30erd)) +- Fixed an issue which prevented `gcloud` from working on GCE when metadata concealment was enabled. ([#66630](https://github.com/kubernetes/kubernetes/pull/66630), [@dekkagaijin](https://github.com/dekkagaijin)) +- Fix Stackdriver integration based on node annotation container.googleapis.com/instance_id. ([#66676](https://github.com/kubernetes/kubernetes/pull/66676), [@kawych](https://github.com/kawych)) +- GCE: Fixes loadbalancer creation and deletion issues appearing in 1.10.5. 
([#66400](https://github.com/kubernetes/kubernetes/pull/66400), [@nicksardo](https://github.com/nicksardo)) +- Fixed exception detection in fluentd-gcp plugin. ([#65361](https://github.com/kubernetes/kubernetes/pull/65361), [@xperimental](https://github.com/xperimental)) +- kubeadm: Fix panic when node annotation is nil ([#67648](https://github.com/kubernetes/kubernetes/pull/67648), [@xlgao-zju](https://github.com/xlgao-zju)) +- kubeadm: stop setting UID in the kubelet ConfigMap ([#66341](https://github.com/kubernetes/kubernetes/pull/66341), [@runiq](https://github.com/runiq)) +- bazel deb package bugfix: The kubeadm deb package now reloads the kubelet after installation ([#65554](https://github.com/kubernetes/kubernetes/pull/65554), [@rdodev](https://github.com/rdodev)) +- fix cluster-info dump error ([#66652](https://github.com/kubernetes/kubernetes/pull/66652), [@charrywanganthony](https://github.com/charrywanganthony)) +- Fix kubelet startup failure when using ExecPlugin in kubeconfig ([#66395](https://github.com/kubernetes/kubernetes/pull/66395), [@awly](https://github.com/awly)) +- kubectl: fixes a panic displaying pods with nominatedNodeName set ([#66406](https://github.com/kubernetes/kubernetes/pull/66406), [@liggitt](https://github.com/liggitt)) +- prevents infinite CLI wait on delete when item is recreated ([#66136](https://github.com/kubernetes/kubernetes/pull/66136), [@deads2k](https://github.com/deads2k)) +- Fix 'kubectl cp' with no arguments causes a panic ([#65482](https://github.com/kubernetes/kubernetes/pull/65482), [@wgliang](https://github.com/wgliang)) +- Fixes the wrong elasticsearch node counter ([#65627](https://github.com/kubernetes/kubernetes/pull/65627), [@IvanovOleg](https://github.com/IvanovOleg)) +- Fix an issue with dropped audit logs, when truncating and batch backends enabled at the same time. 
([#65823](https://github.com/kubernetes/kubernetes/pull/65823), [@loburm](https://github.com/loburm)) +- DaemonSet: Fix bug: daemonset didn't create a pod after the node has enough resources ([#67337](https://github.com/kubernetes/kubernetes/pull/67337), [@linyouchong](https://github.com/linyouchong)) +- DaemonSet controller is now using backoff algorithm to avoid hot loops fighting with kubelet on pod recreation when a particular DaemonSet is misconfigured. ([#65309](https://github.com/kubernetes/kubernetes/pull/65309), [@tnozicka](https://github.com/tnozicka)) +- Avoid creating new controller revisions for statefulsets when cache is stale ([#67039](https://github.com/kubernetes/kubernetes/pull/67039), [@mortent](https://github.com/mortent)) +- Fixes issue when updating a DaemonSet causes a hash collision. ([#66476](https://github.com/kubernetes/kubernetes/pull/66476), [@mortent](https://github.com/mortent)) +- fix rollout status for statefulsets ([#62943](https://github.com/kubernetes/kubernetes/pull/62943), [@faraazkhan](https://github.com/faraazkhan)) +- fixes a validation error that could prevent updates to StatefulSet objects containing non-normalized resource requests ([#66165](https://github.com/kubernetes/kubernetes/pull/66165), [@liggitt](https://github.com/liggitt)) +- Headless Services with no ports defined will now create Endpoints correctly, and appear in DNS. ([#67622](https://github.com/kubernetes/kubernetes/pull/67622), [@thockin](https://github.com/thockin)) +- Prevent `resourceVersion` updates for custom resources on no-op writes. ([#67562](https://github.com/kubernetes/kubernetes/pull/67562), [@nikhita](https://github.com/nikhita)) +- kube-controller-manager can now start the quota controller when discovery results can only be partially determined. ([#67433](https://github.com/kubernetes/kubernetes/pull/67433), [@deads2k](https://github.com/deads2k)) +- Immediately close the other side of the connection when proxying. 
([#67288](https://github.com/kubernetes/kubernetes/pull/67288), [@MHBauer](https://github.com/MHBauer)) +- kube-apiserver: fixes error creating system priority classes when starting multiple apiservers simultaneously ([#67372](https://github.com/kubernetes/kubernetes/pull/67372), [@tanshanshan](https://github.com/tanshanshan)) +- Forget rate limit when CRD establish controller successfully updated CRD condition ([#67370](https://github.com/kubernetes/kubernetes/pull/67370), [@yue9944882](https://github.com/yue9944882)) +- fixes a panic when using a mutating webhook admission plugin with a DELETE operation ([#66425](https://github.com/kubernetes/kubernetes/pull/66425), [@liggitt](https://github.com/liggitt)) +- Fix creation of custom resources when the CRD contains non-conventional pluralization and subresources ([#66249](https://github.com/kubernetes/kubernetes/pull/66249), [@deads2k](https://github.com/deads2k)) +- Adjusted http/2 buffer sizes for apiservers to prevent starvation issues between concurrent streams ([#67902](https://github.com/kubernetes/kubernetes/pull/67902), [@liggitt](https://github.com/liggitt)) +- Fixed a bug that was blocking extensible error handling when serializing API responses error out. Previously, serialization failures always resulted in the status code of the original response being returned. Now, the following behavior occurs: ([#67041](https://github.com/kubernetes/kubernetes/pull/67041), [@tristanburgess](https://github.com/tristanburgess)) +- Fixes issue where pod scheduling may fail when using local PVs and pod affinity and anti-affinity without the default StatefulSet OrderedReady pod management policy ([#67556](https://github.com/kubernetes/kubernetes/pull/67556), [@msau42](https://github.com/msau42)) +- Fix panic when processing Azure HTTP response. 
([#68210](https://github.com/kubernetes/kubernetes/pull/68210), [@feiskyer](https://github.com/feiskyer)) +- Fix volume limit for EBS on m5 and c5 instance types ([#66397](https://github.com/kubernetes/kubernetes/pull/66397), [@gnufied](https://github.com/gnufied)) +- Fix a bug on GCE that /etc/crictl.yaml is not generated when crictl is preloaded. ([#66877](https://github.com/kubernetes/kubernetes/pull/66877), [@Random-Liu](https://github.com/Random-Liu)) +- Revert #63905: Setup dns servers and search domains for Windows Pods. DNS for Windows containers will be set by CNI plugins. ([#66587](https://github.com/kubernetes/kubernetes/pull/66587), [@feiskyer](https://github.com/feiskyer)) +- Fix validation for HealthzBindAddress in kube-proxy when --healthz-port is set to 0 ([#66138](https://github.com/kubernetes/kubernetes/pull/66138), [@wsong](https://github.com/wsong)) +- Fixes issue [#68899](https://github.com/kubernetes/kubernetes/issues/68899) where pods might schedule on an unschedulable node. 
([#68984](https://github.com/kubernetes/kubernetes/issues/68984), [@k82cn](https://github.com/k82cn)) + +### Not Very Notable (that is, non-user-facing) + +- Unit tests have been added for scopes and scope selectors in the quota spec ([#66351](https://github.com/kubernetes/kubernetes/pull/66351), [@vikaschoudhary16](https://github.com/vikaschoudhary16)) Courtesy of SIG Node, and SIG Scheduling +- kubelet v1beta1 external ComponentConfig types are now available in the `k8s.io/kubelet` repo ([#67263](https://github.com/kubernetes/kubernetes/pull/67263), [@luxas](https://github.com/luxas)) Courtesy of SIG Cluster Lifecycle, SIG Node, SIG Scheduling, and SIG Testing +- Use sync.map to scale ecache better ([#66862](https://github.com/kubernetes/kubernetes/pull/66862), [@resouer](https://github.com/resouer)) +- Extender preemption should respect IsInterested() ([#66291](https://github.com/kubernetes/kubernetes/pull/66291), [@resouer](https://github.com/resouer)) +- This PR will leverage subtests on the existing table tests for the scheduler units. ([#63665](https://github.com/kubernetes/kubernetes/pull/63665), [@xchapter7x](https://github.com/xchapter7x)) +- This PR will leverage subtests on the existing table tests for the scheduler units. ([#63666](https://github.com/kubernetes/kubernetes/pull/63666), [@xchapter7x](https://github.com/xchapter7x)) +- Re-adds `pkg/generated/bindata.go` to the repository to allow some parts of k8s.io/kubernetes to be go-vendorable. ([#65985](https://github.com/kubernetes/kubernetes/pull/65985), [@ixdy](https://github.com/ixdy)) +- If `TaintNodesByCondition` enabled, taint node with `TaintNodeUnschedulable` when initializing node to avoid race condition. +([#63955](https://github.com/kubernetes/kubernetes/pull/63955), [@k82cn](https://github.com/k82cn)) +- Remove rescheduler since scheduling DS pods by default scheduler is moving to beta. 
([#67687](https://github.com/kubernetes/kubernetes/pull/67687), [@Lion-Wei](https://github.com/Lion-Wei)) +- kubeadm: make sure pre-pulled kube-proxy image and the one specified in its daemon set manifest are the same ([#67131](https://github.com/kubernetes/kubernetes/pull/67131), [@rosti](https://github.com/rosti)) +- kubeadm: remove misleading error message regarding image pulling ([#66658](https://github.com/kubernetes/kubernetes/pull/66658), [@dixudx](https://github.com/dixudx)) +- kubeadm: Pull sidecar and dnsmasq-nanny images when using kube-dns ([#66499](https://github.com/kubernetes/kubernetes/pull/66499), [@rosti](https://github.com/rosti)) +- kubeadm: Fix pause image to not use architecture, as it is a manifest list ([#65920](https://github.com/kubernetes/kubernetes/pull/65920), [@dims](https://github.com/dims)) +- kubeadm: Remove usage of `PersistentVolumeLabel` ([#65827](https://github.com/kubernetes/kubernetes/pull/65827), [@xlgao-zju](https://github.com/xlgao-zju)) +- kubeadm: Add a `v1alpha3` API. This change creates a v1alpha3 API that is initially a duplicate of v1alpha2. ([#65629](https://github.com/kubernetes/kubernetes/pull/65629), [@luxas](https://github.com/luxas)) +- Improved error message when checking the rollout status of StatefulSet with OnDelete strategy type. ([#66983](https://github.com/kubernetes/kubernetes/pull/66983), [@mortent](https://github.com/mortent)) +- Defaults for file audit logging backend in batch mode changed: ([#67223](https://github.com/kubernetes/kubernetes/pull/67223), [@tallclair](https://github.com/tallclair)) +- Role, ClusterRole and their bindings for cloud-provider is put under system namespace. Their addonmanager mode switches to EnsureExists. ([#67224](https://github.com/kubernetes/kubernetes/pull/67224), [@grayluck](https://github.com/grayluck)) +- Don't let aggregated apiservers fail to launch if the external-apiserver-authentication configmap is not found in the cluster. 
([#67836](https://github.com/kubernetes/kubernetes/pull/67836), [@sttts](https://github.com/sttts)) +- Always create configmaps/extensions-apiserver-authentication from kube-apiserver. ([#67694](https://github.com/kubernetes/kubernetes/pull/67694), [@sttts](https://github.com/sttts)) +- Switched certificate data replacement from "REDACTED" to "DATA+OMITTED" ([#66023](https://github.com/kubernetes/kubernetes/pull/66023), [@ibrasho](https://github.com/ibrasho)) +- Decrease the amount of time it takes to modify kubeconfig files with large amounts of contexts ([#67093](https://github.com/kubernetes/kubernetes/pull/67093), [@juanvallejo](https://github.com/juanvallejo)) +- Make EBS volume expansion faster ([#66728](https://github.com/kubernetes/kubernetes/pull/66728), [@gnufied](https://github.com/gnufied)) +- Remove unused binary and container image for kube-aggregator. The functionality is already integrated into the kube-apiserver. ([#67157](https://github.com/kubernetes/kubernetes/pull/67157), [@dims](https://github.com/dims)) +- kube-controller-manager now uses the informer cache instead of active pod gets in HPA controller ([#68241](https://github.com/kubernetes/kubernetes/pull/68241), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski)) +- Replace scale down forbidden window with scale down stabilization window. Rather than waiting a fixed period of time between scale downs, HPA now scales down to the highest recommendation it has made during the scale down stabilization window. ([#68122](https://github.com/kubernetes/kubernetes/pull/68122), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski)) +- Improve CPU sample sanitization in HPA by taking metric's freshness into account. ([#68068](https://github.com/kubernetes/kubernetes/pull/68068), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski)) +- Replace scale up forbidden window with disregarding CPU samples collected when pod was initializing. 
([#67252](https://github.com/kubernetes/kubernetes/pull/67252), [@jbartosik](https://github.com/jbartosik)) +- [e2e] verifying LimitRange update is effective before creating new pod ([#68171](https://github.com/kubernetes/kubernetes/pull/68171), [@dixudx](https://github.com/dixudx)) +- Port 31337 will be used by fluentd ([#68051](https://github.com/kubernetes/kubernetes/pull/68051), [@Szetty](https://github.com/Szetty)) +- Fix flexvolume in containerized kubelets ([#65549](https://github.com/kubernetes/kubernetes/pull/65549), [@gnufied](https://github.com/gnufied)) +- The check for unsupported plugins during volume resize has been moved from the admission controller to the two controllers that handle volume resize. ([#66780](https://github.com/kubernetes/kubernetes/pull/66780), [@kangarlou](https://github.com/kangarlou)) +- kubeadm: remove redundant flags settings for kubelet ([#64682](https://github.com/kubernetes/kubernetes/pull/64682), [@dixudx](https://github.com/dixudx)) +- Set “priorityClassName: system-node-critical” on kube-proxy manifest by default. ([#60150](https://github.com/kubernetes/kubernetes/pull/60150), [@MrHohn](https://github.com/MrHohn)) +- kube-proxy v1beta1 external ComponentConfig types are now available in the `k8s.io/kube-proxy` repo ([#67688](https://github.com/kubernetes/kubernetes/pull/67688), [@Lion-Wei](https://github.com/Lion-Wei)) +- add missing LastTransitionTime of ContainerReady condition ([#64867](https://github.com/kubernetes/kubernetes/pull/64867), [@dixudx](https://github.com/dixudx)) + +## External Dependencies + +- Default etcd server version is unchanged from v1.11: v3.2.18 ([#61198](https://github.com/kubernetes/kubernetes/pull/61198)) +- Rescheduler is unchanged from v1.11: v0.4.0 ([#65454](https://github.com/kubernetes/kubernetes/pull/65454)) +- The validated docker versions are the same as for v1.10: 1.11.2 to 1.13.1 and 17.03.x +- The Go version is go1.10.3, as compared to go1.10.2 in v1.11. 
([#65726](https://github.com/kubernetes/kubernetes/pull/65726), [@ixdy](https://github.com/ixdy)) +- The minimum supported go is the same as for v1.10: go1.9.1. ([#55301](https://github.com/kubernetes/kubernetes/pull/55301)) +- CNI is unchanged from v1.10: v0.6.0 ([#51250](https://github.com/kubernetes/kubernetes/pull/51250)) +- CSI is unchanged from v1.11: 0.3.0 ([#64719](https://github.com/kubernetes/kubernetes/pull/64719)) +- The dashboard add-on is unchanged from v1.10: v1.8.3. ([#57326](https://github.com/kubernetes/kubernetes/pull/57326)) +- Bump Heapster to v1.6.0-beta as compared to v1.5.2 in v1.11 ([#67074](https://github.com/kubernetes/kubernetes/pull/67074)) +- Cluster Autoscaler has been upgraded to v1.3.2-beta.2 from v1.3.0 in v1.11. See [release notes](https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.3.2-beta.2) for details. ([#67697](https://github.com/kubernetes/kubernetes/pull/67697)) +- Kube-dns is unchanged from v1.11: v1.14.10 ([#62676](https://github.com/kubernetes/kubernetes/pull/62676)) +- Influxdb is unchanged from v1.10: v1.3.3 ([#53319](https://github.com/kubernetes/kubernetes/pull/53319)) +- Grafana is unchanged from v1.10: v4.4.3 ([#53319](https://github.com/kubernetes/kubernetes/pull/53319)) +- Kibana is at v6.3.2. ([#67582](https://github.com/kubernetes/kubernetes/pull/67582)) +- CAdvisor is unchanged from v1.11: v0.30.1 ([#64987](https://github.com/kubernetes/kubernetes/pull/64987)) +- fluentd-gcp-scaler has been updated to v0.4.0, up from 0.3.0 in v1.11. 
([#67691](https://github.com/kubernetes/kubernetes/pull/67691)) +- fluentd in fluentd-es-image is unchanged from 1.10: v1.1.0 ([#58525](https://github.com/kubernetes/kubernetes/pull/58525)) +- Fluentd in fluentd-elasticsearch is unchanged from v1.11: v1.2.4 ([#67434](https://github.com/kubernetes/kubernetes/pull/67434)) +- fluentd-elasticsearch is unchanged from 1.10: v2.0.4 ([#58525](https://github.com/kubernetes/kubernetes/pull/58525)) +- The fluent-plugin-kubernetes_metadata_filter plugin in fluentd-elasticsearch has been downgraded to version 2.0.0 ([#67544](https://github.com/kubernetes/kubernetes/pull/67544)) +- fluentd-gcp is unchanged from 1.10: v3.0.0. ([#60722](https://github.com/kubernetes/kubernetes/pull/60722)) +- Ingress glbc is unchanged from 1.10: v1.0.0 ([#61302](https://github.com/kubernetes/kubernetes/pull/61302)) +- OIDC authentication is unchanged from 1.10: coreos/go-oidc v2 ([#58544](https://github.com/kubernetes/kubernetes/pull/58544)) +- Calico is unchanged from 1.10: v2.6.7 ([#59130](https://github.com/kubernetes/kubernetes/pull/59130)) +- hcsshim is unchanged from v1.11, at v0.11 ([#64272](https://github.com/kubernetes/kubernetes/pull/64272)) +- gitRepo volumes in pods no longer require git 1.8.5 or newer; older git versions are now supported. ([#62394](https://github.com/kubernetes/kubernetes/pull/62394)) +- Upgraded crictl on GCE to v1.11.1, up from 1.11.0 on v1.11. ([#66152](https://github.com/kubernetes/kubernetes/pull/66152)) +CoreDNS has been updated to v1.2.2, up from v1.1.3 in v1.11 ([#68076](https://github.com/kubernetes/kubernetes/pull/68076)) +- Setup dns servers and search domains for Windows Pods in dockershim. Docker EE version >= 17.10.0 is required for propagating DNS to containers. ([#63905](https://github.com/kubernetes/kubernetes/pull/63905)) +- Istio addon is unchanged from v1.11, at 0.8.0. 
See [full Istio release notes](https://istio.io/about/notes/0.6.html) ([#64537](https://github.com/kubernetes/kubernetes/pull/64537)) +- cadvisor godeps is unchanged from v1.11, at v0.30.0 ([#64800](https://github.com/kubernetes/kubernetes/pull/64800)) +- event-exporter to version v0.2.2, compared to v0.2.0 in v1.11. ([#66157](https://github.com/kubernetes/kubernetes/pull/66157)) +- Rev the Azure SDK for networking to 2017-06-01 ([#61955](https://github.com/kubernetes/kubernetes/pull/61955)) +- Es-image has been upgraded to Elasticsearch 6.3.2 ([#67484](https://github.com/kubernetes/kubernetes/pull/67484)) +- metrics-server has been upgraded to v0.3.0. ([#68077](https://github.com/kubernetes/kubernetes/pull/68077)) +- GLBC has been updated to v1.2.3 ([#66793](https://github.com/kubernetes/kubernetes/pull/66793)) +- Ingress-gce has been updated to v 1.2.0 ([#65641](https://github.com/kubernetes/kubernetes/pull/65641)) +- ip-masq-agen has been updated to v2.1.1 ([#67916](https://github.com/kubernetes/kubernetes/pull/67916)) +- [v1.12.0-rc.2](#v1120-rc2) +- [v1.12.0-rc.1](#v1120-rc1) +- [v1.12.0-beta.2](#v1120-beta2) +- [v1.12.0-beta.1](#v1120-beta1) +- [v1.12.0-alpha.1](#v1120-alpha1) + + + +# v1.12.0-rc.2 + +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.12/examples) + +## Downloads for v1.12.0-rc.2 + + +filename | sha256 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes.tar.gz) | `184ea437bc72d0e6a4c96b964de53181273e919a1d4785515da3406c7e982bf5` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-src.tar.gz) | `aee82938827ef05ab0ee81bac42f4f79fff126294469868d02efb3426717d71e` + +### Client Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-client-darwin-386.tar.gz) | `40ed3ef9bbc4fad7787dd14eae952edf06d40e1094604bc6d10209b8778c3121` 
+[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-client-darwin-amd64.tar.gz) | `a317fe3801ea5387ce474b9759a7e28ede8324587f79935a7a945da44c99a4b2` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-client-linux-386.tar.gz) | `cd61b4b71d6b739582c02b5be1d87d928507bc59f64ee72629a920cc529a0941` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-client-linux-amd64.tar.gz) | `306af04fc18ca2588e16fd831358df50a2cb02219687b543073836f835de8583` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-client-linux-arm.tar.gz) | `497584f2686339cce857cff1ebf4ed10dcd63f4684a03c242b0828fcd307be4c` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-client-linux-arm64.tar.gz) | `1dfbb8c299f5af15239ef39135a6c8a52ee4c234764ee0437d8f707e636c9124` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-client-linux-ppc64le.tar.gz) | `668d6f35c5f6adcd25584d9ef74c549db13ffca9d93b4bc8d25609a8e5837640` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-client-linux-s390x.tar.gz) | `8a8e205c38858bd9d161115e5e2870c6cfc9c82e189d156e7062e6fa979c3fda` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-client-windows-386.tar.gz) | `cdef48279c22cc8c764e43a4b9c2a86f02f21c80abbbcd48041fb1e89fb1eb67` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-client-windows-amd64.tar.gz) | `50621a3d2b1550c69325422c6dce78f5690574b35d3778dd3afcf698b57f0f54` + +### Server Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-server-linux-amd64.tar.gz) | `87a8438887a2daa199508aae591b158025860b8381c64cbe9b1d0c06c4eebde9` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-server-linux-arm.tar.gz) | 
`f65be73870a0e564ef8ce1b6bb2b75ff7021a6807de84b5750e4fa78635051b6` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-server-linux-arm64.tar.gz) | `171f15aa8b7c365f4fee70ce025c882a921d0075bd726a99b5534cadd09273ef` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-server-linux-ppc64le.tar.gz) | `abc2003d58bd1aca517415c582ed1e8bb1ed596bf04197f4fc7c0c51865a9f86` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-server-linux-s390x.tar.gz) | `e2ce834abb4d45d91fd7a8d774e47f0f8092eb4edcf556605c2ef6e2b190b8b1` + +### Node Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-node-linux-amd64.tar.gz) | `6016c3a1e14c42dcc88caed6497de1b2c56a02bb52d836b19e2ff52098302dda` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-node-linux-arm.tar.gz) | `e712e38c8037159ea074ad93c2f2905cf279f3f119e5fdbf9b97391037a8813f` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-node-linux-arm64.tar.gz) | `7f4095f12d8ad9438919fa447360113799f88bb9435369b9307a41dd9c7692a6` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-node-linux-ppc64le.tar.gz) | `4aeb5dbb0c68e54570542eb5a1d7506d73c81b57eba3c2080ee73bb53dbc3be0` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-node-linux-s390x.tar.gz) | `a160599598167208286db6dc73b415952836218d967fa964fc432b213f1b9908` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.0-rc.2/kubernetes-node-windows-amd64.tar.gz) | `174bedf62b7959d4cb1b1595666f607cd6377c7a2e2208fef5bd554603db5db3` + +## Changelog since v1.12.0-rc.1 + +### Other notable changes + +* Update to use manifest list for etcd image ([#68896](https://github.com/kubernetes/kubernetes/pull/68896), [@ixdy](https://github.com/ixdy)) +* Fix Azure nodes power state for InstanceShutdownByProviderID() 
([#68921](https://github.com/kubernetes/kubernetes/pull/68921), [@feiskyer](https://github.com/feiskyer)) +* Bump kube-dns to 1.14.13 ([#68900](https://github.com/kubernetes/kubernetes/pull/68900), [@MrHohn](https://github.com/MrHohn)) + * - Update Alpine base image to 3.8.1. + * - Build multi-arch images correctly. +* kubelet: fix grpc timeout in the CRI client ([#67793](https://github.com/kubernetes/kubernetes/pull/67793), [@fisherxu](https://github.com/fisherxu)) +* Update to golang 1.10.4 ([#68802](https://github.com/kubernetes/kubernetes/pull/68802), [@ixdy](https://github.com/ixdy)) +* kubeadm now uses fat manifests for the kube-dns images ([#68830](https://github.com/kubernetes/kubernetes/pull/68830), [@rosti](https://github.com/rosti)) +* Update Cluster Autoscaler version to 1.12.0. ([#68739](https://github.com/kubernetes/kubernetes/pull/68739), [@losipiuk](https://github.com/losipiuk)) + * See https://github.com/kubernetes/autoscaler/releases/tag/1.12.0 for CA release notes. +* kube-proxy restores the *filter table when running in ipvs mode. ([#68786](https://github.com/kubernetes/kubernetes/pull/68786), [@alexjx](https://github.com/alexjx)) +* New kubeDNS image fixes an issue where SRV records were incorrectly being compressed. Added manifest file for multiple arch images. ([#68430](https://github.com/kubernetes/kubernetes/pull/68430), [@prameshj](https://github.com/prameshj)) +* Drain should delete terminal pods. 
([#68767](https://github.com/kubernetes/kubernetes/pull/68767), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) + + + +# v1.12.0-rc.1 + +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.12/examples) + +## Downloads for v1.12.0-rc.1 + + +filename | sha256 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes.tar.gz) | `ac65cf9571c3a03105f373db23c8d7f4d01fe1c9ee09b06615bb02d0b81d572c` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-src.tar.gz) | `28518e1d9c7fe5c54aa3b57235ac8d1a7dae02aec04177c38ca157fc2d16edb6` + +### Client Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-client-darwin-386.tar.gz) | `7b6f6f264464d40b7975baecdd796d4f75c5a305999b4ae1f4513646184cac7c` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-client-darwin-amd64.tar.gz) | `5feabe3e616125a36ce4c8021d6bdccdec0f3d82f151b80af7cac1453255b4d5` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-client-linux-386.tar.gz) | `40524a1a09dd24081b3494593a02a461227727f8706077542f2b8603e1cf7e06` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-client-linux-amd64.tar.gz) | `ac2c9757d7df761bdf8ffc259fff07448c300dd110c7dbe2ae3830197eb023e9` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-client-linux-arm.tar.gz) | `02f27ae16e8ebb12b3cb66391fe85f64de08a99450d726e9defd2c5bcd590955` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-client-linux-arm64.tar.gz) | `1286af2cad3f8e2ee8e2dc18a738935779631b58e7ef3da8794bbeadca2f332e` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-client-linux-ppc64le.tar.gz) | `9c04419b159fb0fe501d6e0c8122d6a80b5d6961070ebc5e759f4327a1156cf4` 
+[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-client-linux-s390x.tar.gz) | `104d5c695826971c64cb0cec26cf791d609d3e831edb33574e9af2c4b191f049` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-client-windows-386.tar.gz) | `0096f8126eb04eafa9decd258f6d09977d24eee91b83781347a34ebb7d2064aa` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-client-windows-amd64.tar.gz) | `a641a1a421795279a6213163d7becab9dc6014362e6566f13d660ef1638dc286` + +### Server Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-server-linux-amd64.tar.gz) | `202958d3cfb774fd065ad1ec2477dc9c92ce7f0ff355807c9a2a3a61e8dad927` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-server-linux-arm.tar.gz) | `474de8f6a58d51eb01f6cc73b41897351528a839f818d5c4f828a484f8bc988b` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-server-linux-arm64.tar.gz) | `dbd5affd244815bf45ac0c7a56265800864db623a6a37e7ce9ebe5e5896453f8` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-server-linux-ppc64le.tar.gz) | `a62fefa8ad7b3fbfeb7702dac7d4d6f37823b6c3e4edae3356bf0781b48e42e1` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-server-linux-s390x.tar.gz) | `0f77690f87503c8ee7ccb473c9d2b9d26420292defd82249509cf50d8bb1a16c` + +### Node Binaries + +filename | sha256 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-node-linux-amd64.tar.gz) | `2191845147d5aab08f14312867f86078b513b6aff8685bb8ce84a06b78ae9914` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-node-linux-arm.tar.gz) | `54de98d7d2a71b78bc7a45e70a2005144d210401663f5a9daadedd05f89291f0` 
+[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-node-linux-arm64.tar.gz) | `a765514e0c4865bb20ceb476af83b9d9356c9b565cfe12615ecf7ad3d5a6b4f7` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-node-linux-ppc64le.tar.gz) | `b7ae7d159602d0b933614071f11216ede4df3fc2b28a30d0018e06b3bb22cf6e` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-node-linux-s390x.tar.gz) | `7d4f502eda6aa70b7a18420344abfaec740d74a1edffcb9869e4305c22bba260` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.12.0-rc.1/kubernetes-node-windows-amd64.tar.gz) | `ed5516b1f66a39592a101bec135022b3905a66ae526b8ed3e2e9dff5ed68eda0` + +## Changelog since v1.12.0-beta.2 + +### Action Required + +* Service events are now added in azure-cloud-provider for easily identify the underground errors of Azure API. ([#68212](https://github.com/kubernetes/kubernetes/pull/68212), [@feiskyer](https://github.com/feiskyer)) + * Action required: The following clusterrole and clusterrolebinding should be applied: + ``` + kind: List + apiVersion: v1 + items: + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + kubernetes.io/cluster-service: "true" + name: system:azure-cloud-provider + rules: + - apiGroups: [""] + resources: ["events"] + verbs: + - create + - patch + - update + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + kubernetes.io/cluster-service: "true" + name: system:azure-cloud-provider + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:azure-cloud-provider + subjects: + - kind: ServiceAccount + name: azure-cloud-provider + namespace: kube-system + ``` + * If the clusterrole with same has already been provisioned (e.g. for accessing azurefile secrets), then the above yaml should be merged togather, e.g. 
+ ``` + kind: List + apiVersion: v1 + items: + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + kubernetes.io/cluster-service: "true" + name: system:azure-cloud-provider + rules: + - apiGroups: [""] + resources: ["events"] + verbs: + - create + - patch + - update + - apiGroups: [""] + resources: ["secrets"] + verbs: + - get + - create + - apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + kubernetes.io/cluster-service: "true" + name: system:azure-cloud-provider + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:azure-cloud-provider + subjects: + - kind: ServiceAccount + name: azure-cloud-provider + namespace: kube-system + - kind: ServiceAccount + name: persistent-volume-binder + namespace: kube-system + ``` + +### Other notable changes + +* Update metrics-server to v0.3.1 ([#68746](https://github.com/kubernetes/kubernetes/pull/68746), [@DirectXMan12](https://github.com/DirectXMan12)) +* Upgrade kubeadm's version of docker support ([#68495](https://github.com/kubernetes/kubernetes/pull/68495), [@yuansisi](https://github.com/yuansisi)) +* fix a bug that overwhelming number of prometheus metrics are generated because $NAMESPACE is not replaced by string "{namespace}" ([#68530](https://github.com/kubernetes/kubernetes/pull/68530), [@wenjiaswe](https://github.com/wenjiaswe)) +* The feature gates `ReadOnlyAPIDataVolumes` and `ServiceProxyAllowExternalIPs`, deprecated since 1.10, have been removed and any references must be removed from command-line invocations. ([#67951](https://github.com/kubernetes/kubernetes/pull/67951), [@liggitt](https://github.com/liggitt)) +* Verify invalid secret/configmap/projected volumes before calling setup ([#68691](https://github.com/kubernetes/kubernetes/pull/68691), [@gnufied](https://github.com/gnufied)) +* Fix bug that caused `kubectl` commands to sometimes fail to refresh access token when running against GKE clusters. 
([#66314](https://github.com/kubernetes/kubernetes/pull/66314), [@jlowdermilk](https://github.com/jlowdermilk)) +* Use KubeDNS by default in GCE setups, as CoreDNS has significantly higher memory usage in large clusters. ([#68629](https://github.com/kubernetes/kubernetes/pull/68629), [@shyamjvs](https://github.com/shyamjvs)) +* Fix PodAntiAffinity issues in case of multiple affinityTerms. ([#68173](https://github.com/kubernetes/kubernetes/pull/68173), [@Huang-Wei](https://github.com/Huang-Wei)) +* Make APIGroup field in TypedLocalObjectReference optional. ([#68419](https://github.com/kubernetes/kubernetes/pull/68419), [@xing-yang](https://github.com/xing-yang)) +* Fix potential panic when getting azure load balancer status ([#68609](https://github.com/kubernetes/kubernetes/pull/68609), [@feiskyer](https://github.com/feiskyer)) +* Fix kubelet panics when RuntimeClass is enabled. ([#68521](https://github.com/kubernetes/kubernetes/pull/68521), [@yujuhong](https://github.com/yujuhong)) +* - cAdvisor: Fix NVML initialization race condition ([#68431](https://github.com/kubernetes/kubernetes/pull/68431), [@dashpole](https://github.com/dashpole)) + * - cAdvisor: Fix btrfs filesystem discovery + * - cAdvisor: Fix race condition with AllDockerContainers + * - cAdvisor: Don't watch .mount cgroups + * - cAdvisor: Reduce lock contention during list containers +* Promote ScheduleDaemonSetPods by default scheduler to beta ([#67899](https://github.com/kubernetes/kubernetes/pull/67899), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) + + + # v1.12.0-beta.2 [Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.12/examples) @@ -373,7 +1293,7 @@ filename | sha256 hash # v1.12.0-alpha.1 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) ## Downloads for v1.12.0-alpha.1 diff --git a/CHANGELOG-1.3.md b/CHANGELOG-1.3.md index c6a1dcb137f..1e211660319 100644 --- 
a/CHANGELOG-1.3.md +++ b/CHANGELOG-1.3.md @@ -332,7 +332,7 @@ binary | sha1 hash | md5 hash # v1.3.1 -[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3.0/examples) +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples) ## Downloads @@ -628,7 +628,7 @@ binary | sha1 hash | md5 hash # v1.3.0-alpha.5 -[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples) +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples) ## Downloads @@ -731,7 +731,7 @@ binary | sha1 hash | md5 hash # v1.3.0-alpha.4 -[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples) +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples) ## Downloads @@ -820,7 +820,7 @@ binary | sha1 hash | md5 hash # v1.3.0-alpha.3 -[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples) +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples) ## Downloads @@ -873,7 +873,7 @@ binary | sha1 hash | md5 hash # v1.3.0-alpha.2 -[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples) +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples) ## Downloads @@ -917,7 +917,7 @@ binary | sha1 hash | md5 hash # v1.3.0-alpha.1 -[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/HEAD/examples) +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.3/examples) ## Downloads diff --git a/CHANGELOG-1.4.md b/CHANGELOG-1.4.md index 6af85de7b56..ada428ec673 100644 --- a/CHANGELOG-1.4.md +++ b/CHANGELOG-1.4.md @@ -1123,7 +1123,7 @@ binary | sha256 hash # v1.4.0-alpha.3 -[Documentation](http://kubernetes.github.io) & 
[Examples](http://releases.k8s.io/master/examples) +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.4/examples) ## Downloads @@ -1281,7 +1281,7 @@ binary | sha256 hash # v1.4.0-alpha.2 -[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples) +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.4/examples) ## Downloads @@ -1368,7 +1368,7 @@ binary | sha256 hash # v1.4.0-alpha.1 -[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples) +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.4/examples) ## Downloads diff --git a/CHANGELOG-1.5.md b/CHANGELOG-1.5.md index de5802e6e28..99fb9576dcd 100644 --- a/CHANGELOG-1.5.md +++ b/CHANGELOG-1.5.md @@ -1042,7 +1042,7 @@ filename | sha256 hash # v1.5.0-alpha.2 -[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples) +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.5/examples) ## Downloads for v1.5.0-alpha.2 @@ -1153,7 +1153,7 @@ filename | sha256 hash # v1.5.0-alpha.1 -[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/master/examples) +[Documentation](http://kubernetes.github.io) & [Examples](http://releases.k8s.io/release-1.5/examples) ## Downloads diff --git a/CHANGELOG-1.6.md b/CHANGELOG-1.6.md index 7a010897fe1..7080309563b 100644 --- a/CHANGELOG-1.6.md +++ b/CHANGELOG-1.6.md @@ -2436,7 +2436,7 @@ filename | sha256 hash # v1.6.0-alpha.3 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.6/examples) ## Downloads for v1.6.0-alpha.3 @@ -2496,7 +2496,7 @@ filename | sha256 hash # v1.6.0-alpha.2 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) 
+[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.6/examples) ## Downloads for v1.6.0-alpha.2 @@ -2586,7 +2586,7 @@ filename | sha256 hash # v1.6.0-alpha.1 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.6/examples) ## Downloads for v1.6.0-alpha.1 diff --git a/CHANGELOG-1.7.md b/CHANGELOG-1.7.md index baf0ae6d518..2158517d82b 100644 --- a/CHANGELOG-1.7.md +++ b/CHANGELOG-1.7.md @@ -2841,7 +2841,7 @@ filename | sha256 hash # v1.7.0-alpha.4 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.7/examples) ## Downloads for v1.7.0-alpha.4 @@ -2962,7 +2962,7 @@ filename | sha256 hash # v1.7.0-alpha.3 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.7/examples) ## Downloads for v1.7.0-alpha.3 @@ -3102,7 +3102,7 @@ filename | sha256 hash # v1.7.0-alpha.2 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.7/examples) ## Downloads for v1.7.0-alpha.2 @@ -3208,7 +3208,7 @@ filename | sha256 hash # v1.7.0-alpha.1 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.7/examples) ## Downloads for v1.7.0-alpha.1 diff --git a/CHANGELOG-1.8.md b/CHANGELOG-1.8.md index 1963e2f202f..8de0802674c 100644 --- a/CHANGELOG-1.8.md +++ b/CHANGELOG-1.8.md @@ -2414,7 +2414,7 @@ filename | sha256 hash # v1.8.0-alpha.3 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) 
+[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.8/examples) ## Downloads for v1.8.0-alpha.3 @@ -2681,7 +2681,7 @@ filename | sha256 hash # v1.8.0-alpha.2 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.8/examples) ## Downloads for v1.8.0-alpha.2 @@ -2822,7 +2822,7 @@ filename | sha256 hash # v1.8.0-alpha.1 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.8/examples) ## Downloads for v1.8.0-alpha.1 diff --git a/CHANGELOG-1.9.md b/CHANGELOG-1.9.md index 001363c2b40..4218c4d911c 100644 --- a/CHANGELOG-1.9.md +++ b/CHANGELOG-1.9.md @@ -1887,7 +1887,7 @@ filename | sha256 hash # v1.9.0-alpha.3 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples) ## Downloads for v1.9.0-alpha.3 @@ -2021,7 +2021,7 @@ filename | sha256 hash # v1.9.0-alpha.2 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples) ## Downloads for v1.9.0-alpha.2 @@ -2282,7 +2282,7 @@ filename | sha256 hash # v1.9.0-alpha.1 -[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/master/examples) +[Documentation](https://docs.k8s.io) & [Examples](https://releases.k8s.io/release-1.9/examples) ## Downloads for v1.9.0-alpha.1 diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index d0a92342c13..e8cfb3b83a2 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1500,8 +1500,7 @@ }, { "ImportPath": "github.com/docker/libnetwork/ipvs", - "Comment": "v0.8.0-dev.2-910-gba46b928", - "Rev": "ba46b928444931e6865d8618dc03622cac79aa6f" + "Rev": 
"a9cd636e37898226332c439363e2ed0ea185ae92" }, { "ImportPath": "github.com/docker/spdystream", @@ -2520,8 +2519,8 @@ }, { "ImportPath": "github.com/modern-go/reflect2", - "Comment": "1.0.0-9-g05fbef0", - "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" + "Comment": "v1.0.1", + "Rev": "94122c33edd36123c84d5368cfb2b69df93a0ec8" }, { "ImportPath": "github.com/mohae/deepcopy", @@ -3062,11 +3061,11 @@ }, { "ImportPath": "github.com/vishvananda/netlink", - "Rev": "f67b75edbf5e3bb7dfe70bb788610693a71be3d1" + "Rev": "b2de5d10e38ecce8607e6b438b6d174f389a004e" }, { "ImportPath": "github.com/vishvananda/netlink/nl", - "Rev": "f67b75edbf5e3bb7dfe70bb788610693a71be3d1" + "Rev": "b2de5d10e38ecce8607e6b438b6d174f389a004e" }, { "ImportPath": "github.com/vishvananda/netns", diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index aed5606b752..5aa51a3f131 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -44,6 +44,7 @@ aliases: - juanvallejo - mengqiy - rootfs + - seans3 - shiywang - smarterclayton - soltysh diff --git a/README.md b/README.md index 81f2521d1fe..6f549e66be3 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Kubernetes -[![Submit Queue Widget]][Submit Queue] [![GoDoc Widget]][GoDoc] [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/569/badge)](https://bestpractices.coreinfrastructure.org/projects/569) +[![GoDoc Widget]][GoDoc] [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/569/badge)](https://bestpractices.coreinfrastructure.org/projects/569) @@ -79,8 +79,6 @@ That said, if you have questions, reach out to us [interactive tutorial]: https://kubernetes.io/docs/tutorials/kubernetes-basics [kubernetes.io]: https://kubernetes.io [Scalable Microservices with Kubernetes]: https://www.udacity.com/course/scalable-microservices-with-kubernetes--ud615 -[Submit Queue]: https://submit-queue.k8s.io/#/ci -[Submit Queue Widget]: https://submit-queue.k8s.io/health.svg?v=1 [troubleshooting guide]: 
https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/ [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/README.md?pixel)]() diff --git a/SUPPORT.md b/SUPPORT.md index 9ae470a292c..2f977e2c398 100644 --- a/SUPPORT.md +++ b/SUPPORT.md @@ -18,22 +18,12 @@ The Kubernetes Community is active on Stack Overflow, you can post your question * [User Documentation](https://kubernetes.io/docs/) * [Troubleshooting Guide](https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/) - ### Real-time Chat * [Slack](https://kubernetes.slack.com) ([registration](http://slack.k8s.io)): The `#kubernetes-users` and `#kubernetes-novice` channels are usual places where people offer support. -* Also check out the -[Slack Archive](http://kubernetes.slackarchive.io/) of past conversations. +### Forum -### Mailing Lists/Groups - -* [Kubernetes-users group](https://groups.google.com/forum/#!forum/kubernetes-users) - - - - +* [Kubernetes Official Forum](https://discuss.kubernetes.io) \ No newline at end of file diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 2addcc153e0..79426189e29 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -29807,19 +29807,19 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.apps.v1beta1.DeploymentStatus" + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.apps.v1beta1.DeploymentStatus" + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" } }, "202": { "description": "Accepted", "schema": { - "$ref": "#/definitions/io.k8s.api.apps.v1beta1.DeploymentStatus" + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" } }, "401": { @@ -50615,19 +50615,19 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.api.extensions.v1beta1.DeploymentStatus" + "$ref": 
"#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" } }, "201": { "description": "Created", "schema": { - "$ref": "#/definitions/io.k8s.api.extensions.v1beta1.DeploymentStatus" + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" } }, "202": { "description": "Accepted", "schema": { - "$ref": "#/definitions/io.k8s.api.extensions.v1beta1.DeploymentStatus" + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" } }, "401": { @@ -83057,6 +83057,10 @@ "description": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", "type": "string" }, + "enableServiceLinks": { + "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links.", + "type": "boolean" + }, "hostAliases": { "description": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.", "type": "array", diff --git a/api/swagger-spec/apps_v1.json b/api/swagger-spec/apps_v1.json index 4882a1ce299..98a07010684 100644 --- a/api/swagger-spec/apps_v1.json +++ b/api/swagger-spec/apps_v1.json @@ -6826,6 +6826,10 @@ "runtimeClassName": { "type": "string", "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future." + }, + "enableServiceLinks": { + "type": "boolean", + "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links." } } }, diff --git a/api/swagger-spec/apps_v1beta1.json b/api/swagger-spec/apps_v1beta1.json index c463a63ac45..649573a5c8c 100644 --- a/api/swagger-spec/apps_v1beta1.json +++ b/api/swagger-spec/apps_v1beta1.json @@ -1983,7 +1983,7 @@ "description": "API at /apis/apps/v1beta1", "operations": [ { - "type": "v1beta1.DeploymentStatus", + "type": "v1.Status", "method": "POST", "summary": "create rollback of a Deployment", "nickname": "createNamespacedDeploymentRollback", @@ -2025,17 +2025,17 @@ { "code": 200, "message": "OK", - "responseModel": "v1beta1.DeploymentStatus" + "responseModel": "v1.Status" }, { "code": 201, "message": "Created", - "responseModel": "v1beta1.DeploymentStatus" + "responseModel": "v1.Status" }, { "code": 202, "message": "Accepted", - "responseModel": "v1beta1.DeploymentStatus" + "responseModel": "v1.Status" } ], "produces": [ @@ -4434,6 +4434,10 @@ "runtimeClassName": { "type": "string", "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future." 
+ }, + "enableServiceLinks": { + "type": "boolean", + "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links." } } }, diff --git a/api/swagger-spec/apps_v1beta2.json b/api/swagger-spec/apps_v1beta2.json index d406dc152bd..f9d7e378c53 100644 --- a/api/swagger-spec/apps_v1beta2.json +++ b/api/swagger-spec/apps_v1beta2.json @@ -6826,6 +6826,10 @@ "runtimeClassName": { "type": "string", "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future." + }, + "enableServiceLinks": { + "type": "boolean", + "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links." } } }, diff --git a/api/swagger-spec/batch_v1.json b/api/swagger-spec/batch_v1.json index 1c7f36766f6..69b8f84fed6 100644 --- a/api/swagger-spec/batch_v1.json +++ b/api/swagger-spec/batch_v1.json @@ -1746,6 +1746,10 @@ "runtimeClassName": { "type": "string", "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future." + }, + "enableServiceLinks": { + "type": "boolean", + "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links." } } }, diff --git a/api/swagger-spec/batch_v1beta1.json b/api/swagger-spec/batch_v1beta1.json index b43f6107c15..df0a4b8da15 100644 --- a/api/swagger-spec/batch_v1beta1.json +++ b/api/swagger-spec/batch_v1beta1.json @@ -1801,6 +1801,10 @@ "runtimeClassName": { "type": "string", "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future." + }, + "enableServiceLinks": { + "type": "boolean", + "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links." } } }, diff --git a/api/swagger-spec/batch_v2alpha1.json b/api/swagger-spec/batch_v2alpha1.json index 044d864f92a..07d1e170de6 100644 --- a/api/swagger-spec/batch_v2alpha1.json +++ b/api/swagger-spec/batch_v2alpha1.json @@ -1801,6 +1801,10 @@ "runtimeClassName": { "type": "string", "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. 
If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future." + }, + "enableServiceLinks": { + "type": "boolean", + "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links." } } }, diff --git a/api/swagger-spec/extensions_v1beta1.json b/api/swagger-spec/extensions_v1beta1.json index f81b195df91..f88198088db 100644 --- a/api/swagger-spec/extensions_v1beta1.json +++ b/api/swagger-spec/extensions_v1beta1.json @@ -2153,7 +2153,7 @@ "description": "API at /apis/extensions/v1beta1", "operations": [ { - "type": "v1beta1.DeploymentStatus", + "type": "v1.Status", "method": "POST", "summary": "create rollback of a Deployment", "nickname": "createNamespacedDeploymentRollback", @@ -2195,17 +2195,17 @@ { "code": 200, "message": "OK", - "responseModel": "v1beta1.DeploymentStatus" + "responseModel": "v1.Status" }, { "code": 201, "message": "Created", - "responseModel": "v1beta1.DeploymentStatus" + "responseModel": "v1.Status" }, { "code": 202, "message": "Accepted", - "responseModel": "v1beta1.DeploymentStatus" + "responseModel": "v1.Status" } ], "produces": [ @@ -7474,6 +7474,10 @@ "runtimeClassName": { "type": "string", "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future." 
+ }, + "enableServiceLinks": { + "type": "boolean", + "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links." } } }, diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index dd100b733fd..6646c7e91c3 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -20607,6 +20607,10 @@ "runtimeClassName": { "type": "string", "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future." + }, + "enableServiceLinks": { + "type": "boolean", + "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links." } } }, diff --git a/build/build-image/cross/Dockerfile b/build/build-image/cross/Dockerfile index 2134ab312b4..ef4737c2674 100644 --- a/build/build-image/cross/Dockerfile +++ b/build/build-image/cross/Dockerfile @@ -15,7 +15,7 @@ # This file creates a standard build environment for building cross # platform go binary for the architecture kubernetes cares about. 
-FROM golang:1.10.3 +FROM golang:1.10.4 ENV GOARM 7 ENV KUBE_DYNAMIC_CROSSPLATFORMS \ diff --git a/build/build-image/cross/VERSION b/build/build-image/cross/VERSION index e5b42437098..9aa67b04407 100644 --- a/build/build-image/cross/VERSION +++ b/build/build-image/cross/VERSION @@ -1 +1 @@ -v1.10.3-1 +v1.10.4-1 diff --git a/build/common.sh b/build/common.sh index ea865f72297..28d0f2a683e 100755 --- a/build/common.sh +++ b/build/common.sh @@ -88,7 +88,7 @@ readonly KUBE_CONTAINER_RSYNC_PORT=8730 # # $1 - server architecture kube::build::get_docker_wrapped_binaries() { - debian_iptables_version=v10.1 + debian_iptables_version=v10.2 ### If you change any of these lists, please also update DOCKERIZED_BINARIES ### in build/BUILD. And kube::golang::server_image_targets case $1 in @@ -600,6 +600,7 @@ function kube::build::run_build_command_ex() { --env "GOFLAGS=${GOFLAGS:-}" --env "GOLDFLAGS=${GOLDFLAGS:-}" --env "GOGCFLAGS=${GOGCFLAGS:-}" + --env "SOURCE_DATE_EPOCH=${SOURCE_DATE_EPOCH:-}" ) if [[ -n "${DOCKER_CGROUP_PARENT:-}" ]]; then diff --git a/build/debian-base/Makefile b/build/debian-base/Makefile index 3f9cd4b29b2..379fa491962 100755 --- a/build/debian-base/Makefile +++ b/build/debian-base/Makefile @@ -12,19 +12,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-all: build +all: all-build REGISTRY ?= staging-k8s.gcr.io -IMAGE ?= debian-base +IMAGE ?= $(REGISTRY)/debian-base BUILD_IMAGE ?= debian-build TAG ?= 0.3.2 TAR_FILE ?= rootfs.tar ARCH?=amd64 +ALL_ARCH = amd64 arm arm64 ppc64le s390x + TEMP_DIR:=$(shell mktemp -d) QEMUVERSION=v2.9.1 +# This option is for running docker manifest command +export DOCKER_CLI_EXPERIMENTAL := enabled + ifeq ($(ARCH),amd64) BASEIMAGE?=debian:stretch endif @@ -45,6 +50,23 @@ ifeq ($(ARCH),s390x) QEMUARCH=s390x endif +sub-build-%: + $(MAKE) ARCH=$* build + +all-build: $(addprefix sub-build-,$(ALL_ARCH)) + +sub-push-image-%: + $(MAKE) ARCH=$* push + +all-push-images: $(addprefix sub-push-image-,$(ALL_ARCH)) + +all-push: all-push-images push-manifest + +push-manifest: + docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g") + @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done + docker manifest push ${IMAGE}:${TAG} + build: clean cp ./* $(TEMP_DIR) cat Dockerfile.build \ @@ -69,13 +91,13 @@ endif docker build --pull -t $(BUILD_IMAGE) -f $(TEMP_DIR)/Dockerfile.build $(TEMP_DIR) docker create --name $(BUILD_IMAGE) $(BUILD_IMAGE) docker export $(BUILD_IMAGE) > $(TEMP_DIR)/$(TAR_FILE) - docker build -t $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR) + docker build -t $(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR) rm -rf $(TEMP_DIR) push: build - docker push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) + docker push $(IMAGE)-$(ARCH):$(TAG) clean: - docker rmi -f $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) || true + docker rmi -f $(IMAGE)-$(ARCH):$(TAG) || true docker rmi -f $(BUILD_IMAGE) || true docker rm -f $(BUILD_IMAGE) || true diff --git a/build/debian-hyperkube-base/Dockerfile b/build/debian-hyperkube-base/Dockerfile index c15103816bd..f4b5ecbec79 100644 --- a/build/debian-hyperkube-base/Dockerfile +++ b/build/debian-hyperkube-base/Dockerfile @@ -38,6 +38,7 @@ RUN echo CACHEBUST>/dev/null && 
clean-install \ jq \ kmod \ openssh-client \ + netbase \ nfs-common \ socat \ udev \ diff --git a/build/debian-hyperkube-base/Makefile b/build/debian-hyperkube-base/Makefile index f6440885562..4a67da27a09 100644 --- a/build/debian-hyperkube-base/Makefile +++ b/build/debian-hyperkube-base/Makefile @@ -18,9 +18,10 @@ # [ARCH=amd64] [REGISTRY="staging-k8s.gcr.io"] make (build|push) REGISTRY?=staging-k8s.gcr.io -IMAGE?=debian-hyperkube-base -TAG=0.10.1 +IMAGE?=$(REGISTRY)/debian-hyperkube-base +TAG=0.10.2 ARCH?=amd64 +ALL_ARCH = amd64 arm arm64 ppc64le s390x CACHEBUST?=1 BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.3.2 @@ -29,9 +30,29 @@ CNI_VERSION=v0.6.0 TEMP_DIR:=$(shell mktemp -d) CNI_TARBALL=cni-plugins-$(ARCH)-$(CNI_VERSION).tgz -.PHONY: all build push clean +# This option is for running docker manifest command +export DOCKER_CLI_EXPERIMENTAL := enabled -all: push +.PHONY: all build push clean all-build all-push-images all-push push-manifest + +all: all-push + +sub-build-%: + $(MAKE) ARCH=$* build + +all-build: $(addprefix sub-build-,$(ALL_ARCH)) + +sub-push-image-%: + $(MAKE) ARCH=$* push + +all-push-images: $(addprefix sub-push-image-,$(ALL_ARCH)) + +all-push: all-push-images push-manifest + +push-manifest: + docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g") + @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done + docker manifest push ${IMAGE}:${TAG} cni-tars/$(CNI_TARBALL): mkdir -p cni-tars/ @@ -55,8 +76,8 @@ ifneq ($(ARCH),amd64) # Register /usr/bin/qemu-ARCH-static as the handler for non-x86 binaries in the kernel docker run --rm --privileged multiarch/qemu-user-static:register --reset endif - docker build --pull -t $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR) + docker build --pull -t $(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR) rm -rf $(TEMP_DIR) push: build - docker push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) + docker push 
$(IMAGE)-$(ARCH):$(TAG) diff --git a/build/debian-hyperkube-base/README.md b/build/debian-hyperkube-base/README.md index 736684a60d1..b37c04ddf23 100644 --- a/build/debian-hyperkube-base/README.md +++ b/build/debian-hyperkube-base/README.md @@ -10,24 +10,16 @@ This image is compiled for multiple architectures. If you're editing the Dockerfile or some other thing, please bump the `TAG` in the Makefile. ```console -# Build for linux/amd64 (default) -$ make push ARCH=amd64 +# Build and push images for all the architectures +$ make all-push # ---> staging-k8s.gcr.io/debian-hyperkube-base-amd64:TAG - -$ make push ARCH=arm # ---> staging-k8s.gcr.io/debian-hyperkube-base-arm:TAG - -$ make push ARCH=arm64 # ---> staging-k8s.gcr.io/debian-hyperkube-base-arm64:TAG - -$ make push ARCH=ppc64le # ---> staging-k8s.gcr.io/debian-hyperkube-base-ppc64le:TAG - -$ make push ARCH=s390x # ---> staging-k8s.gcr.io/debian-hyperkube-base-s390x:TAG ``` -If you don't want to push the images, run `make build` instead +If you don't want to push the images, run `make all-build` instead [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/build/debian-hyperkube-base/README.md?pixel)]() diff --git a/build/debian-iptables/Dockerfile b/build/debian-iptables/Dockerfile index e4fd481fbb4..c7953b3762e 100644 --- a/build/debian-iptables/Dockerfile +++ b/build/debian-iptables/Dockerfile @@ -19,4 +19,5 @@ RUN clean-install \ ebtables \ ipset \ iptables \ - kmod + kmod \ + netbase diff --git a/build/debian-iptables/Makefile b/build/debian-iptables/Makefile index 26f383b9104..8c793db5aac 100644 --- a/build/debian-iptables/Makefile +++ b/build/debian-iptables/Makefile @@ -12,16 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-.PHONY: build push +.PHONY: build push all all-build all-push-images all-push push-manifest REGISTRY?="staging-k8s.gcr.io" -IMAGE=debian-iptables -TAG?=v10.1 +IMAGE=$(REGISTRY)/debian-iptables +TAG?=v10.2 ARCH?=amd64 +ALL_ARCH = amd64 arm arm64 ppc64le s390x TEMP_DIR:=$(shell mktemp -d) BASEIMAGE?=k8s.gcr.io/debian-base-$(ARCH):0.3.2 +# This option is for running docker manifest command +export DOCKER_CLI_EXPERIMENTAL := enabled + build: cp ./* $(TEMP_DIR) cd $(TEMP_DIR) && sed -i "s|BASEIMAGE|$(BASEIMAGE)|g" Dockerfile @@ -31,9 +35,26 @@ ifneq ($(ARCH),amd64) docker run --rm --privileged multiarch/qemu-user-static:register --reset endif - docker build --pull -t $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR) + docker build --pull -t $(IMAGE)-$(ARCH):$(TAG) $(TEMP_DIR) push: build - docker push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) + docker push $(IMAGE)-$(ARCH):$(TAG) -all: push +sub-build-%: + $(MAKE) ARCH=$* build + +all-build: $(addprefix sub-build-,$(ALL_ARCH)) + +sub-push-image-%: + $(MAKE) ARCH=$* push + +all-push-images: $(addprefix sub-push-image-,$(ALL_ARCH)) + +all-push: all-push-images push-manifest + +push-manifest: + docker manifest create --amend $(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(IMAGE)\-&:$(TAG)~g") + @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${IMAGE}:${TAG} ${IMAGE}-$${arch}:${TAG}; done + docker manifest push ${IMAGE}:${TAG} + +all: all-push diff --git a/build/debian-iptables/README.md b/build/debian-iptables/README.md index a961cd6f6df..6900a857ed2 100644 --- a/build/debian-iptables/README.md +++ b/build/debian-iptables/README.md @@ -9,24 +9,16 @@ This image is compiled for multiple architectures. If you're editing the Dockerfile or some other thing, please bump the `TAG` in the Makefile. 
```console -# Build for linux/amd64 (default) -$ make push ARCH=amd64 +# Build and push images for all the architectures +$ make all-push # ---> staging-k8s.gcr.io/debian-iptables-amd64:TAG - -$ make push ARCH=arm # ---> staging-k8s.gcr.io/debian-iptables-arm:TAG - -$ make push ARCH=arm64 # ---> staging-k8s.gcr.io/debian-iptables-arm64:TAG - -$ make push ARCH=ppc64le # ---> staging-k8s.gcr.io/debian-iptables-ppc64le:TAG - -$ make push ARCH=s390x # ---> staging-k8s.gcr.io/debian-iptables-s390x:TAG ``` -If you don't want to push the images, run `make` or `make build` instead +If you don't want to push the images, run `make build ARCH={target_arch}` or `make all-build` instead [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/build/debian-iptables/README.md?pixel)]() diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index 23a59acbf68..cd860c66b91 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -48,7 +48,7 @@ load("@io_bazel_rules_docker//docker:docker.bzl", "docker_pull", "docker_reposit go_rules_dependencies() go_register_toolchains( - go_version = "1.10.3", + go_version = "1.10.4", ) docker_repositories() @@ -67,18 +67,18 @@ http_file( docker_pull( name = "debian-iptables-amd64", - digest = "sha256:58e53e477d204fe32f761ec2718b792f653063d4192ae89efc79e4b6a8dbba91", + digest = "sha256:0987db7ce42949d20ed2647a65d4bee0b616b4d40c7ea54769cc24b7ad003677", registry = "k8s.gcr.io", repository = "debian-iptables-amd64", - tag = "v10.1", # ignored, but kept here for documentation + tag = "v10.2", # ignored, but kept here for documentation ) docker_pull( name = "debian-hyperkube-base-amd64", - digest = "sha256:1c83ca9c8ac4a06e4585802edf8a1cd954011152409116e9c801f4736b97b956", + digest = "sha256:c50522965140c9f206900bf47d547d601c04943e1e59801ba5f70235773cfbb6", registry = "k8s.gcr.io", repository = "debian-hyperkube-base-amd64", - tag = "0.10.1", # ignored, but kept here for documentation + tag = "0.10.2", # ignored, but kept here for
documentation ) docker_pull( diff --git a/build/rpms/50-kubeadm.conf b/build/rpms/50-kubeadm.conf new file mode 100644 index 00000000000..a5d977316dd --- /dev/null +++ b/build/rpms/50-kubeadm.conf @@ -0,0 +1,2 @@ +# The file is provided as part of the kubeadm package +net.ipv4.ip_forward = 1 diff --git a/build/rpms/kubeadm.conf b/build/rpms/kubeadm.conf new file mode 100644 index 00000000000..4b3e02da8cc --- /dev/null +++ b/build/rpms/kubeadm.conf @@ -0,0 +1,2 @@ +# Load br_netfilter module at boot +br_netfilter diff --git a/build/rpms/kubeadm.spec b/build/rpms/kubeadm.spec index 891eb5da76f..098ce1de54f 100644 --- a/build/rpms/kubeadm.spec +++ b/build/rpms/kubeadm.spec @@ -21,8 +21,15 @@ install -m 644 -d %{buildroot}%{_sysconfdir}/sysconfig/ install -p -m 755 -t %{buildroot}%{_bindir} {kubeadm} install -p -m 644 -t %{buildroot}%{_sysconfdir}/systemd/system/kubelet.service.d/ {10-kubeadm.conf} install -p -m 644 -T {kubelet.env} %{buildroot}%{_sysconfdir}/sysconfig/kubelet +mkdir -p %{buildroot}%{_libexecdir}/modules-load.d +mkdir -p %{buildroot}%{_sysctldir} +install -p -m 0644 -t %{buildroot}%{_libexecdir}/modules-load.d/ {kubeadm.conf} +install -p -m 0644 -t %{buildroot}%{_sysctldir} %{50-kubeadm.conf} %files %{_bindir}/kubeadm %{_sysconfdir}/systemd/system/kubelet.service.d/10-kubeadm.conf %{_sysconfdir}/sysconfig/kubelet +%dir %{_libexecdir}/modules-load.d +%{_libexecdir}/modules-load.d/kubeadm.conf +%{_sysctldir}/50-kubeadm.conf diff --git a/build/rpms/kubelet.spec b/build/rpms/kubelet.spec index fcc716998f5..44c03261265 100644 --- a/build/rpms/kubelet.spec +++ b/build/rpms/kubelet.spec @@ -27,5 +27,5 @@ install -p -m 644 -t %{buildroot}%{_sysconfdir}/systemd/system/ {kubelet.service %files %{_bindir}/kubelet -%{_sysconfdir}/systemd/system/kubelet.service +%attr(644,-,-) %{_sysconfdir}/systemd/system/kubelet.service %{_sysconfdir}/kubernetes/manifests/ diff --git a/cluster/addons/calico-policy-controller/calico-clusterrole.yaml 
b/cluster/addons/calico-policy-controller/calico-clusterrole.yaml index 0e3f406091e..b1b83498d8a 100644 --- a/cluster/addons/calico-policy-controller/calico-clusterrole.yaml +++ b/cluster/addons/calico-policy-controller/calico-clusterrole.yaml @@ -10,6 +10,7 @@ rules: - apiGroups: [""] resources: - namespaces + - serviceaccounts verbs: - get - list diff --git a/cluster/addons/dns/kube-dns/kube-dns.yaml.base b/cluster/addons/dns/kube-dns/kube-dns.yaml.base index aab91d948e5..1cef09d678c 100644 --- a/cluster/addons/dns/kube-dns/kube-dns.yaml.base +++ b/cluster/addons/dns/kube-dns/kube-dns.yaml.base @@ -96,7 +96,7 @@ spec: optional: true containers: - name: kubedns - image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10 + image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -147,7 +147,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10 + image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -187,7 +187,7 @@ spec: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10 + image: k8s.gcr.io/k8s-dns-sidecar:1.14.13 livenessProbe: httpGet: path: /metrics diff --git a/cluster/addons/dns/kube-dns/kube-dns.yaml.in b/cluster/addons/dns/kube-dns/kube-dns.yaml.in index a12b45a9c12..3f7fa5eeaec 100644 --- a/cluster/addons/dns/kube-dns/kube-dns.yaml.in +++ b/cluster/addons/dns/kube-dns/kube-dns.yaml.in @@ -96,7 +96,7 @@ spec: optional: true containers: - name: kubedns - image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10 + image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -147,7 +147,7 @@ spec: - name: 
kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10 + image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -187,7 +187,7 @@ spec: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10 + image: k8s.gcr.io/k8s-dns-sidecar:1.14.13 livenessProbe: httpGet: path: /metrics diff --git a/cluster/addons/dns/kube-dns/kube-dns.yaml.sed b/cluster/addons/dns/kube-dns/kube-dns.yaml.sed index 7e0bfa9af2f..85858c02a68 100644 --- a/cluster/addons/dns/kube-dns/kube-dns.yaml.sed +++ b/cluster/addons/dns/kube-dns/kube-dns.yaml.sed @@ -96,7 +96,7 @@ spec: optional: true containers: - name: kubedns - image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.10 + image: k8s.gcr.io/k8s-dns-kube-dns:1.14.13 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -147,7 +147,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10 + image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.14.13 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -187,7 +187,7 @@ spec: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10 + image: k8s.gcr.io/k8s-dns-sidecar:1.14.13 livenessProbe: httpGet: path: /metrics diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Dockerfile b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Dockerfile index ccf2d3812e1..7f84870bd5b 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Dockerfile +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Dockerfile @@ -55,4 +55,4 @@ EXPOSE 80 ENV LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1 # Start Fluentd to pick up our config that watches Docker container logs. 
-CMD /run.sh $FLUENTD_ARGS +CMD ["/run.sh"] diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/run.sh b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/run.sh index be0011b61d0..275bc733994 100755 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/run.sh +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/run.sh @@ -20,4 +20,4 @@ # For systems without journald mkdir -p /var/log/journal -exec /usr/local/bin/fluentd $@ +exec /usr/local/bin/fluentd $FLUENTD_ARGS diff --git a/cluster/addons/fluentd-gcp/scaler-deployment.yaml b/cluster/addons/fluentd-gcp/scaler-deployment.yaml index 85d5d01336f..04e0e31b827 100644 --- a/cluster/addons/fluentd-gcp/scaler-deployment.yaml +++ b/cluster/addons/fluentd-gcp/scaler-deployment.yaml @@ -5,7 +5,7 @@ metadata: namespace: kube-system labels: k8s-app: fluentd-gcp-scaler - version: v0.4.0 + version: v0.5.0 addonmanager.kubernetes.io/mode: Reconcile spec: selector: @@ -19,7 +19,7 @@ spec: serviceAccountName: fluentd-gcp-scaler containers: - name: fluentd-gcp-scaler - image: k8s.gcr.io/fluentd-gcp-scaler:0.4 + image: k8s.gcr.io/fluentd-gcp-scaler:0.5 command: - /scaler.sh - --ds-name=fluentd-gcp-{{ fluentd_gcp_yaml_version }} diff --git a/cluster/addons/metrics-server/metrics-server-deployment.yaml b/cluster/addons/metrics-server/metrics-server-deployment.yaml index f09362de515..bd412047b64 100644 --- a/cluster/addons/metrics-server/metrics-server-deployment.yaml +++ b/cluster/addons/metrics-server/metrics-server-deployment.yaml @@ -23,24 +23,24 @@ data: apiVersion: extensions/v1beta1 kind: Deployment metadata: - name: metrics-server-v0.3.0 + name: metrics-server-v0.3.1 namespace: kube-system labels: k8s-app: metrics-server kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile - version: v0.3.0 + version: v0.3.1 spec: selector: matchLabels: k8s-app: metrics-server - version: v0.3.0 + version: v0.3.1 template: metadata: name: metrics-server labels: k8s-app: metrics-server 
- version: v0.3.0 + version: v0.3.1 annotations: scheduler.alpha.kubernetes.io/critical-pod: '' seccomp.security.alpha.kubernetes.io/pod: 'docker/default' @@ -49,7 +49,7 @@ spec: serviceAccountName: metrics-server containers: - name: metrics-server - image: k8s.gcr.io/metrics-server-amd64:v0.3.0 + image: k8s.gcr.io/metrics-server-amd64:v0.3.1 command: - /metrics-server - --metric-resolution=30s @@ -90,7 +90,7 @@ spec: - --memory={{ base_metrics_server_memory }} - --extra-memory={{ metrics_server_memory_per_node }}Mi - --threshold=5 - - --deployment=metrics-server-v0.3.0 + - --deployment=metrics-server-v0.3.1 - --container=metrics-server - --poll-period=300000 - --estimator=exponential diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index ebb9b9d23c1..33da8531675 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -55,9 +55,6 @@ NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-} NODE_ACCELERATORS=${NODE_ACCELERATORS:-""} REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true} PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false} -if [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then - NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-preemptible=true" -fi PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false} KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true} KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-} # default value calculated below @@ -177,6 +174,12 @@ HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}" # NON_MASTER_NODE_LABELS are labels will only be applied on non-master nodes. NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}" +if [[ "${PREEMPTIBLE_MASTER}" == "true" ]]; then + NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-preemptible=true" +elif [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then + NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true" +fi + # To avoid running Calico on a node that is not configured appropriately, # label each Node so that the DaemonSet can run the Pods only on ready Nodes. 
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then @@ -253,13 +256,13 @@ FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}" if [[ ! -z "${NODE_ACCELERATORS}" ]]; then FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true" if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then - NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}" + NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}" fi fi # Optional: Install cluster DNS. # Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns. -CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}" +CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}" ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}" DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}" diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index b2c6f980461..b607e4c25e8 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -55,9 +55,6 @@ REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true} KUBE_APISERVER_REQUEST_TIMEOUT=300 PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false} PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false} -if [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then - NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-preemptible=true" -fi KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true} KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true} CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false} @@ -176,7 +173,7 @@ ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}" # Useful for scheduling heapster in large clusters with nodes of small size. HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}" -# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.2.24-0) if you need +# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.2.24-1) if you need # non-default version. 
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}" ETCD_DOCKER_REPOSITORY="${TEST_ETCD_DOCKER_REPOSITORY:-}" @@ -215,6 +212,12 @@ KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}" # NON_MASTER_NODE_LABELS are labels will only be applied on non-master nodes. NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}" +if [[ "${PREEMPTIBLE_MASTER}" == "true" ]]; then + NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-preemptible=true" +elif [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then + NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true" +fi + # Optional: Enable netd. ENABLE_NETD="${KUBE_ENABLE_NETD:-false}" CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}" @@ -260,13 +263,13 @@ fi if [[ ! -z "${NODE_ACCELERATORS}" ]]; then FEATURE_GATES="${FEATURE_GATES},DevicePlugins=true" if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then - NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}" + NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}" fi fi # Optional: Install cluster DNS. # Set CLUSTER_DNS_CORE_DNS to 'true' to install CoreDNS instead of kube-dns. 
-CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}" +CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-false}" ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" DNS_SERVER_IP="10.0.0.10" DNS_DOMAIN="cluster.local" diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh index 6cf9c37e6b1..3979219806c 100644 --- a/cluster/gce/gci/configure-helper.sh +++ b/cluster/gce/gci/configure-helper.sh @@ -782,7 +782,7 @@ function create-master-audit-policy { - group: "storage.k8s.io"' cat <"${path}" -apiVersion: audit.k8s.io/v1beta1 +apiVersion: audit.k8s.io/v1 kind: Policy rules: # The following requests were manually identified as high-volume and low-risk, @@ -1194,6 +1194,7 @@ function start-kubelet { local -r kubelet_env_file="/etc/default/kubelet" local kubelet_opts="${KUBELET_ARGS} ${KUBELET_CONFIG_FILE_ARG:-}" echo "KUBELET_OPTS=\"${kubelet_opts}\"" > "${kubelet_env_file}" + echo "KUBE_COVERAGE_FILE=\"/var/log/kubelet.cov\"" >> "${kubelet_env_file}" # Write the systemd service file for kubelet. 
cat </etc/systemd/system/kubelet.service @@ -1311,7 +1312,7 @@ function prepare-kube-proxy-manifest-variables { sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file} sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" ${src_file} sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" ${src_file} - sed -i -e "s@{{ cpurequest }}@50m@g" ${src_file} + sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file} sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file} sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" ${src_file} if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then @@ -1432,10 +1433,10 @@ function start-etcd-servers { rm -f /etc/init.d/etcd fi prepare-log-file /var/log/etcd.log - prepare-etcd-manifest "" "2379" "2380" "100m" "etcd.manifest" + prepare-etcd-manifest "" "2379" "2380" "200m" "etcd.manifest" prepare-log-file /var/log/etcd-events.log - prepare-etcd-manifest "-events" "4002" "2381" "50m" "etcd-events.manifest" + prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest" } # Calculates the following variables based on env variables, which will be used diff --git a/cluster/gce/manifests/cluster-autoscaler.manifest b/cluster/gce/manifests/cluster-autoscaler.manifest index 4524f0041c6..b4f71ac1b07 100644 --- a/cluster/gce/manifests/cluster-autoscaler.manifest +++ b/cluster/gce/manifests/cluster-autoscaler.manifest @@ -17,7 +17,7 @@ "containers": [ { "name": "cluster-autoscaler", - "image": "k8s.gcr.io/cluster-autoscaler:v1.12.0-beta.1", + "image": "k8s.gcr.io/cluster-autoscaler:v1.12.0", "livenessProbe": { "httpGet": { "path": "/health-check", @@ -44,7 +44,7 @@ ], "resources": { "requests": { - "cpu": "5m", + "cpu": "10m", "memory": "300Mi" } }, diff --git a/cluster/gce/manifests/etcd.manifest b/cluster/gce/manifests/etcd.manifest index 71a811dbb6d..2649f7a5234 100644 --- a/cluster/gce/manifests/etcd.manifest 
+++ b/cluster/gce/manifests/etcd.manifest @@ -14,7 +14,7 @@ "containers":[ { "name": "etcd-container", - "image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.2.24-0') }}", + "image": "{{ pillar.get('etcd_docker_repository', 'k8s.gcr.io/etcd') }}:{{ pillar.get('etcd_docker_tag', '3.2.24-1') }}", "resources": { "requests": { "cpu": {{ cpulimit }} diff --git a/cluster/gce/manifests/glbc.manifest b/cluster/gce/manifests/glbc.manifest index 3fff6f5b1db..d7643791c28 100644 --- a/cluster/gce/manifests/glbc.manifest +++ b/cluster/gce/manifests/glbc.manifest @@ -39,7 +39,7 @@ spec: # master components on a single core master. # TODO: Make resource requirements depend on the size of the cluster requests: - cpu: 5m + cpu: 10m memory: 50Mi command: # TODO: split this out into args when we no longer need to pipe stdout to a file #6428 diff --git a/cluster/gce/manifests/kube-addon-manager.yaml b/cluster/gce/manifests/kube-addon-manager.yaml index af2c922d978..6d43808b0cd 100644 --- a/cluster/gce/manifests/kube-addon-manager.yaml +++ b/cluster/gce/manifests/kube-addon-manager.yaml @@ -21,7 +21,7 @@ spec: - exec /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1 resources: requests: - cpu: 3m + cpu: 5m memory: 50Mi volumeMounts: - mountPath: /etc/kubernetes/ diff --git a/cluster/gce/manifests/kube-apiserver.manifest b/cluster/gce/manifests/kube-apiserver.manifest index 7bb5214acd9..0a87bf5f5a5 100644 --- a/cluster/gce/manifests/kube-apiserver.manifest +++ b/cluster/gce/manifests/kube-apiserver.manifest @@ -22,7 +22,7 @@ "image": "{{pillar['kube_docker_registry']}}/kube-apiserver:{{pillar['kube-apiserver_docker_tag']}}", "resources": { "requests": { - "cpu": "125m" + "cpu": "250m" } }, "command": [ diff --git a/cluster/gce/manifests/kube-controller-manager.manifest b/cluster/gce/manifests/kube-controller-manager.manifest index 1ac0cdf3f23..124182bb09e 100644 --- a/cluster/gce/manifests/kube-controller-manager.manifest 
+++ b/cluster/gce/manifests/kube-controller-manager.manifest @@ -21,7 +21,7 @@ "image": "{{pillar['kube_docker_registry']}}/kube-controller-manager:{{pillar['kube-controller-manager_docker_tag']}}", "resources": { "requests": { - "cpu": "100m" + "cpu": "200m" } }, "command": [ diff --git a/cluster/gce/manifests/kube-scheduler.manifest b/cluster/gce/manifests/kube-scheduler.manifest index 3b36b365c88..a3970bc1619 100644 --- a/cluster/gce/manifests/kube-scheduler.manifest +++ b/cluster/gce/manifests/kube-scheduler.manifest @@ -21,7 +21,7 @@ "image": "{{pillar['kube_docker_registry']}}/kube-scheduler:{{pillar['kube-scheduler_docker_tag']}}", "resources": { "requests": { - "cpu": "40m" + "cpu": "75m" } }, "command": [ diff --git a/cluster/gce/upgrade-aliases.sh b/cluster/gce/upgrade-aliases.sh index 2c3df6c90a6..53a400dfe7e 100755 --- a/cluster/gce/upgrade-aliases.sh +++ b/cluster/gce/upgrade-aliases.sh @@ -161,7 +161,7 @@ export KUBE_GCE_ENABLE_IP_ALIASES=true export SECONDARY_RANGE_NAME="pods-default" export STORAGE_BACKEND="etcd3" export STORAGE_MEDIA_TYPE="application/vnd.kubernetes.protobuf" -export ETCD_IMAGE=3.2.24-0 +export ETCD_IMAGE=3.2.24-1 export ETCD_VERSION=3.2.24 # Upgrade master with updated kube envs diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index a031bd0851a..49978053183 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -847,7 +847,7 @@ ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false}) LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-}) ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-}) ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false}) -CLUSTER_DNS_CORE_DNS: $(yaml-quote ${CLUSTER_DNS_CORE_DNS:-true}) +CLUSTER_DNS_CORE_DNS: $(yaml-quote ${CLUSTER_DNS_CORE_DNS:-false}) DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-}) DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-}) ENABLE_DNS_HORIZONTAL_AUTOSCALER: $(yaml-quote ${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-false}) diff --git 
a/cluster/images/etcd/Makefile b/cluster/images/etcd/Makefile index 93f9e50a9a9..8a2f1ba873c 100644 --- a/cluster/images/etcd/Makefile +++ b/cluster/images/etcd/Makefile @@ -15,7 +15,7 @@ # Build the etcd image # # Usage: -# [BUNDLED_ETCD_VERSIONS=2.2.1 2.3.7 3.0.17 3.1.12 3.2.18 3.2.24] [REGISTRY=k8s.gcr.io] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push) +# [BUNDLED_ETCD_VERSIONS=2.2.1 2.3.7 3.0.17 3.1.12 3.2.24] [REGISTRY=k8s.gcr.io] [ARCH=amd64] [BASEIMAGE=busybox] make (build|push) # # The image contains different etcd versions to simplify # upgrades. Thus be careful when removing any versions from here. @@ -26,7 +26,7 @@ # Except from etcd-$(version) and etcdctl-$(version) binaries, we also # need etcd and etcdctl binaries for backward compatibility reasons. # That binary will be set to the last version from $(BUNDLED_ETCD_VERSIONS). -BUNDLED_ETCD_VERSIONS?=2.2.1 2.3.7 3.0.17 3.1.12 3.2.18 3.2.24 +BUNDLED_ETCD_VERSIONS?=2.2.1 2.3.7 3.0.17 3.1.12 3.2.24 # LATEST_ETCD_VERSION identifies the most recent etcd version available. LATEST_ETCD_VERSION?=3.2.24 @@ -34,17 +34,23 @@ LATEST_ETCD_VERSION?=3.2.24 # REVISION provides a version number fo this image and all it's bundled # artifacts. It should start at zero for each LATEST_ETCD_VERSION and increment # for each revision of this image at that etcd version. -REVISION?=0 +REVISION?=1 # IMAGE_TAG Uniquely identifies k8s.gcr.io/etcd docker image with a tag of the form "-". IMAGE_TAG=$(LATEST_ETCD_VERSION)-$(REVISION) ARCH?=amd64 +ALL_ARCH = amd64 arm arm64 ppc64le s390x # Image should be pulled from k8s.gcr.io, which will auto-detect # region (us, eu, asia, ...) and pull from the closest. REGISTRY?=k8s.gcr.io # Images should be pushed to staging-k8s.gcr.io. 
PUSH_REGISTRY?=staging-k8s.gcr.io + +MANIFEST_IMAGE := $(PUSH_REGISTRY)/etcd + +# This option is for running docker manifest command +export DOCKER_CLI_EXPERIMENTAL := enabled # golang version should match the golang version from https://github.com/coreos/etcd/releases for the current ETCD_VERSION. GOLANG_VERSION?=1.8.7 GOARM=7 @@ -118,14 +124,25 @@ endif docker build --pull -t $(REGISTRY)/etcd-$(ARCH):$(IMAGE_TAG) $(TEMP_DIR) push: build - docker tag $(REGISTRY)/etcd-$(ARCH):$(IMAGE_TAG) $(PUSH_REGISTRY)/etcd-$(ARCH):$(IMAGE_TAG) - docker push $(PUSH_REGISTRY)/etcd-$(ARCH):$(IMAGE_TAG) + docker tag $(REGISTRY)/etcd-$(ARCH):$(IMAGE_TAG) $(MANIFEST_IMAGE)-$(ARCH):$(IMAGE_TAG) + docker push $(MANIFEST_IMAGE)-$(ARCH):$(IMAGE_TAG) -ifeq ($(ARCH),amd64) - # Backward compatibility. TODO: deprecate this image tag - docker tag $(REGISTRY)/etcd-$(ARCH):$(IMAGE_TAG) $(PUSH_REGISTRY)/etcd:$(IMAGE_TAG) - docker push $(PUSH_REGISTRY)/etcd:$(IMAGE_TAG) -endif +sub-build-%: + $(MAKE) ARCH=$* build + +all-build: $(addprefix sub-build-,$(ALL_ARCH)) + +sub-push-image-%: + $(MAKE) ARCH=$* push + +all-push-images: $(addprefix sub-push-image-,$(ALL_ARCH)) + +all-push: all-push-images push-manifest + +push-manifest: + docker manifest create --amend $(MANIFEST_IMAGE):$(IMAGE_TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(MANIFEST_IMAGE)\-&:$(IMAGE_TAG)~g") + @for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} ${MANIFEST_IMAGE}:${IMAGE_TAG} ${MANIFEST_IMAGE}-$${arch}:${IMAGE_TAG}; done + docker manifest push ${MANIFEST_IMAGE}:${IMAGE_TAG} unit-test: docker run --interactive -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes -e GOARCH=$(ARCH) golang:$(GOLANG_VERSION) \ @@ -146,5 +163,5 @@ integration-test: integration-build-test: build-integration-test-image integration-test test: unit-test integration-build-test -all: build test -.PHONY: build push unit-test build-integration-test-image integration-test integration-build-test test +all: all-build test +.PHONY: build 
push push-manifest all-push all-push-images all-build unit-test build-integration-test-image integration-test integration-build-test test diff --git a/cluster/images/etcd/README.md b/cluster/images/etcd/README.md index ec92698b77c..25b4026be77 100644 --- a/cluster/images/etcd/README.md +++ b/cluster/images/etcd/README.md @@ -62,22 +62,14 @@ $ make build test Last, build and push the docker images for all supported architectures. ```console -# Build for linux/amd64 (default) -$ make push ARCH=amd64 -# ---> staging-k8s.gcr.io/etcd-amd64:TAG -# ---> staging-k8s.gcr.io/etcd:TAG +# Build images for all the architecture and push the manifest image as well +$ make all-push -$ make push ARCH=arm -# ---> staging-k8s.gcr.io/etcd-arm:TAG +# Build images for all the architecture +$ make all-build -$ make push ARCH=arm64 -# ---> staging-k8s.gcr.io/etcd-arm64:TAG - -$ make push ARCH=ppc64le -# ---> staging-k8s.gcr.io/etcd-ppc64le:TAG - -$ make push ARCH=s390x -# ---> staging-k8s.gcr.io/etcd-s390x:TAG +# Build image for target architecture(default=amd64) +$ make build ARCH=ppc64le ``` If you don't want to push the images, run `make` or `make build` instead diff --git a/cluster/images/etcd/migrate-if-needed.sh b/cluster/images/etcd/migrate-if-needed.sh index a40e6592962..38e2565fcf1 100755 --- a/cluster/images/etcd/migrate-if-needed.sh +++ b/cluster/images/etcd/migrate-if-needed.sh @@ -18,7 +18,7 @@ # This script performs etcd upgrade based on the following environmental # variables: # TARGET_STORAGE - API of etcd to be used (supported: 'etcd2', 'etcd3') -# TARGET_VERSION - etcd release to be used (supported: '2.2.1', '2.3.7', '3.0.17', '3.1.12', '3.2.18', '3.2.24') +# TARGET_VERSION - etcd release to be used (supported: '2.2.1', '2.3.7', '3.0.17', '3.1.12', '3.2.24') # DATA_DIRECTORY - directory with etcd data # # The current etcd version and storage format is detected based on the @@ -29,8 +29,7 @@ # - 2.2.1/etcd2 -> 2.3.7/etcd2 # - 2.3.7/etcd2 -> 3.0.17/etcd2 # - 3.0.17/etcd3 
-> 3.1.12/etcd3 -# - 3.1.12/etcd3 -> 3.2.18/etcd3 -# - 3.2.18/etcd3 -> 3.2.24/etcd3 +# - 3.1.12/etcd3 -> 3.2.24/etcd3 # # NOTE: The releases supported in this script has to match release binaries # present in the etcd image (to make this script work correctly). @@ -43,7 +42,7 @@ set -o nounset # NOTE: BUNDLED_VERSION has to match release binaries present in the # etcd image (to make this script work correctly). -BUNDLED_VERSIONS="2.2.1, 2.3.7, 3.0.17, 3.1.12, 3.2.18 3.2.24" +BUNDLED_VERSIONS="2.2.1, 2.3.7, 3.0.17, 3.1.12, 3.2.24" ETCD_NAME="${ETCD_NAME:-etcd-$(hostname)}" if [ -z "${DATA_DIRECTORY:-}" ]; then diff --git a/cluster/images/hyperkube/Makefile b/cluster/images/hyperkube/Makefile index 131ac141ad8..ea8a1b9e994 100644 --- a/cluster/images/hyperkube/Makefile +++ b/cluster/images/hyperkube/Makefile @@ -22,7 +22,7 @@ ARCH?=amd64 OUT_DIR?=_output HYPERKUBE_BIN?=$(shell pwd)/../../../$(OUT_DIR)/dockerized/bin/linux/$(ARCH)/hyperkube -BASEIMAGE=k8s.gcr.io/debian-hyperkube-base-$(ARCH):0.10.1 +BASEIMAGE=k8s.gcr.io/debian-hyperkube-base-$(ARCH):0.10.2 TEMP_DIR:=$(shell mktemp -d -t hyperkubeXXXXXX) all: build diff --git a/cluster/log-dump/log-dump.sh b/cluster/log-dump/log-dump.sh index a0d8743b0e8..60d962dc382 100755 --- a/cluster/log-dump/log-dump.sh +++ b/cluster/log-dump/log-dump.sh @@ -41,15 +41,15 @@ readonly master_ssh_supported_providers="gce aws kubernetes-anywhere" readonly node_ssh_supported_providers="gce gke aws kubernetes-anywhere" readonly gcloud_supported_providers="gce gke kubernetes-anywhere" -readonly master_logfiles="kube-apiserver kube-apiserver-audit kube-scheduler kube-controller-manager etcd etcd-events glbc cluster-autoscaler kube-addon-manager fluentd" -readonly node_logfiles="kube-proxy fluentd node-problem-detector" +readonly master_logfiles="kube-apiserver.log kube-apiserver-audit.log kube-scheduler.log kube-controller-manager.log etcd.log etcd-events.log glbc.log cluster-autoscaler.log kube-addon-manager.log fluentd.log kubelet.cov" 
+readonly node_logfiles="kube-proxy.log fluentd.log node-problem-detector.log kubelet.cov" readonly node_systemd_services="node-problem-detector" -readonly hollow_node_logfiles="kubelet-hollow-node-* kubeproxy-hollow-node-* npd-hollow-node-*" -readonly aws_logfiles="cloud-init-output" -readonly gce_logfiles="startupscript" -readonly kern_logfile="kern" -readonly initd_logfiles="docker" -readonly supervisord_logfiles="kubelet supervisor/supervisord supervisor/kubelet-stdout supervisor/kubelet-stderr supervisor/docker-stdout supervisor/docker-stderr" +readonly hollow_node_logfiles="kubelet-hollow-node-*.log kubeproxy-hollow-node-*.log npd-hollow-node-*.log" +readonly aws_logfiles="cloud-init-output.log" +readonly gce_logfiles="startupscript.log" +readonly kern_logfile="kern.log" +readonly initd_logfiles="docker/log" +readonly supervisord_logfiles="kubelet.log supervisor/supervisord.log supervisor/kubelet-stdout.log supervisor/kubelet-stderr.log supervisor/docker-stdout.log supervisor/docker-stderr.log" readonly systemd_services="kubelet kubelet-monitor kube-container-runtime-monitor ${LOG_DUMP_SYSTEMD_SERVICES:-docker}" # Limit the number of concurrent node connections so that we don't run out of @@ -100,10 +100,10 @@ function copy-logs-from-node() { local -r node="${1}" local -r dir="${2}" local files=( ${3} ) - # Append ".log*" + # Append "*" # The * at the end is needed to also copy rotated logs (which happens # in large clusters and long runs). - files=( "${files[@]/%/.log*}" ) + files=( "${files[@]/%/*}" ) # Prepend "/var/log/" files=( "${files[@]/#/\/var\/log\/}" ) # Comma delimit (even the singleton, or scp does the wrong thing), surround by braces. @@ -168,6 +168,21 @@ function save-logs() { files="${kern_logfile} ${files} ${initd_logfiles} ${supervisord_logfiles}" fi + # Try dumping coverage profiles, if it looks like coverage is enabled in the first place. 
+ if log-dump-ssh "${node_name}" "stat /var/log/kubelet.cov" &> /dev/null; then + if log-dump-ssh "${node_name}" "command -v docker" &> /dev/null; then + if [[ "${on_master}" == "true" ]]; then + run-in-docker-container "${node_name}" "kube-apiserver" "cat /tmp/k8s-kube-apiserver.cov" > "${dir}/kube-apiserver.cov" || true + run-in-docker-container "${node_name}" "kube-scheduler" "cat /tmp/k8s-kube-scheduler.cov" > "${dir}/kube-scheduler.cov" || true + run-in-docker-container "${node_name}" "kube-controller-manager" "cat /tmp/k8s-kube-controller-manager.cov" > "${dir}/kube-controller-manager.cov" || true + else + run-in-docker-container "${node_name}" "kube-proxy" "cat /tmp/k8s-kube-proxy.cov" > "${dir}/kube-proxy.cov" || true + fi + else + echo "Coverage profiles seem to exist, but cannot be retrieved from inside containers." + fi + fi + echo "Changing logfiles to be world-readable for download" log-dump-ssh "${node_name}" "sudo chmod -R a+r /var/log" || true @@ -175,6 +190,15 @@ function save-logs() { copy-logs-from-node "${node_name}" "${dir}" "${files}" } +# Execute a command in container $2 on node $1. +# Uses docker because the container may not ordinarily permit direct execution. 
+function run-in-docker-container() { + local node_name="$1" + local container="$2" + shift 2 + log-dump-ssh "${node_name}" "docker exec \"\$(docker ps -f label=io.kubernetes.container.name=${container} --format \"{{.ID}}\")\" $@" +} + function dump_masters() { local master_names if [[ -n "${use_custom_instance_list}" ]]; then diff --git a/cmd/kube-apiserver/app/BUILD b/cmd/kube-apiserver/app/BUILD index a053b2afae9..6a325451535 100644 --- a/cmd/kube-apiserver/app/BUILD +++ b/cmd/kube-apiserver/app/BUILD @@ -33,7 +33,7 @@ go_library( "//pkg/master/controller/crdregistration:go_default_library", "//pkg/master/reconcilers:go_default_library", "//pkg/master/tunneler:go_default_library", - "//pkg/quota/install:go_default_library", + "//pkg/quota/v1/install:go_default_library", "//pkg/registry/cachesize:go_default_library", "//pkg/registry/rbac/rest:go_default_library", "//pkg/serviceaccount:go_default_library", diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 455b5a6e8b4..b5f7127367d 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -81,7 +81,7 @@ import ( "k8s.io/kubernetes/pkg/master" "k8s.io/kubernetes/pkg/master/reconcilers" "k8s.io/kubernetes/pkg/master/tunneler" - quotainstall "k8s.io/kubernetes/pkg/quota/install" + quotainstall "k8s.io/kubernetes/pkg/quota/v1/install" "k8s.io/kubernetes/pkg/registry/cachesize" rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest" "k8s.io/kubernetes/pkg/serviceaccount" diff --git a/cmd/kube-controller-manager/app/BUILD b/cmd/kube-controller-manager/app/BUILD index f183dda468d..03fdf8f90bb 100644 --- a/cmd/kube-controller-manager/app/BUILD +++ b/cmd/kube-controller-manager/app/BUILD @@ -75,8 +75,8 @@ go_library( "//pkg/controller/volume/pvcprotection:go_default_library", "//pkg/controller/volume/pvprotection:go_default_library", "//pkg/features:go_default_library", - "//pkg/quota/generic:go_default_library", - "//pkg/quota/install:go_default_library", + 
"//pkg/quota/v1/generic:go_default_library", + "//pkg/quota/v1/install:go_default_library", "//pkg/serviceaccount:go_default_library", "//pkg/util/configz:go_default_library", "//pkg/util/flag:go_default_library", @@ -84,7 +84,7 @@ go_library( "//pkg/version:go_default_library", "//pkg/version/verflag:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/aws_ebs:go_default_library", + "//pkg/volume/awsebs:go_default_library", "//pkg/volume/azure_dd:go_default_library", "//pkg/volume/azure_file:go_default_library", "//pkg/volume/cinder:go_default_library", @@ -107,7 +107,6 @@ go_library( "//pkg/volume/util:go_default_library", "//pkg/volume/vsphere_volume:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index f6a273385a8..b82e902f531 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -31,7 +31,6 @@ import ( "net/http" "k8s.io/api/core/v1" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/runtime/schema" utilfeature "k8s.io/apiserver/pkg/util/feature" cacheddiscovery "k8s.io/client-go/discovery/cached" @@ -59,8 +58,8 @@ import ( "k8s.io/kubernetes/pkg/controller/volume/pvcprotection" "k8s.io/kubernetes/pkg/controller/volume/pvprotection" "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/quota/generic" - quotainstall "k8s.io/kubernetes/pkg/quota/install" + "k8s.io/kubernetes/pkg/quota/v1/generic" + quotainstall "k8s.io/kubernetes/pkg/quota/v1/install" "k8s.io/kubernetes/pkg/util/metrics" ) @@ -199,13 
+198,10 @@ func startAttachDetachController(ctx ControllerContext) (http.Handler, bool, err // csiClient works with CRDs that support json only csiClientConfig.ContentType = "application/json" - crdClientConfig := ctx.ClientBuilder.ConfigOrDie("attachdetach-controller") - attachDetachController, attachDetachControllerErr := attachdetach.NewAttachDetachController( ctx.ClientBuilder.ClientOrDie("attachdetach-controller"), csiclientset.NewForConfigOrDie(csiClientConfig), - apiextensionsclient.NewForConfigOrDie(crdClientConfig), ctx.InformerFactory.Core().V1().Pods(), ctx.InformerFactory.Core().V1().Nodes(), ctx.InformerFactory.Core().V1().PersistentVolumeClaims(), diff --git a/cmd/kube-controller-manager/app/plugins.go b/cmd/kube-controller-manager/app/plugins.go index ba9bb40de90..b2511832022 100644 --- a/cmd/kube-controller-manager/app/plugins.go +++ b/cmd/kube-controller-manager/app/plugins.go @@ -30,7 +30,7 @@ import ( _ "k8s.io/kubernetes/pkg/cloudprovider/providers" // Volume plugins "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/aws_ebs" + "k8s.io/kubernetes/pkg/volume/awsebs" "k8s.io/kubernetes/pkg/volume/azure_dd" "k8s.io/kubernetes/pkg/volume/azure_file" "k8s.io/kubernetes/pkg/volume/cinder" @@ -66,7 +66,7 @@ import ( func ProbeAttachableVolumePlugins() []volume.VolumePlugin { allPlugins := []volume.VolumePlugin{} - allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, awsebs.ProbeVolumePlugins()...) allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...) allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...) allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...) @@ -95,7 +95,7 @@ func GetDynamicPluginProber(config kubectrlmgrconfig.VolumeConfiguration) volume func ProbeExpandableVolumePlugins(config kubectrlmgrconfig.VolumeConfiguration) []volume.VolumePlugin { allPlugins := []volume.VolumePlugin{} - allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...) 
+ allPlugins = append(allPlugins, awsebs.ProbeVolumePlugins()...) allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...) allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...) allPlugins = append(allPlugins, portworx.ProbeVolumePlugins()...) @@ -158,7 +158,7 @@ func ProbeControllerVolumePlugins(cloud cloudprovider.Interface, config kubectrl allPlugins = append(allPlugins, local.ProbeVolumePlugins()...) allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...) - allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, awsebs.ProbeVolumePlugins()...) allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...) allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...) allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...) diff --git a/cmd/kube-proxy/app/conntrack.go b/cmd/kube-proxy/app/conntrack.go index dfb7efb94f5..5e858663fb1 100644 --- a/cmd/kube-proxy/app/conntrack.go +++ b/cmd/kube-proxy/app/conntrack.go @@ -95,9 +95,12 @@ func (rct realConntracker) SetTCPCloseWaitTimeout(seconds int) error { func (realConntracker) setIntSysCtl(name string, value int) error { entry := "net/netfilter/" + name - glog.Infof("Set sysctl '%v' to %v", entry, value) - if err := sysctl.New().SetSysctl(entry, value); err != nil { - return err + sys := sysctl.New() + if val, _ := sys.GetSysctl(entry); val != value { + glog.Infof("Set sysctl '%v' to %v", entry, value) + if err := sys.SetSysctl(entry, value); err != nil { + return err + } } return nil } diff --git a/cmd/kube-scheduler/app/options/BUILD b/cmd/kube-scheduler/app/options/BUILD index ebe68f08340..3531b68930c 100644 --- a/cmd/kube-scheduler/app/options/BUILD +++ b/cmd/kube-scheduler/app/options/BUILD @@ -20,9 +20,12 @@ go_library( "//pkg/scheduler/apis/config/validation:go_default_library", "//pkg/scheduler/factory:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/config:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer/json:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", diff --git a/cmd/kube-scheduler/app/options/configfile.go b/cmd/kube-scheduler/app/options/configfile.go index 7648f76fd49..a564c71409e 100644 --- a/cmd/kube-scheduler/app/options/configfile.go +++ b/cmd/kube-scheduler/app/options/configfile.go @@ -17,11 +17,16 @@ limitations under the License. package options import ( + "bytes" "errors" + "fmt" "io/ioutil" "os" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme" @@ -40,8 +45,48 @@ func loadConfigFromFile(file string) (*kubeschedulerconfig.KubeSchedulerConfigur func loadConfig(data []byte) (*kubeschedulerconfig.KubeSchedulerConfiguration, error) { configObj := &kubeschedulerconfig.KubeSchedulerConfiguration{} if err := runtime.DecodeInto(kubeschedulerscheme.Codecs.UniversalDecoder(), data, configObj); err != nil { + + // if this is a componentconfig/v1alpha1 KubeSchedulerConfiguration object, coerce it to kubescheduler.config.k8s.io/v1alpha1 with a warning + // TODO: drop this block in 1.13 + if runtime.IsNotRegisteredError(err) { + originalErr 
:= err + var ( + u = &unstructured.Unstructured{} + codec = json.NewYAMLSerializer(json.DefaultMetaFactory, kubeschedulerscheme.Scheme, kubeschedulerscheme.Scheme) + legacyConfigGVK = schema.GroupVersionKind{Group: "componentconfig", Version: "v1alpha1", Kind: "KubeSchedulerConfiguration"} + ) + // attempt to decode to an unstructured object + obj, gvk, err := codec.Decode(data, nil, u) + + // if this errored, or the object we read was not the legacy alpha gvk, return the original error + if err != nil || gvk == nil || *gvk != legacyConfigGVK { + return nil, originalErr + } + + fmt.Printf("WARNING: the provided config file is an unsupported apiVersion (%q), which will be removed in future releases\n\n", legacyConfigGVK.GroupVersion().String()) + fmt.Printf("WARNING: switch to command-line flags or update your config file apiVersion to %q\n\n", kubeschedulerconfigv1alpha1.SchemeGroupVersion.String()) + fmt.Printf("WARNING: apiVersions at alpha-level are not guaranteed to be supported in future releases\n\n") + + // attempt to coerce to the new alpha gvk + if err := meta.NewAccessor().SetAPIVersion(obj, kubeschedulerconfigv1alpha1.SchemeGroupVersion.String()); err != nil { + // return the original error on failure + return nil, originalErr + } + + // attempt to encode the coerced apiVersion back to bytes + buffer := bytes.NewBuffer([]byte{}) + if err := codec.Encode(obj, buffer); err != nil { + // return the original error on failure + return nil, originalErr + } + + // re-attempt to load the coerced apiVersion + return loadConfig(buffer.Bytes()) + } + return nil, err } + return configObj, nil } diff --git a/cmd/kube-scheduler/app/options/insecure_serving.go b/cmd/kube-scheduler/app/options/insecure_serving.go index 320b9352b83..78662f69eb3 100644 --- a/cmd/kube-scheduler/app/options/insecure_serving.go +++ b/cmd/kube-scheduler/app/options/insecure_serving.go @@ -28,7 +28,7 @@ import ( kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" ) -// 
CombinedInsecureServingOptions sets up up to two insecure listeners for healthz and metrics. The flags +// CombinedInsecureServingOptions sets up to two insecure listeners for healthz and metrics. The flags // override the ComponentConfig and DeprecatedInsecureServingOptions values for both. type CombinedInsecureServingOptions struct { Healthz *apiserveroptions.DeprecatedInsecureServingOptions diff --git a/cmd/kube-scheduler/app/options/options.go b/cmd/kube-scheduler/app/options/options.go index 20768465ecb..f35c173687a 100644 --- a/cmd/kube-scheduler/app/options/options.go +++ b/cmd/kube-scheduler/app/options/options.go @@ -205,7 +205,7 @@ func (o *Options) Config() (*schedulerappconfig.Config, error) { return nil, err } - // prepare kube clients. + // Prepare kube clients. client, leaderElectionClient, eventClient, err := createClients(c.ComponentConfig.ClientConnection, o.Master, c.ComponentConfig.LeaderElection.RenewDeadline.Duration) if err != nil { return nil, err diff --git a/cmd/kube-scheduler/app/options/options_test.go b/cmd/kube-scheduler/app/options/options_test.go index bacab57fc58..e8e683c82ea 100644 --- a/cmd/kube-scheduler/app/options/options_test.go +++ b/cmd/kube-scheduler/app/options/options_test.go @@ -111,6 +111,17 @@ leaderElection: t.Fatal(err) } + invalidconfigFile := filepath.Join(tmpDir, "scheduler_invalid.yaml") + if err := ioutil.WriteFile(invalidconfigFile, []byte(fmt.Sprintf(` +apiVersion: componentconfig/v1alpha2 +kind: KubeSchedulerConfiguration +clientConnection: + kubeconfig: "%s" +leaderElection: + leaderElect: true`, configKubeconfig)), os.FileMode(0600)); err != nil { + t.Fatal(err) + } + // flag-specified kubeconfig flagKubeconfig := filepath.Join(tmpDir, "flag.kubeconfig") if err := ioutil.WriteFile(flagKubeconfig, []byte(fmt.Sprintf(` @@ -195,9 +206,53 @@ users: }, }, { - name: "config file in componentconfig/v1alpha1", - options: &Options{ConfigFile: oldconfigFile}, - expectedError: "no kind \"KubeSchedulerConfiguration\" 
is registered for version \"componentconfig/v1alpha1\" in scheme", + name: "config file in componentconfig/v1alpha1", + options: &Options{ + ConfigFile: oldconfigFile, + ComponentConfig: func() kubeschedulerconfig.KubeSchedulerConfiguration { + cfg, err := newDefaultComponentConfig() + if err != nil { + t.Fatal(err) + } + return *cfg + }(), + }, + // TODO: switch this to expect an error in 1.13 when the special-case coercion is removed from loadConfig + // expectedError: "no kind \"KubeSchedulerConfiguration\" is registered for version \"componentconfig/v1alpha1\"", + expectedUsername: "config", + expectedConfig: kubeschedulerconfig.KubeSchedulerConfiguration{ + SchedulerName: "default-scheduler", + AlgorithmSource: kubeschedulerconfig.SchedulerAlgorithmSource{Provider: &defaultSource}, + HardPodAffinitySymmetricWeight: 1, + HealthzBindAddress: "0.0.0.0:10251", + MetricsBindAddress: "0.0.0.0:10251", + FailureDomains: "kubernetes.io/hostname,failure-domain.beta.kubernetes.io/zone,failure-domain.beta.kubernetes.io/region", + LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{ + LeaderElectionConfiguration: apiserverconfig.LeaderElectionConfiguration{ + LeaderElect: true, + LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, + RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, + RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, + ResourceLock: "endpoints", + }, + LockObjectNamespace: "kube-system", + LockObjectName: "kube-scheduler", + }, + ClientConnection: apimachineryconfig.ClientConnectionConfiguration{ + Kubeconfig: configKubeconfig, + QPS: 50, + Burst: 100, + ContentType: "application/vnd.kubernetes.protobuf", + }, + PercentageOfNodesToScore: 50, + BindTimeoutSeconds: &defaultBindTimeoutSeconds, + }, + }, + + { + name: "invalid config file in componentconfig/v1alpha2", + options: &Options{ConfigFile: invalidconfigFile}, + expectedError: "no kind \"KubeSchedulerConfiguration\" is registered for version 
\"componentconfig/v1alpha2\"", }, { name: "kubeconfig flag", diff --git a/cmd/kube-scheduler/app/server.go b/cmd/kube-scheduler/app/server.go index a8db9954ec2..32eaa86c0e0 100644 --- a/cmd/kube-scheduler/app/server.go +++ b/cmd/kube-scheduler/app/server.go @@ -81,35 +81,7 @@ constraints, affinity and anti-affinity specifications, data locality, inter-wor interference, deadlines, and so on. Workload-specific requirements will be exposed through the API as necessary.`, Run: func(cmd *cobra.Command, args []string) { - verflag.PrintAndExitIfRequested() - utilflag.PrintFlags(cmd.Flags()) - - if len(args) != 0 { - fmt.Fprint(os.Stderr, "arguments are not supported\n") - } - - if errs := opts.Validate(); len(errs) > 0 { - fmt.Fprintf(os.Stderr, "%v\n", utilerrors.NewAggregate(errs)) - os.Exit(1) - } - - if len(opts.WriteConfigTo) > 0 { - if err := options.WriteConfigFile(opts.WriteConfigTo, &opts.ComponentConfig); err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - glog.Infof("Wrote configuration to: %s\n", opts.WriteConfigTo) - return - } - - c, err := opts.Config() - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } - - stopCh := make(chan struct{}) - if err := Run(c.Complete(), stopCh); err != nil { + if err := run(cmd, args, opts); err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } @@ -122,8 +94,39 @@ through the API as necessary.`, return cmd } -// Run runs the Scheduler. -func Run(c schedulerserverconfig.CompletedConfig, stopCh <-chan struct{}) error { +// run runs the scheduler. 
+func run(cmd *cobra.Command, args []string, opts *options.Options) error { + verflag.PrintAndExitIfRequested() + utilflag.PrintFlags(cmd.Flags()) + + if len(args) != 0 { + fmt.Fprint(os.Stderr, "arguments are not supported\n") + } + + if errs := opts.Validate(); len(errs) > 0 { + fmt.Fprintf(os.Stderr, "%v\n", utilerrors.NewAggregate(errs)) + os.Exit(1) + } + + if len(opts.WriteConfigTo) > 0 { + if err := options.WriteConfigFile(opts.WriteConfigTo, &opts.ComponentConfig); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } + glog.Infof("Wrote configuration to: %s\n", opts.WriteConfigTo) + } + + c, err := opts.Config() + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } + + stopCh := make(chan struct{}) + + // Get the completed config + cc := c.Complete() + // To help debugging, immediately log version glog.Infof("Version: %+v", version.Get()) @@ -139,7 +142,7 @@ func Run(c schedulerserverconfig.CompletedConfig, stopCh <-chan struct{}) error } // Build a scheduler config from the provided algorithm source. - schedulerConfig, err := NewSchedulerConfig(c) + schedulerConfig, err := NewSchedulerConfig(cc) if err != nil { return err } @@ -148,39 +151,39 @@ func Run(c schedulerserverconfig.CompletedConfig, stopCh <-chan struct{}) error sched := scheduler.NewFromConfig(schedulerConfig) // Prepare the event broadcaster. - if c.Broadcaster != nil && c.EventClient != nil { - c.Broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.EventClient.Events("")}) + if cc.Broadcaster != nil && cc.EventClient != nil { + cc.Broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: cc.EventClient.Events("")}) } // Start up the healthz server. 
- if c.InsecureServing != nil { - separateMetrics := c.InsecureMetricsServing != nil - handler := buildHandlerChain(newHealthzHandler(&c.ComponentConfig, separateMetrics), nil, nil) - if err := c.InsecureServing.Serve(handler, 0, stopCh); err != nil { + if cc.InsecureServing != nil { + separateMetrics := cc.InsecureMetricsServing != nil + handler := buildHandlerChain(newHealthzHandler(&cc.ComponentConfig, separateMetrics), nil, nil) + if err := cc.InsecureServing.Serve(handler, 0, stopCh); err != nil { return fmt.Errorf("failed to start healthz server: %v", err) } } - if c.InsecureMetricsServing != nil { - handler := buildHandlerChain(newMetricsHandler(&c.ComponentConfig), nil, nil) - if err := c.InsecureMetricsServing.Serve(handler, 0, stopCh); err != nil { + if cc.InsecureMetricsServing != nil { + handler := buildHandlerChain(newMetricsHandler(&cc.ComponentConfig), nil, nil) + if err := cc.InsecureMetricsServing.Serve(handler, 0, stopCh); err != nil { return fmt.Errorf("failed to start metrics server: %v", err) } } - if c.SecureServing != nil { - handler := buildHandlerChain(newHealthzHandler(&c.ComponentConfig, false), c.Authentication.Authenticator, c.Authorization.Authorizer) - if err := c.SecureServing.Serve(handler, 0, stopCh); err != nil { + if cc.SecureServing != nil { + handler := buildHandlerChain(newHealthzHandler(&cc.ComponentConfig, false), cc.Authentication.Authenticator, cc.Authorization.Authorizer) + if err := cc.SecureServing.Serve(handler, 0, stopCh); err != nil { // fail early for secure handlers, removing the old error loop from above return fmt.Errorf("failed to start healthz server: %v", err) } } // Start all informers. - go c.PodInformer.Informer().Run(stopCh) - c.InformerFactory.Start(stopCh) + go cc.PodInformer.Informer().Run(stopCh) + cc.InformerFactory.Start(stopCh) // Wait for all caches to sync before scheduling. 
- c.InformerFactory.WaitForCacheSync(stopCh) - controller.WaitForCacheSync("scheduler", stopCh, c.PodInformer.Informer().HasSynced) + cc.InformerFactory.WaitForCacheSync(stopCh) + controller.WaitForCacheSync("scheduler", stopCh, cc.PodInformer.Informer().HasSynced) // Prepare a reusable run function. run := func(ctx context.Context) { @@ -200,14 +203,14 @@ func Run(c schedulerserverconfig.CompletedConfig, stopCh <-chan struct{}) error }() // If leader election is enabled, run via LeaderElector until done and exit. - if c.LeaderElection != nil { - c.LeaderElection.Callbacks = leaderelection.LeaderCallbacks{ + if cc.LeaderElection != nil { + cc.LeaderElection.Callbacks = leaderelection.LeaderCallbacks{ OnStartedLeading: run, OnStoppedLeading: func() { utilruntime.HandleError(fmt.Errorf("lost master")) }, } - leaderElector, err := leaderelection.NewLeaderElector(*c.LeaderElection) + leaderElector, err := leaderelection.NewLeaderElector(*cc.LeaderElection) if err != nil { return fmt.Errorf("couldn't create leader elector: %v", err) } diff --git a/cmd/kubeadm/.import-restrictions b/cmd/kubeadm/.import-restrictions index b2a0616312c..1f3124a0091 100644 --- a/cmd/kubeadm/.import-restrictions +++ b/cmd/kubeadm/.import-restrictions @@ -139,7 +139,6 @@ "k8s.io/kubernetes/pkg/util/procfs", "k8s.io/kubernetes/pkg/util/slice", "k8s.io/kubernetes/pkg/util/taints", - "k8s.io/kubernetes/pkg/util/version", "k8s.io/kubernetes/pkg/util/ipvs", "k8s.io/kubernetes/pkg/version", "k8s.io/kubernetes/pkg/volume", diff --git a/cmd/kubeadm/app/apis/kubeadm/BUILD b/cmd/kubeadm/app/apis/kubeadm/BUILD index 3036d93c72b..7869ddb8784 100644 --- a/cmd/kubeadm/app/apis/kubeadm/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/BUILD @@ -43,8 +43,6 @@ filegroup( ":package-srcs", "//cmd/kubeadm/app/apis/kubeadm/fuzzer:all-srcs", "//cmd/kubeadm/app/apis/kubeadm/scheme:all-srcs", - "//cmd/kubeadm/app/apis/kubeadm/v1alpha1:all-srcs", - "//cmd/kubeadm/app/apis/kubeadm/v1alpha2:all-srcs", 
"//cmd/kubeadm/app/apis/kubeadm/v1alpha3:all-srcs", "//cmd/kubeadm/app/apis/kubeadm/validation:all-srcs", ], diff --git a/cmd/kubeadm/app/apis/kubeadm/doc.go b/cmd/kubeadm/app/apis/kubeadm/doc.go index 21b45eda4ad..8998f14fe79 100644 --- a/cmd/kubeadm/app/apis/kubeadm/doc.go +++ b/cmd/kubeadm/app/apis/kubeadm/doc.go @@ -14,10 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package kubeadm is the package that contains the libraries that drive the kubeadm binary. -// kubeadm is responsible for handling a Kubernetes cluster's lifecycle. - // +k8s:deepcopy-gen=package // +groupName=kubeadm.k8s.io +// Package kubeadm is the package that contains the libraries that drive the kubeadm binary. +// kubeadm is responsible for handling a Kubernetes cluster's lifecycle. package kubeadm // import "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" diff --git a/cmd/kubeadm/app/apis/kubeadm/scheme/BUILD b/cmd/kubeadm/app/apis/kubeadm/scheme/BUILD index e374a07a343..4ccb7a02faa 100644 --- a/cmd/kubeadm/app/apis/kubeadm/scheme/BUILD +++ b/cmd/kubeadm/app/apis/kubeadm/scheme/BUILD @@ -7,7 +7,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", - "//cmd/kubeadm/app/apis/kubeadm/v1alpha2:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/v1alpha3:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/cmd/kubeadm/app/apis/kubeadm/scheme/scheme.go b/cmd/kubeadm/app/apis/kubeadm/scheme/scheme.go index eddb1eeacf2..5bc5519379a 100644 --- a/cmd/kubeadm/app/apis/kubeadm/scheme/scheme.go +++ b/cmd/kubeadm/app/apis/kubeadm/scheme/scheme.go @@ -23,7 +23,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - 
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3" ) @@ -41,7 +40,6 @@ func init() { // AddToScheme builds the kubeadm scheme using all known versions of the kubeadm api. func AddToScheme(scheme *runtime.Scheme) { utilruntime.Must(kubeadm.AddToScheme(scheme)) - utilruntime.Must(v1alpha2.AddToScheme(scheme)) utilruntime.Must(v1alpha3.AddToScheme(scheme)) utilruntime.Must(scheme.SetVersionPriority(v1alpha3.SchemeGroupVersion)) } diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD b/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD deleted file mode 100644 index 6df04e38cd7..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/BUILD b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/BUILD deleted file mode 100644 index 0458a2f0229..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/BUILD +++ /dev/null @@ -1,61 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "bootstraptokenstring.go", - "conversion.go", - "defaults.go", - "defaults_unix.go", - "defaults_windows.go", - "doc.go", - "register.go", - "types.go", - "zz_generated.conversion.go", - "zz_generated.deepcopy.go", - "zz_generated.defaults.go", - ], - importpath = "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2", - visibility = ["//visibility:public"], - deps = [ - "//cmd/kubeadm/app/apis/kubeadm:go_default_library", - "//cmd/kubeadm/app/componentconfigs:go_default_library", - "//cmd/kubeadm/app/constants:go_default_library", - "//pkg/kubelet/apis/config:go_default_library", - 
"//pkg/kubelet/apis/config/scheme:go_default_library", - "//pkg/kubelet/apis/config/v1beta1:go_default_library", - "//pkg/proxy/apis/config:go_default_library", - "//pkg/proxy/apis/config/scheme:go_default_library", - "//pkg/proxy/apis/config/v1alpha1:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/client-go/tools/bootstrap/token/api:go_default_library", - "//staging/src/k8s.io/client-go/tools/bootstrap/token/util:go_default_library", - "//staging/src/k8s.io/kube-proxy/config/v1alpha1:go_default_library", - "//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library", - "//vendor/k8s.io/utils/pointer:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) - -go_test( - name = "go_default_test", - srcs = ["bootstraptokenstring_test.go"], - embed = [":go_default_library"], -) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/bootstraptokenstring.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/bootstraptokenstring.go deleted file mode 100644 index d62d5a7438c..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/bootstraptokenstring.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1alpha2 holds the external kubeadm API types of version v1alpha2 -// Note: This file should be kept in sync with the similar one for the internal API -// TODO: The BootstrapTokenString object should move out to either k8s.io/client-go or k8s.io/api in the future -// (probably as part of Bootstrap Tokens going GA). It should not be staged under the kubeadm API as it is now. -package v1alpha2 - -import ( - "fmt" - "strings" - - bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api" - bootstraputil "k8s.io/client-go/tools/bootstrap/token/util" -) - -// BootstrapTokenString is a token of the format abcdef.abcdef0123456789 that is used -// for both validation of the practically of the API server from a joining node's point -// of view and as an authentication method for the node in the bootstrap phase of -// "kubeadm join". This token is and should be short-lived -type BootstrapTokenString struct { - ID string - Secret string -} - -// MarshalJSON implements the json.Marshaler interface. -func (bts BootstrapTokenString) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%s"`, bts.String())), nil -} - -// UnmarshalJSON implements the json.Unmarshaller interface. 
-func (bts *BootstrapTokenString) UnmarshalJSON(b []byte) error { - // If the token is represented as "", just return quickly without an error - if len(b) == 0 { - return nil - } - - // Remove unnecessary " characters coming from the JSON parser - token := strings.Replace(string(b), `"`, ``, -1) - // Convert the string Token to a BootstrapTokenString object - newbts, err := NewBootstrapTokenString(token) - if err != nil { - return err - } - bts.ID = newbts.ID - bts.Secret = newbts.Secret - return nil -} - -// String returns the string representation of the BootstrapTokenString -func (bts BootstrapTokenString) String() string { - if len(bts.ID) > 0 && len(bts.Secret) > 0 { - return bootstraputil.TokenFromIDAndSecret(bts.ID, bts.Secret) - } - return "" -} - -// NewBootstrapTokenString converts the given Bootstrap Token as a string -// to the BootstrapTokenString object used for serialization/deserialization -// and internal usage. It also automatically validates that the given token -// is of the right format -func NewBootstrapTokenString(token string) (*BootstrapTokenString, error) { - substrs := bootstraputil.BootstrapTokenRegexp.FindStringSubmatch(token) - // TODO: Add a constant for the 3 value here, and explain better why it's needed (other than because how the regexp parsin works) - if len(substrs) != 3 { - return nil, fmt.Errorf("the bootstrap token %q was not of the form %q", token, bootstrapapi.BootstrapTokenPattern) - } - - return &BootstrapTokenString{ID: substrs[1], Secret: substrs[2]}, nil -} - -// NewBootstrapTokenStringFromIDAndSecret is a wrapper around NewBootstrapTokenString -// that allows the caller to specify the ID and Secret separately -func NewBootstrapTokenStringFromIDAndSecret(id, secret string) (*BootstrapTokenString, error) { - return NewBootstrapTokenString(bootstraputil.TokenFromIDAndSecret(id, secret)) -} diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/bootstraptokenstring_test.go 
b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/bootstraptokenstring_test.go deleted file mode 100644 index 0d06bd153e3..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/bootstraptokenstring_test.go +++ /dev/null @@ -1,236 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha2 - -import ( - "encoding/json" - "fmt" - "reflect" - "testing" -) - -func TestMarshalJSON(t *testing.T) { - var tests = []struct { - bts BootstrapTokenString - expected string - }{ - {BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, `"abcdef.abcdef0123456789"`}, - {BootstrapTokenString{ID: "foo", Secret: "bar"}, `"foo.bar"`}, - {BootstrapTokenString{ID: "h", Secret: "b"}, `"h.b"`}, - } - for _, rt := range tests { - b, err := json.Marshal(rt.bts) - if err != nil { - t.Fatalf("json.Marshal returned an unexpected error: %v", err) - } - if string(b) != rt.expected { - t.Errorf( - "failed BootstrapTokenString.MarshalJSON:\n\texpected: %s\n\t actual: %s", - rt.expected, - string(b), - ) - } - } -} - -func TestUnmarshalJSON(t *testing.T) { - var tests = []struct { - input string - bts *BootstrapTokenString - expectedError bool - }{ - {`"f.s"`, &BootstrapTokenString{}, true}, - {`"abcdef."`, &BootstrapTokenString{}, true}, - {`"abcdef:abcdef0123456789"`, &BootstrapTokenString{}, true}, - {`abcdef.abcdef0123456789`, &BootstrapTokenString{}, true}, - {`"abcdef.abcdef0123456789`, &BootstrapTokenString{}, true}, - {`"abcdef.ABCDEF0123456789"`, 
&BootstrapTokenString{}, true}, - {`"abcdef.abcdef0123456789"`, &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, false}, - {`"123456.aabbccddeeffgghh"`, &BootstrapTokenString{ID: "123456", Secret: "aabbccddeeffgghh"}, false}, - } - for _, rt := range tests { - newbts := &BootstrapTokenString{} - err := json.Unmarshal([]byte(rt.input), newbts) - if (err != nil) != rt.expectedError { - t.Errorf("failed BootstrapTokenString.UnmarshalJSON:\n\texpected error: %t\n\t actual error: %v", rt.expectedError, err) - } else if !reflect.DeepEqual(rt.bts, newbts) { - t.Errorf( - "failed BootstrapTokenString.UnmarshalJSON:\n\texpected: %v\n\t actual: %v", - rt.bts, - newbts, - ) - } - } -} - -func TestJSONRoundtrip(t *testing.T) { - var tests = []struct { - input string - bts *BootstrapTokenString - }{ - {`"abcdef.abcdef0123456789"`, nil}, - {"", &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}}, - } - for _, rt := range tests { - if err := roundtrip(rt.input, rt.bts); err != nil { - t.Errorf("failed BootstrapTokenString JSON roundtrip with error: %v", err) - } - } -} - -func roundtrip(input string, bts *BootstrapTokenString) error { - var b []byte - var err error - newbts := &BootstrapTokenString{} - // If string input was specified, roundtrip like this: string -> (unmarshal) -> object -> (marshal) -> string - if len(input) > 0 { - if err := json.Unmarshal([]byte(input), newbts); err != nil { - return fmt.Errorf("expected no unmarshal error, got error: %v", err) - } - if b, err = json.Marshal(newbts); err != nil { - return fmt.Errorf("expected no marshal error, got error: %v", err) - } - if input != string(b) { - return fmt.Errorf( - "expected token: %s\n\t actual: %s", - input, - string(b), - ) - } - } else { // Otherwise, roundtrip like this: object -> (marshal) -> string -> (unmarshal) -> object - if b, err = json.Marshal(bts); err != nil { - return fmt.Errorf("expected no marshal error, got error: %v", err) - } - if err := json.Unmarshal(b, 
newbts); err != nil { - return fmt.Errorf("expected no unmarshal error, got error: %v", err) - } - if !reflect.DeepEqual(bts, newbts) { - return fmt.Errorf( - "expected object: %v\n\t actual: %v", - bts, - newbts, - ) - } - } - return nil -} - -func TestTokenFromIDAndSecret(t *testing.T) { - var tests = []struct { - bts BootstrapTokenString - expected string - }{ - {BootstrapTokenString{ID: "foo", Secret: "bar"}, "foo.bar"}, - {BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}, "abcdef.abcdef0123456789"}, - {BootstrapTokenString{ID: "h", Secret: "b"}, "h.b"}, - } - for _, rt := range tests { - actual := rt.bts.String() - if actual != rt.expected { - t.Errorf( - "failed BootstrapTokenString.String():\n\texpected: %s\n\t actual: %s", - rt.expected, - actual, - ) - } - } -} - -func TestNewBootstrapTokenString(t *testing.T) { - var tests = []struct { - token string - expectedError bool - bts *BootstrapTokenString - }{ - {token: "", expectedError: true, bts: nil}, - {token: ".", expectedError: true, bts: nil}, - {token: "1234567890123456789012", expectedError: true, bts: nil}, // invalid parcel size - {token: "12345.1234567890123456", expectedError: true, bts: nil}, // invalid parcel size - {token: ".1234567890123456", expectedError: true, bts: nil}, // invalid parcel size - {token: "123456.", expectedError: true, bts: nil}, // invalid parcel size - {token: "123456:1234567890.123456", expectedError: true, bts: nil}, // invalid separation - {token: "abcdef:1234567890123456", expectedError: true, bts: nil}, // invalid separation - {token: "Abcdef.1234567890123456", expectedError: true, bts: nil}, // invalid token id - {token: "123456.AABBCCDDEEFFGGHH", expectedError: true, bts: nil}, // invalid token secret - {token: "123456.AABBCCD-EEFFGGHH", expectedError: true, bts: nil}, // invalid character - {token: "abc*ef.1234567890123456", expectedError: true, bts: nil}, // invalid character - {token: "abcdef.1234567890123456", expectedError: false, bts: 
&BootstrapTokenString{ID: "abcdef", Secret: "1234567890123456"}}, - {token: "123456.aabbccddeeffgghh", expectedError: false, bts: &BootstrapTokenString{ID: "123456", Secret: "aabbccddeeffgghh"}}, - {token: "abcdef.abcdef0123456789", expectedError: false, bts: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}}, - {token: "123456.1234560123456789", expectedError: false, bts: &BootstrapTokenString{ID: "123456", Secret: "1234560123456789"}}, - } - for _, rt := range tests { - actual, err := NewBootstrapTokenString(rt.token) - if (err != nil) != rt.expectedError { - t.Errorf( - "failed NewBootstrapTokenString for the token %q\n\texpected error: %t\n\t actual error: %v", - rt.token, - rt.expectedError, - err, - ) - } else if !reflect.DeepEqual(actual, rt.bts) { - t.Errorf( - "failed NewBootstrapTokenString for the token %q\n\texpected: %v\n\t actual: %v", - rt.token, - rt.bts, - actual, - ) - } - } -} - -func TestNewBootstrapTokenStringFromIDAndSecret(t *testing.T) { - var tests = []struct { - id, secret string - expectedError bool - bts *BootstrapTokenString - }{ - {id: "", secret: "", expectedError: true, bts: nil}, - {id: "1234567890123456789012", secret: "", expectedError: true, bts: nil}, // invalid parcel size - {id: "12345", secret: "1234567890123456", expectedError: true, bts: nil}, // invalid parcel size - {id: "", secret: "1234567890123456", expectedError: true, bts: nil}, // invalid parcel size - {id: "123456", secret: "", expectedError: true, bts: nil}, // invalid parcel size - {id: "Abcdef", secret: "1234567890123456", expectedError: true, bts: nil}, // invalid token id - {id: "123456", secret: "AABBCCDDEEFFGGHH", expectedError: true, bts: nil}, // invalid token secret - {id: "123456", secret: "AABBCCD-EEFFGGHH", expectedError: true, bts: nil}, // invalid character - {id: "abc*ef", secret: "1234567890123456", expectedError: true, bts: nil}, // invalid character - {id: "abcdef", secret: "1234567890123456", expectedError: false, bts: 
&BootstrapTokenString{ID: "abcdef", Secret: "1234567890123456"}}, - {id: "123456", secret: "aabbccddeeffgghh", expectedError: false, bts: &BootstrapTokenString{ID: "123456", Secret: "aabbccddeeffgghh"}}, - {id: "abcdef", secret: "abcdef0123456789", expectedError: false, bts: &BootstrapTokenString{ID: "abcdef", Secret: "abcdef0123456789"}}, - {id: "123456", secret: "1234560123456789", expectedError: false, bts: &BootstrapTokenString{ID: "123456", Secret: "1234560123456789"}}, - } - for _, rt := range tests { - actual, err := NewBootstrapTokenStringFromIDAndSecret(rt.id, rt.secret) - if (err != nil) != rt.expectedError { - t.Errorf( - "failed NewBootstrapTokenStringFromIDAndSecret for the token with id %q and secret %q\n\texpected error: %t\n\t actual error: %v", - rt.id, - rt.secret, - rt.expectedError, - err, - ) - } else if !reflect.DeepEqual(actual, rt.bts) { - t.Errorf( - "failed NewBootstrapTokenStringFromIDAndSecret for the token with id %q and secret %q\n\texpected: %v\n\t actual: %v", - rt.id, - rt.secret, - rt.bts, - actual, - ) - } - } -} diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/conversion.go deleted file mode 100644 index a5de5c45631..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/conversion.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha2 - -import ( - "unsafe" - - "k8s.io/apimachinery/pkg/conversion" - kubeproxyconfigv1alpha1 "k8s.io/kube-proxy/config/v1alpha1" - kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" - "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" - "k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs" - kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" - kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" -) - -func Convert_v1alpha2_InitConfiguration_To_kubeadm_InitConfiguration(in *InitConfiguration, out *kubeadm.InitConfiguration, s conversion.Scope) error { - if err := autoConvert_v1alpha2_InitConfiguration_To_kubeadm_InitConfiguration(in, out, s); err != nil { - return err - } - if err := split_v1alpha2_InitConfiguration_into_kubeadm_ClusterConfiguration(in, &out.ClusterConfiguration, s); err != nil { - return err - } - if err := split_v1alpha2_InitConfiguration_into_kubeadm_APIEndpoint(in, &out.APIEndpoint, s); err != nil { - return err - } - return nil -} - -func split_v1alpha2_InitConfiguration_into_kubeadm_APIEndpoint(in *InitConfiguration, out *kubeadm.APIEndpoint, s conversion.Scope) error { - out.AdvertiseAddress = in.API.AdvertiseAddress - out.BindPort = in.API.BindPort - // in.API.ControlPlaneEndpoint will be splitted into ClusterConfiguration - return nil -} - -func split_v1alpha2_InitConfiguration_into_kubeadm_ClusterConfiguration(in *InitConfiguration, out *kubeadm.ClusterConfiguration, s conversion.Scope) error { - if err := split_v1alpha2_InitConfiguration_into_kubeadm_ComponentConfigs(in, &out.ComponentConfigs, s); err != nil { - return err - } - if err := Convert_v1alpha2_Networking_To_kubeadm_Networking(&in.Networking, &out.Networking, s); err != nil { - return err - } - if err := Convert_v1alpha2_Etcd_To_kubeadm_Etcd(&in.Etcd, &out.Etcd, s); err != nil { - return err - } - if err := Convert_v1alpha2_AuditPolicyConfiguration_To_kubeadm_AuditPolicyConfiguration(&in.AuditPolicyConfiguration, &out.AuditPolicyConfiguration, 
s); err != nil { - return err - } - out.KubernetesVersion = in.KubernetesVersion - out.ControlPlaneEndpoint = in.API.ControlPlaneEndpoint - out.APIServerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.APIServerExtraArgs)) - out.ControllerManagerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ControllerManagerExtraArgs)) - out.SchedulerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.SchedulerExtraArgs)) - out.APIServerExtraVolumes = *(*[]kubeadm.HostPathMount)(unsafe.Pointer(&in.APIServerExtraVolumes)) - out.ControllerManagerExtraVolumes = *(*[]kubeadm.HostPathMount)(unsafe.Pointer(&in.ControllerManagerExtraVolumes)) - out.SchedulerExtraVolumes = *(*[]kubeadm.HostPathMount)(unsafe.Pointer(&in.SchedulerExtraVolumes)) - out.APIServerCertSANs = *(*[]string)(unsafe.Pointer(&in.APIServerCertSANs)) - out.CertificatesDir = in.CertificatesDir - out.ImageRepository = in.ImageRepository - out.UnifiedControlPlaneImage = in.UnifiedControlPlaneImage - out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) - out.ClusterName = in.ClusterName - return nil -} - -func split_v1alpha2_InitConfiguration_into_kubeadm_ComponentConfigs(in *InitConfiguration, out *kubeadm.ComponentConfigs, s conversion.Scope) error { - if in.KubeProxy.Config != nil { - if out.KubeProxy == nil { - out.KubeProxy = &kubeproxyconfig.KubeProxyConfiguration{} - } - - if err := componentconfigs.Scheme.Convert(in.KubeProxy.Config, out.KubeProxy, nil); err != nil { - return err - } - } - if in.KubeletConfiguration.BaseConfig != nil { - if out.Kubelet == nil { - out.Kubelet = &kubeletconfig.KubeletConfiguration{} - } - - if err := componentconfigs.Scheme.Convert(in.KubeletConfiguration.BaseConfig, out.Kubelet, nil); err != nil { - return err - } - } - return nil -} - -func Convert_v1alpha2_JoinConfiguration_To_kubeadm_JoinConfiguration(in *JoinConfiguration, out *kubeadm.JoinConfiguration, s conversion.Scope) error { - if err := 
autoConvert_v1alpha2_JoinConfiguration_To_kubeadm_JoinConfiguration(in, out, s); err != nil { - return err - } - out.APIEndpoint.AdvertiseAddress = in.AdvertiseAddress - out.APIEndpoint.BindPort = in.BindPort - return nil -} - -func Convert_kubeadm_InitConfiguration_To_v1alpha2_InitConfiguration(in *kubeadm.InitConfiguration, out *InitConfiguration, s conversion.Scope) error { - if err := autoConvert_kubeadm_InitConfiguration_To_v1alpha2_InitConfiguration(in, out, s); err != nil { - return err - } - if err := join_kubeadm_ClusterConfiguration_into_v1alpha2_InitConfiguration(&in.ClusterConfiguration, out, s); err != nil { - return err - } - if err := join_kubeadm_APIEndpoint_into_v1alpha2_InitConfiguration(&in.APIEndpoint, out, s); err != nil { - return err - } - return nil -} - -func join_kubeadm_ClusterConfiguration_into_v1alpha2_InitConfiguration(in *kubeadm.ClusterConfiguration, out *InitConfiguration, s conversion.Scope) error { - if err := join_kubeadm_ComponentConfigs_into_v1alpha2_InitConfiguration(&in.ComponentConfigs, out, s); err != nil { - return err - } - if err := Convert_kubeadm_Etcd_To_v1alpha2_Etcd(&in.Etcd, &out.Etcd, s); err != nil { - return err - } - if err := Convert_kubeadm_Networking_To_v1alpha2_Networking(&in.Networking, &out.Networking, s); err != nil { - return err - } - if err := Convert_kubeadm_AuditPolicyConfiguration_To_v1alpha2_AuditPolicyConfiguration(&in.AuditPolicyConfiguration, &out.AuditPolicyConfiguration, s); err != nil { - return err - } - out.KubernetesVersion = in.KubernetesVersion - out.API.ControlPlaneEndpoint = in.ControlPlaneEndpoint - out.APIServerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.APIServerExtraArgs)) - out.ControllerManagerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ControllerManagerExtraArgs)) - out.SchedulerExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.SchedulerExtraArgs)) - out.APIServerExtraVolumes = *(*[]HostPathMount)(unsafe.Pointer(&in.APIServerExtraVolumes)) - 
out.ControllerManagerExtraVolumes = *(*[]HostPathMount)(unsafe.Pointer(&in.ControllerManagerExtraVolumes)) - out.SchedulerExtraVolumes = *(*[]HostPathMount)(unsafe.Pointer(&in.SchedulerExtraVolumes)) - out.APIServerCertSANs = *(*[]string)(unsafe.Pointer(&in.APIServerCertSANs)) - out.CertificatesDir = in.CertificatesDir - out.ImageRepository = in.ImageRepository - out.UnifiedControlPlaneImage = in.UnifiedControlPlaneImage - out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) - out.ClusterName = in.ClusterName - return nil -} - -func join_kubeadm_APIEndpoint_into_v1alpha2_InitConfiguration(in *kubeadm.APIEndpoint, out *InitConfiguration, s conversion.Scope) error { - out.API.AdvertiseAddress = in.AdvertiseAddress - out.API.BindPort = in.BindPort - // out.API.ControlPlaneEndpoint will join from ClusterConfiguration - return nil -} - -func join_kubeadm_ComponentConfigs_into_v1alpha2_InitConfiguration(in *kubeadm.ComponentConfigs, out *InitConfiguration, s conversion.Scope) error { - if in.KubeProxy != nil { - if out.KubeProxy.Config == nil { - out.KubeProxy.Config = &kubeproxyconfigv1alpha1.KubeProxyConfiguration{} - } - - if err := componentconfigs.Scheme.Convert(in.KubeProxy, out.KubeProxy.Config, nil); err != nil { - return err - } - } - if in.Kubelet != nil { - if out.KubeletConfiguration.BaseConfig == nil { - out.KubeletConfiguration.BaseConfig = &kubeletconfigv1beta1.KubeletConfiguration{} - } - - if err := componentconfigs.Scheme.Convert(in.Kubelet, out.KubeletConfiguration.BaseConfig, nil); err != nil { - return err - } - } - return nil -} - -func Convert_kubeadm_JoinConfiguration_To_v1alpha2_JoinConfiguration(in *kubeadm.JoinConfiguration, out *JoinConfiguration, s conversion.Scope) error { - if err := autoConvert_kubeadm_JoinConfiguration_To_v1alpha2_JoinConfiguration(in, out, s); err != nil { - return err - } - out.AdvertiseAddress = in.APIEndpoint.AdvertiseAddress - out.BindPort = in.APIEndpoint.BindPort - return nil -} diff --git 
a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go deleted file mode 100644 index c37fe7ffffb..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults.go +++ /dev/null @@ -1,275 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha2 - -import ( - "net/url" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - kubeproxyconfigv1alpha1 "k8s.io/kube-proxy/config/v1alpha1" - kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" - "k8s.io/kubernetes/cmd/kubeadm/app/constants" - kubeletscheme "k8s.io/kubernetes/pkg/kubelet/apis/config/scheme" - kubeproxyscheme "k8s.io/kubernetes/pkg/proxy/apis/config/scheme" - utilpointer "k8s.io/utils/pointer" -) - -const ( - // DefaultServiceDNSDomain defines default cluster-internal domain name for Services and Pods - DefaultServiceDNSDomain = "cluster.local" - // DefaultServicesSubnet defines default service subnet range - DefaultServicesSubnet = "10.96.0.0/12" - // DefaultClusterDNSIP defines default DNS IP - DefaultClusterDNSIP = "10.96.0.10" - // DefaultKubernetesVersion defines default kubernetes version - DefaultKubernetesVersion = "stable-1.11" - // DefaultAPIBindPort defines default API port - DefaultAPIBindPort = 6443 - // DefaultCertificatesDir defines default certificate directory - DefaultCertificatesDir = "/etc/kubernetes/pki" - // DefaultImageRepository defines default image 
registry - DefaultImageRepository = "k8s.gcr.io" - // DefaultManifestsDir defines default manifests directory - DefaultManifestsDir = "/etc/kubernetes/manifests" - // DefaultClusterName defines the default cluster name - DefaultClusterName = "kubernetes" - - // DefaultEtcdDataDir defines default location of etcd where static pods will save data to - DefaultEtcdDataDir = "/var/lib/etcd" - // DefaultProxyBindAddressv4 is the default bind address when the advertise address is v4 - DefaultProxyBindAddressv4 = "0.0.0.0" - // DefaultProxyBindAddressv6 is the default bind address when the advertise address is v6 - DefaultProxyBindAddressv6 = "::" - // KubeproxyKubeConfigFileName defines the file name for the kube-proxy's KubeConfig file - KubeproxyKubeConfigFileName = "/var/lib/kube-proxy/kubeconfig.conf" - - // DefaultDiscoveryTimeout specifies the default discovery timeout for kubeadm (used unless one is specified in the JoinConfiguration) - DefaultDiscoveryTimeout = 5 * time.Minute -) - -var ( - // DefaultAuditPolicyLogMaxAge is defined as a var so its address can be taken - // It is the number of days to store audit logs - DefaultAuditPolicyLogMaxAge = int32(2) -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} - -// SetDefaults_InitConfiguration assigns default values to Master node -func SetDefaults_InitConfiguration(obj *InitConfiguration) { - if obj.KubernetesVersion == "" { - obj.KubernetesVersion = DefaultKubernetesVersion - } - - if obj.API.BindPort == 0 { - obj.API.BindPort = DefaultAPIBindPort - } - - if obj.Networking.ServiceSubnet == "" { - obj.Networking.ServiceSubnet = DefaultServicesSubnet - } - - if obj.Networking.DNSDomain == "" { - obj.Networking.DNSDomain = DefaultServiceDNSDomain - } - - if obj.CertificatesDir == "" { - obj.CertificatesDir = DefaultCertificatesDir - } - - if obj.ImageRepository == "" { - obj.ImageRepository = DefaultImageRepository - } - - if obj.ClusterName == "" { - obj.ClusterName = 
DefaultClusterName - } - - SetDefaults_NodeRegistrationOptions(&obj.NodeRegistration) - SetDefaults_BootstrapTokens(obj) - SetDefaults_KubeletConfiguration(obj) - SetDefaults_Etcd(obj) - SetDefaults_ProxyConfiguration(obj) - SetDefaults_AuditPolicyConfiguration(obj) -} - -// SetDefaults_Etcd assigns default values for the Proxy -func SetDefaults_Etcd(obj *InitConfiguration) { - if obj.Etcd.External == nil && obj.Etcd.Local == nil { - obj.Etcd.Local = &LocalEtcd{} - } - if obj.Etcd.Local != nil { - if obj.Etcd.Local.DataDir == "" { - obj.Etcd.Local.DataDir = DefaultEtcdDataDir - } - } -} - -// SetDefaults_ProxyConfiguration assigns default values for the Proxy -func SetDefaults_ProxyConfiguration(obj *InitConfiguration) { - // IMPORTANT NOTE: If you're changing this code you should mirror it to cmd/kubeadm/app/componentconfig/defaults.go - // and cmd/kubeadm/app/apis/kubeadm/v1alpha3/conversion.go. - if obj.KubeProxy.Config == nil { - obj.KubeProxy.Config = &kubeproxyconfigv1alpha1.KubeProxyConfiguration{} - } - if obj.KubeProxy.Config.ClusterCIDR == "" && obj.Networking.PodSubnet != "" { - obj.KubeProxy.Config.ClusterCIDR = obj.Networking.PodSubnet - } - - if obj.KubeProxy.Config.ClientConnection.Kubeconfig == "" { - obj.KubeProxy.Config.ClientConnection.Kubeconfig = KubeproxyKubeConfigFileName - } - - kubeproxyscheme.Scheme.Default(obj.KubeProxy.Config) -} - -// SetDefaults_JoinConfiguration assigns default values to a regular node -func SetDefaults_JoinConfiguration(obj *JoinConfiguration) { - if obj.CACertPath == "" { - obj.CACertPath = DefaultCACertPath - } - if len(obj.TLSBootstrapToken) == 0 { - obj.TLSBootstrapToken = obj.Token - } - if len(obj.DiscoveryToken) == 0 && len(obj.DiscoveryFile) == 0 { - obj.DiscoveryToken = obj.Token - } - // Make sure file URLs become paths - if len(obj.DiscoveryFile) != 0 { - u, err := url.Parse(obj.DiscoveryFile) - if err == nil && u.Scheme == "file" { - obj.DiscoveryFile = u.Path - } - } - if obj.DiscoveryTimeout == nil { - 
obj.DiscoveryTimeout = &metav1.Duration{ - Duration: DefaultDiscoveryTimeout, - } - } - if obj.ClusterName == "" { - obj.ClusterName = DefaultClusterName - } - - if obj.BindPort == 0 { - obj.BindPort = DefaultAPIBindPort - } - - SetDefaults_NodeRegistrationOptions(&obj.NodeRegistration) -} - -// SetDefaults_KubeletConfiguration assigns default values to kubelet -func SetDefaults_KubeletConfiguration(obj *InitConfiguration) { - // IMPORTANT NOTE: If you're changing this code you should mirror it to cmd/kubeadm/app/componentconfig/defaults.go - // and cmd/kubeadm/app/apis/kubeadm/v1alpha3/conversion.go. - if obj.KubeletConfiguration.BaseConfig == nil { - obj.KubeletConfiguration.BaseConfig = &kubeletconfigv1beta1.KubeletConfiguration{} - } - if obj.KubeletConfiguration.BaseConfig.StaticPodPath == "" { - obj.KubeletConfiguration.BaseConfig.StaticPodPath = DefaultManifestsDir - } - if obj.KubeletConfiguration.BaseConfig.ClusterDNS == nil { - dnsIP, err := constants.GetDNSIP(obj.Networking.ServiceSubnet) - if err != nil { - obj.KubeletConfiguration.BaseConfig.ClusterDNS = []string{DefaultClusterDNSIP} - } else { - obj.KubeletConfiguration.BaseConfig.ClusterDNS = []string{dnsIP.String()} - } - } - if obj.KubeletConfiguration.BaseConfig.ClusterDomain == "" { - obj.KubeletConfiguration.BaseConfig.ClusterDomain = obj.Networking.DNSDomain - } - - // Enforce security-related kubelet options - - // Require all clients to the kubelet API to have client certs signed by the cluster CA - obj.KubeletConfiguration.BaseConfig.Authentication.X509.ClientCAFile = DefaultCACertPath - obj.KubeletConfiguration.BaseConfig.Authentication.Anonymous.Enabled = utilpointer.BoolPtr(false) - - // On every client request to the kubelet API, execute a webhook (SubjectAccessReview request) to the API server - // and ask it whether the client is authorized to access the kubelet API - obj.KubeletConfiguration.BaseConfig.Authorization.Mode = kubeletconfigv1beta1.KubeletAuthorizationModeWebhook - - // 
Let clients using other authentication methods like ServiceAccount tokens also access the kubelet API - obj.KubeletConfiguration.BaseConfig.Authentication.Webhook.Enabled = utilpointer.BoolPtr(true) - - // Disable the readonly port of the kubelet, in order to not expose unnecessary information - obj.KubeletConfiguration.BaseConfig.ReadOnlyPort = 0 - - // Enables client certificate rotation for the kubelet - obj.KubeletConfiguration.BaseConfig.RotateCertificates = true - - // Serve a /healthz webserver on localhost:10248 that kubeadm can talk to - obj.KubeletConfiguration.BaseConfig.HealthzBindAddress = "127.0.0.1" - obj.KubeletConfiguration.BaseConfig.HealthzPort = utilpointer.Int32Ptr(constants.KubeletHealthzPort) - - scheme, _, _ := kubeletscheme.NewSchemeAndCodecs() - if scheme != nil { - scheme.Default(obj.KubeletConfiguration.BaseConfig) - } -} - -func SetDefaults_NodeRegistrationOptions(obj *NodeRegistrationOptions) { - if obj.CRISocket == "" { - obj.CRISocket = DefaultCRISocket - } -} - -// SetDefaults_AuditPolicyConfiguration sets default values for the AuditPolicyConfiguration -func SetDefaults_AuditPolicyConfiguration(obj *InitConfiguration) { - if obj.AuditPolicyConfiguration.LogDir == "" { - obj.AuditPolicyConfiguration.LogDir = constants.StaticPodAuditPolicyLogDir - } - if obj.AuditPolicyConfiguration.LogMaxAge == nil { - obj.AuditPolicyConfiguration.LogMaxAge = &DefaultAuditPolicyLogMaxAge - } -} - -// SetDefaults_BootstrapTokens sets the defaults for the .BootstrapTokens field -// If the slice is empty, it's defaulted with one token. Otherwise it just loops -// through the slice and sets the defaults for the omitempty fields that are TTL, -// Usages and Groups. Token is NOT defaulted with a random one in the API defaulting -// layer, but set to a random value later at runtime if not set before. 
-func SetDefaults_BootstrapTokens(obj *InitConfiguration) { - - if obj.BootstrapTokens == nil || len(obj.BootstrapTokens) == 0 { - obj.BootstrapTokens = []BootstrapToken{{}} - } - - for i := range obj.BootstrapTokens { - SetDefaults_BootstrapToken(&obj.BootstrapTokens[i]) - } -} - -// SetDefaults_BootstrapToken sets the defaults for an individual Bootstrap Token -func SetDefaults_BootstrapToken(bt *BootstrapToken) { - if bt.TTL == nil { - bt.TTL = &metav1.Duration{ - Duration: constants.DefaultTokenDuration, - } - } - if len(bt.Usages) == 0 { - bt.Usages = constants.DefaultTokenUsages - } - - if len(bt.Groups) == 0 { - bt.Groups = constants.DefaultTokenGroups - } -} diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults_unix.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults_unix.go deleted file mode 100644 index 0c5ee1035a7..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !windows - -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha2 - -const ( - // DefaultCACertPath defines default location of CA certificate on Linux - DefaultCACertPath = "/etc/kubernetes/pki/ca.crt" - // DefaultCRISocket defines the default cri socket - DefaultCRISocket = "/var/run/dockershim.sock" -) diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/doc.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/doc.go deleted file mode 100644 index 578b1b2269e..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1alpha2 is the package that contains the libraries that drive the kubeadm binary. -// +k8s:defaulter-gen=TypeMeta -// +groupName=kubeadm.k8s.io -// +k8s:deepcopy-gen=package -// +k8s:conversion-gen=k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm -package v1alpha2 // import "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2" diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/register.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/register.go deleted file mode 100644 index d5cc0bc0d2b..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/register.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha2 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "kubeadm.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - - // SchemeBuilder points to a list of functions added to Scheme. - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - // AddToScheme applies all the stored functions to the scheme. - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. 
- localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs) -} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("MasterConfiguration"), &InitConfiguration{}) - scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("NodeConfiguration"), &JoinConfiguration{}) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go deleted file mode 100644 index 6bac7781cba..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/types.go +++ /dev/null @@ -1,331 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha2 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubeproxyconfigv1alpha1 "k8s.io/kube-proxy/config/v1alpha1" - kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InitConfiguration contains a list of elements which make up master's -// configuration object. -type InitConfiguration struct { - metav1.TypeMeta `json:",inline"` - - // `kubeadm init`-only information. These fields are solely used the first time `kubeadm init` runs. - // After that, the information in the fields ARE NOT uploaded to the `kubeadm-config` ConfigMap - // that is used by `kubeadm upgrade` for instance. These fields must be omitempty. - - // BootstrapTokens is respected at `kubeadm init` time and describes a set of Bootstrap Tokens to create. - // This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature - BootstrapTokens []BootstrapToken `json:"bootstrapTokens,omitempty"` - - // NodeRegistration holds fields that relate to registering the new master node to the cluster - NodeRegistration NodeRegistrationOptions `json:"nodeRegistration,omitempty"` - - // Cluster-wide configuration - // TODO: Move these fields under some kind of ClusterConfiguration or similar struct that describes - // one cluster. Eventually we want this kind of spec to align well with the Cluster API spec. - - // API holds configuration for the k8s apiserver. - API API `json:"api"` - // KubeProxy holds configuration for the k8s service proxy. - KubeProxy KubeProxy `json:"kubeProxy"` - // Etcd holds configuration for etcd. - Etcd Etcd `json:"etcd"` - // KubeletConfiguration holds configuration for the kubelet. - KubeletConfiguration KubeletConfiguration `json:"kubeletConfiguration"` - // Networking holds configuration for the networking topology of the cluster. 
- Networking Networking `json:"networking"` - - // KubernetesVersion is the target version of the control plane. - KubernetesVersion string `json:"kubernetesVersion"` - - // APIServerExtraArgs is a set of extra flags to pass to the API Server or override - // default ones in form of =. - // TODO: This is temporary and ideally we would like to switch all components to - // use ComponentConfig + ConfigMaps. - APIServerExtraArgs map[string]string `json:"apiServerExtraArgs,omitempty"` - // ControllerManagerExtraArgs is a set of extra flags to pass to the Controller Manager - // or override default ones in form of = - // TODO: This is temporary and ideally we would like to switch all components to - // use ComponentConfig + ConfigMaps. - ControllerManagerExtraArgs map[string]string `json:"controllerManagerExtraArgs,omitempty"` - // SchedulerExtraArgs is a set of extra flags to pass to the Scheduler or override - // default ones in form of = - // TODO: This is temporary and ideally we would like to switch all components to - // use ComponentConfig + ConfigMaps. - SchedulerExtraArgs map[string]string `json:"schedulerExtraArgs,omitempty"` - - // APIServerExtraVolumes is an extra set of host volumes mounted to the API server. - APIServerExtraVolumes []HostPathMount `json:"apiServerExtraVolumes,omitempty"` - // ControllerManagerExtraVolumes is an extra set of host volumes mounted to the - // Controller Manager. - ControllerManagerExtraVolumes []HostPathMount `json:"controllerManagerExtraVolumes,omitempty"` - // SchedulerExtraVolumes is an extra set of host volumes mounted to the scheduler. - SchedulerExtraVolumes []HostPathMount `json:"schedulerExtraVolumes,omitempty"` - - // APIServerCertSANs sets extra Subject Alternative Names for the API Server signing cert. - APIServerCertSANs []string `json:"apiServerCertSANs,omitempty"` - // CertificatesDir specifies where to store or look for all required certificates. 
- CertificatesDir string `json:"certificatesDir"` - - // ImageRepository what container registry to pull control plane images from - ImageRepository string `json:"imageRepository"` - // UnifiedControlPlaneImage specifies if a specific container image should - // be used for all control plane components. - UnifiedControlPlaneImage string `json:"unifiedControlPlaneImage"` - - // AuditPolicyConfiguration defines the options for the api server audit system - AuditPolicyConfiguration AuditPolicyConfiguration `json:"auditPolicy"` - - // FeatureGates enabled by the user. - FeatureGates map[string]bool `json:"featureGates,omitempty"` - - // The cluster name - ClusterName string `json:"clusterName,omitempty"` -} - -// API struct contains elements of API server address. -type API struct { - // AdvertiseAddress sets the IP address for the API server to advertise. - AdvertiseAddress string `json:"advertiseAddress"` - // ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it - // can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. - // In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort - // are used; in case the ControlPlaneEndpoint is specified but without a TCP port, - // the BindPort is used. - // Possible usages are: - // e.g. In an cluster with more than one control plane instances, this field should be - // assigned the address of the external load balancer in front of the - // control plane instances. - // e.g. in environments with enforced node recycling, the ControlPlaneEndpoint - // could be used for assigning a stable DNS to the control plane. - ControlPlaneEndpoint string `json:"controlPlaneEndpoint"` - // BindPort sets the secure port for the API Server to bind to. - // Defaults to 6443. 
- BindPort int32 `json:"bindPort"` -} - -// NodeRegistrationOptions holds fields that relate to registering a new master or node to the cluster, either via "kubeadm init" or "kubeadm join" -type NodeRegistrationOptions struct { - - // Name is the `.Metadata.Name` field of the Node API object that will be created in this `kubeadm init` or `kubeadm joiń` operation. - // This field is also used in the CommonName field of the kubelet's client certificate to the API server. - // Defaults to the hostname of the node if not provided. - Name string `json:"name,omitempty"` - - // CRISocket is used to retrieve container runtime info. This information will be annotated to the Node API object, for later re-use - CRISocket string `json:"criSocket,omitempty"` - - // Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, in the `kubeadm init` process - // it will be defaulted to []v1.Taint{'node-role.kubernetes.io/master=""'}. If you don't want to taint your master node, set this field to an - // empty slice, i.e. `taints: {}` in the YAML file. This field is solely used for Node registration. - Taints []v1.Taint `json:"taints,omitempty"` - - // KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file - // kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config-1.X ConfigMap - // Flags have higher higher priority when parsing. These values are local and specific to the node kubeadm is executing on. - KubeletExtraArgs map[string]string `json:"kubeletExtraArgs,omitempty"` -} - -// Networking contains elements describing cluster's networking configuration -type Networking struct { - // ServiceSubnet is the subnet used by k8s services. Defaults to "10.96.0.0/12". - ServiceSubnet string `json:"serviceSubnet"` - // PodSubnet is the subnet used by pods. 
- PodSubnet string `json:"podSubnet"` - // DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local". - DNSDomain string `json:"dnsDomain"` -} - -// BootstrapToken describes one bootstrap token, stored as a Secret in the cluster -type BootstrapToken struct { - // Token is used for establishing bidirectional trust between nodes and masters. - // Used for joining nodes in the cluster. - Token *BootstrapTokenString `json:"token"` - // Description sets a human-friendly message why this token exists and what it's used - // for, so other administrators can know its purpose. - Description string `json:"description,omitempty"` - // TTL defines the time to live for this token. Defaults to 24h. - // Expires and TTL are mutually exclusive. - TTL *metav1.Duration `json:"ttl,omitempty"` - // Expires specifies the timestamp when this token expires. Defaults to being set - // dynamically at runtime based on the TTL. Expires and TTL are mutually exclusive. - Expires *metav1.Time `json:"expires,omitempty"` - // Usages describes the ways in which this token can be used. Can by default be used - // for establishing bidirectional trust, but that can be changed here. - Usages []string `json:"usages,omitempty"` - // Groups specifies the extra groups that this token will authenticate as when/if - // used for authentication - Groups []string `json:"groups,omitempty"` -} - -// Etcd contains elements describing Etcd configuration. -type Etcd struct { - - // Local provides configuration knobs for configuring the local etcd instance - // Local and External are mutually exclusive - Local *LocalEtcd `json:"local,omitempty"` - - // External describes how to connect to an external etcd cluster - // Local and External are mutually exclusive - External *ExternalEtcd `json:"external,omitempty"` -} - -// LocalEtcd describes that kubeadm should run an etcd cluster locally -type LocalEtcd struct { - - // Image specifies which container image to use for running etcd. 
- // If empty, automatically populated by kubeadm using the image - // repository and default etcd version. - Image string `json:"image"` - - // DataDir is the directory etcd will place its data. - // Defaults to "/var/lib/etcd". - DataDir string `json:"dataDir"` - - // ExtraArgs are extra arguments provided to the etcd binary - // when run inside a static pod. - ExtraArgs map[string]string `json:"extraArgs,omitempty"` - - // ServerCertSANs sets extra Subject Alternative Names for the etcd server signing cert. - ServerCertSANs []string `json:"serverCertSANs,omitempty"` - // PeerCertSANs sets extra Subject Alternative Names for the etcd peer signing cert. - PeerCertSANs []string `json:"peerCertSANs,omitempty"` -} - -// ExternalEtcd describes an external etcd cluster -type ExternalEtcd struct { - - // Endpoints of etcd members. Useful for using external etcd. - // If not provided, kubeadm will run etcd in a static pod. - Endpoints []string `json:"endpoints"` - // CAFile is an SSL Certificate Authority file used to secure etcd communication. - CAFile string `json:"caFile"` - // CertFile is an SSL certification file used to secure etcd communication. - CertFile string `json:"certFile"` - // KeyFile is an SSL key file used to secure etcd communication. - KeyFile string `json:"keyFile"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// JoinConfiguration contains elements describing a particular node. -// TODO: This struct should be replaced by dynamic kubelet configuration. -type JoinConfiguration struct { - metav1.TypeMeta `json:",inline"` - - // NodeRegistration holds fields that relate to registering the new master node to the cluster - NodeRegistration NodeRegistrationOptions `json:"nodeRegistration"` - - // CACertPath is the path to the SSL certificate authority used to - // secure comunications between node and master. - // Defaults to "/etc/kubernetes/pki/ca.crt". 
- CACertPath string `json:"caCertPath"` - // DiscoveryFile is a file or url to a kubeconfig file from which to - // load cluster information. - DiscoveryFile string `json:"discoveryFile"` - // DiscoveryToken is a token used to validate cluster information - // fetched from the master. - DiscoveryToken string `json:"discoveryToken"` - // DiscoveryTokenAPIServers is a set of IPs to API servers from which info - // will be fetched. Currently we only pay attention to one API server but - // hope to support >1 in the future. - DiscoveryTokenAPIServers []string `json:"discoveryTokenAPIServers,omitempty"` - // DiscoveryTimeout modifies the discovery timeout - DiscoveryTimeout *metav1.Duration `json:"discoveryTimeout,omitempty"` - // TLSBootstrapToken is a token used for TLS bootstrapping. - // Defaults to Token. - TLSBootstrapToken string `json:"tlsBootstrapToken"` - // Token is used for both discovery and TLS bootstrapping. - Token string `json:"token"` - - // ClusterName is the name for the cluster in kubeconfig. - ClusterName string `json:"clusterName,omitempty"` - - // DiscoveryTokenCACertHashes specifies a set of public key pins to verify - // when token-based discovery is used. The root CA found during discovery - // must match one of these values. Specifying an empty set disables root CA - // pinning, which can be unsafe. Each hash is specified as ":", - // where the only currently supported type is "sha256". This is a hex-encoded - // SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded - // ASN.1. These hashes can be calculated using, for example, OpenSSL: - // openssl x509 -pubkey -in ca.crt openssl rsa -pubin -outform der 2>&/dev/null | openssl dgst -sha256 -hex - DiscoveryTokenCACertHashes []string `json:"discoveryTokenCACertHashes,omitempty"` - - // DiscoveryTokenUnsafeSkipCAVerification allows token-based discovery - // without CA verification via DiscoveryTokenCACertHashes. 
This can weaken - // the security of kubeadm since other nodes can impersonate the master. - DiscoveryTokenUnsafeSkipCAVerification bool `json:"discoveryTokenUnsafeSkipCAVerification"` - - // ControlPlane flag specifies that the joining node should host an additional - // control plane instance. - ControlPlane bool `json:"controlPlane,omitempty"` - - // AdvertiseAddress sets the IP address for the API server to advertise; the - // API server will be installed only on nodes hosting an additional control plane instance. - AdvertiseAddress string `json:"advertiseAddress,omitempty"` - - // BindPort sets the secure port for the API Server to bind to. - // Defaults to 6443. - BindPort int32 `json:"bindPort,omitempty"` - - // FeatureGates enabled by the user. - FeatureGates map[string]bool `json:"featureGates,omitempty"` -} - -// KubeletConfiguration contains elements describing initial remote configuration of kubelet. -type KubeletConfiguration struct { - BaseConfig *kubeletconfigv1beta1.KubeletConfiguration `json:"baseConfig,omitempty"` -} - -// HostPathMount contains elements describing volumes that are mounted from the -// host. -type HostPathMount struct { - // Name of the volume inside the pod template. - Name string `json:"name"` - // HostPath is the path in the host that will be mounted inside - // the pod. - HostPath string `json:"hostPath"` - // MountPath is the path inside the pod where hostPath will be mounted. - MountPath string `json:"mountPath"` - // Writable controls write access to the volume - Writable bool `json:"writable,omitempty"` - // PathType is the type of the HostPath. - PathType v1.HostPathType `json:"pathType,omitempty"` -} - -// KubeProxy contains elements describing the proxy configuration. -type KubeProxy struct { - Config *kubeproxyconfigv1alpha1.KubeProxyConfiguration `json:"config,omitempty"` -} - -// AuditPolicyConfiguration holds the options for configuring the api server audit policy. 
-type AuditPolicyConfiguration struct { - // Path is the local path to an audit policy. - Path string `json:"path"` - // LogDir is the local path to the directory where logs should be stored. - LogDir string `json:"logDir"` - // LogMaxAge is the number of days logs will be stored for. 0 indicates forever. - LogMaxAge *int32 `json:"logMaxAge,omitempty"` - //TODO(chuckha) add other options for audit policy. -} diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go deleted file mode 100644 index 33f3a607e5a..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.conversion.go +++ /dev/null @@ -1,479 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - unsafe "unsafe" - - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*AuditPolicyConfiguration)(nil), (*kubeadm.AuditPolicyConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_AuditPolicyConfiguration_To_kubeadm_AuditPolicyConfiguration(a.(*AuditPolicyConfiguration), b.(*kubeadm.AuditPolicyConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*kubeadm.AuditPolicyConfiguration)(nil), (*AuditPolicyConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kubeadm_AuditPolicyConfiguration_To_v1alpha2_AuditPolicyConfiguration(a.(*kubeadm.AuditPolicyConfiguration), b.(*AuditPolicyConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*BootstrapToken)(nil), (*kubeadm.BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_BootstrapToken_To_kubeadm_BootstrapToken(a.(*BootstrapToken), b.(*kubeadm.BootstrapToken), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*kubeadm.BootstrapToken)(nil), (*BootstrapToken)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kubeadm_BootstrapToken_To_v1alpha2_BootstrapToken(a.(*kubeadm.BootstrapToken), b.(*BootstrapToken), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*BootstrapTokenString)(nil), (*kubeadm.BootstrapTokenString)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_BootstrapTokenString_To_kubeadm_BootstrapTokenString(a.(*BootstrapTokenString), b.(*kubeadm.BootstrapTokenString), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*kubeadm.BootstrapTokenString)(nil), (*BootstrapTokenString)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_kubeadm_BootstrapTokenString_To_v1alpha2_BootstrapTokenString(a.(*kubeadm.BootstrapTokenString), b.(*BootstrapTokenString), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*Etcd)(nil), (*kubeadm.Etcd)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_Etcd_To_kubeadm_Etcd(a.(*Etcd), b.(*kubeadm.Etcd), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*kubeadm.Etcd)(nil), (*Etcd)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kubeadm_Etcd_To_v1alpha2_Etcd(a.(*kubeadm.Etcd), b.(*Etcd), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ExternalEtcd)(nil), (*kubeadm.ExternalEtcd)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_ExternalEtcd_To_kubeadm_ExternalEtcd(a.(*ExternalEtcd), b.(*kubeadm.ExternalEtcd), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*kubeadm.ExternalEtcd)(nil), (*ExternalEtcd)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kubeadm_ExternalEtcd_To_v1alpha2_ExternalEtcd(a.(*kubeadm.ExternalEtcd), b.(*ExternalEtcd), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*HostPathMount)(nil), (*kubeadm.HostPathMount)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_HostPathMount_To_kubeadm_HostPathMount(a.(*HostPathMount), b.(*kubeadm.HostPathMount), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*kubeadm.HostPathMount)(nil), (*HostPathMount)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kubeadm_HostPathMount_To_v1alpha2_HostPathMount(a.(*kubeadm.HostPathMount), b.(*HostPathMount), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*InitConfiguration)(nil), (*kubeadm.InitConfiguration)(nil), 
func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_InitConfiguration_To_kubeadm_InitConfiguration(a.(*InitConfiguration), b.(*kubeadm.InitConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*kubeadm.InitConfiguration)(nil), (*InitConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kubeadm_InitConfiguration_To_v1alpha2_InitConfiguration(a.(*kubeadm.InitConfiguration), b.(*InitConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*JoinConfiguration)(nil), (*kubeadm.JoinConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_JoinConfiguration_To_kubeadm_JoinConfiguration(a.(*JoinConfiguration), b.(*kubeadm.JoinConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*kubeadm.JoinConfiguration)(nil), (*JoinConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kubeadm_JoinConfiguration_To_v1alpha2_JoinConfiguration(a.(*kubeadm.JoinConfiguration), b.(*JoinConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*LocalEtcd)(nil), (*kubeadm.LocalEtcd)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_LocalEtcd_To_kubeadm_LocalEtcd(a.(*LocalEtcd), b.(*kubeadm.LocalEtcd), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*kubeadm.LocalEtcd)(nil), (*LocalEtcd)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kubeadm_LocalEtcd_To_v1alpha2_LocalEtcd(a.(*kubeadm.LocalEtcd), b.(*LocalEtcd), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*Networking)(nil), (*kubeadm.Networking)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha2_Networking_To_kubeadm_Networking(a.(*Networking), b.(*kubeadm.Networking), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*kubeadm.Networking)(nil), (*Networking)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kubeadm_Networking_To_v1alpha2_Networking(a.(*kubeadm.Networking), b.(*Networking), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NodeRegistrationOptions)(nil), (*kubeadm.NodeRegistrationOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(a.(*NodeRegistrationOptions), b.(*kubeadm.NodeRegistrationOptions), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*kubeadm.NodeRegistrationOptions)(nil), (*NodeRegistrationOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kubeadm_NodeRegistrationOptions_To_v1alpha2_NodeRegistrationOptions(a.(*kubeadm.NodeRegistrationOptions), b.(*NodeRegistrationOptions), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*kubeadm.InitConfiguration)(nil), (*InitConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kubeadm_InitConfiguration_To_v1alpha2_InitConfiguration(a.(*kubeadm.InitConfiguration), b.(*InitConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*kubeadm.JoinConfiguration)(nil), (*JoinConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_kubeadm_JoinConfiguration_To_v1alpha2_JoinConfiguration(a.(*kubeadm.JoinConfiguration), b.(*JoinConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*InitConfiguration)(nil), (*kubeadm.InitConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1alpha2_InitConfiguration_To_kubeadm_InitConfiguration(a.(*InitConfiguration), b.(*kubeadm.InitConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*JoinConfiguration)(nil), (*kubeadm.JoinConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha2_JoinConfiguration_To_kubeadm_JoinConfiguration(a.(*JoinConfiguration), b.(*kubeadm.JoinConfiguration), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1alpha2_AuditPolicyConfiguration_To_kubeadm_AuditPolicyConfiguration(in *AuditPolicyConfiguration, out *kubeadm.AuditPolicyConfiguration, s conversion.Scope) error { - out.Path = in.Path - out.LogDir = in.LogDir - out.LogMaxAge = (*int32)(unsafe.Pointer(in.LogMaxAge)) - return nil -} - -// Convert_v1alpha2_AuditPolicyConfiguration_To_kubeadm_AuditPolicyConfiguration is an autogenerated conversion function. -func Convert_v1alpha2_AuditPolicyConfiguration_To_kubeadm_AuditPolicyConfiguration(in *AuditPolicyConfiguration, out *kubeadm.AuditPolicyConfiguration, s conversion.Scope) error { - return autoConvert_v1alpha2_AuditPolicyConfiguration_To_kubeadm_AuditPolicyConfiguration(in, out, s) -} - -func autoConvert_kubeadm_AuditPolicyConfiguration_To_v1alpha2_AuditPolicyConfiguration(in *kubeadm.AuditPolicyConfiguration, out *AuditPolicyConfiguration, s conversion.Scope) error { - out.Path = in.Path - out.LogDir = in.LogDir - out.LogMaxAge = (*int32)(unsafe.Pointer(in.LogMaxAge)) - return nil -} - -// Convert_kubeadm_AuditPolicyConfiguration_To_v1alpha2_AuditPolicyConfiguration is an autogenerated conversion function. 
-func Convert_kubeadm_AuditPolicyConfiguration_To_v1alpha2_AuditPolicyConfiguration(in *kubeadm.AuditPolicyConfiguration, out *AuditPolicyConfiguration, s conversion.Scope) error { - return autoConvert_kubeadm_AuditPolicyConfiguration_To_v1alpha2_AuditPolicyConfiguration(in, out, s) -} - -func autoConvert_v1alpha2_BootstrapToken_To_kubeadm_BootstrapToken(in *BootstrapToken, out *kubeadm.BootstrapToken, s conversion.Scope) error { - out.Token = (*kubeadm.BootstrapTokenString)(unsafe.Pointer(in.Token)) - out.Description = in.Description - out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL)) - out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires)) - out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages)) - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - return nil -} - -// Convert_v1alpha2_BootstrapToken_To_kubeadm_BootstrapToken is an autogenerated conversion function. -func Convert_v1alpha2_BootstrapToken_To_kubeadm_BootstrapToken(in *BootstrapToken, out *kubeadm.BootstrapToken, s conversion.Scope) error { - return autoConvert_v1alpha2_BootstrapToken_To_kubeadm_BootstrapToken(in, out, s) -} - -func autoConvert_kubeadm_BootstrapToken_To_v1alpha2_BootstrapToken(in *kubeadm.BootstrapToken, out *BootstrapToken, s conversion.Scope) error { - out.Token = (*BootstrapTokenString)(unsafe.Pointer(in.Token)) - out.Description = in.Description - out.TTL = (*v1.Duration)(unsafe.Pointer(in.TTL)) - out.Expires = (*v1.Time)(unsafe.Pointer(in.Expires)) - out.Usages = *(*[]string)(unsafe.Pointer(&in.Usages)) - out.Groups = *(*[]string)(unsafe.Pointer(&in.Groups)) - return nil -} - -// Convert_kubeadm_BootstrapToken_To_v1alpha2_BootstrapToken is an autogenerated conversion function. 
-func Convert_kubeadm_BootstrapToken_To_v1alpha2_BootstrapToken(in *kubeadm.BootstrapToken, out *BootstrapToken, s conversion.Scope) error { - return autoConvert_kubeadm_BootstrapToken_To_v1alpha2_BootstrapToken(in, out, s) -} - -func autoConvert_v1alpha2_BootstrapTokenString_To_kubeadm_BootstrapTokenString(in *BootstrapTokenString, out *kubeadm.BootstrapTokenString, s conversion.Scope) error { - out.ID = in.ID - out.Secret = in.Secret - return nil -} - -// Convert_v1alpha2_BootstrapTokenString_To_kubeadm_BootstrapTokenString is an autogenerated conversion function. -func Convert_v1alpha2_BootstrapTokenString_To_kubeadm_BootstrapTokenString(in *BootstrapTokenString, out *kubeadm.BootstrapTokenString, s conversion.Scope) error { - return autoConvert_v1alpha2_BootstrapTokenString_To_kubeadm_BootstrapTokenString(in, out, s) -} - -func autoConvert_kubeadm_BootstrapTokenString_To_v1alpha2_BootstrapTokenString(in *kubeadm.BootstrapTokenString, out *BootstrapTokenString, s conversion.Scope) error { - out.ID = in.ID - out.Secret = in.Secret - return nil -} - -// Convert_kubeadm_BootstrapTokenString_To_v1alpha2_BootstrapTokenString is an autogenerated conversion function. -func Convert_kubeadm_BootstrapTokenString_To_v1alpha2_BootstrapTokenString(in *kubeadm.BootstrapTokenString, out *BootstrapTokenString, s conversion.Scope) error { - return autoConvert_kubeadm_BootstrapTokenString_To_v1alpha2_BootstrapTokenString(in, out, s) -} - -func autoConvert_v1alpha2_Etcd_To_kubeadm_Etcd(in *Etcd, out *kubeadm.Etcd, s conversion.Scope) error { - out.Local = (*kubeadm.LocalEtcd)(unsafe.Pointer(in.Local)) - out.External = (*kubeadm.ExternalEtcd)(unsafe.Pointer(in.External)) - return nil -} - -// Convert_v1alpha2_Etcd_To_kubeadm_Etcd is an autogenerated conversion function. 
-func Convert_v1alpha2_Etcd_To_kubeadm_Etcd(in *Etcd, out *kubeadm.Etcd, s conversion.Scope) error { - return autoConvert_v1alpha2_Etcd_To_kubeadm_Etcd(in, out, s) -} - -func autoConvert_kubeadm_Etcd_To_v1alpha2_Etcd(in *kubeadm.Etcd, out *Etcd, s conversion.Scope) error { - out.Local = (*LocalEtcd)(unsafe.Pointer(in.Local)) - out.External = (*ExternalEtcd)(unsafe.Pointer(in.External)) - return nil -} - -// Convert_kubeadm_Etcd_To_v1alpha2_Etcd is an autogenerated conversion function. -func Convert_kubeadm_Etcd_To_v1alpha2_Etcd(in *kubeadm.Etcd, out *Etcd, s conversion.Scope) error { - return autoConvert_kubeadm_Etcd_To_v1alpha2_Etcd(in, out, s) -} - -func autoConvert_v1alpha2_ExternalEtcd_To_kubeadm_ExternalEtcd(in *ExternalEtcd, out *kubeadm.ExternalEtcd, s conversion.Scope) error { - out.Endpoints = *(*[]string)(unsafe.Pointer(&in.Endpoints)) - out.CAFile = in.CAFile - out.CertFile = in.CertFile - out.KeyFile = in.KeyFile - return nil -} - -// Convert_v1alpha2_ExternalEtcd_To_kubeadm_ExternalEtcd is an autogenerated conversion function. -func Convert_v1alpha2_ExternalEtcd_To_kubeadm_ExternalEtcd(in *ExternalEtcd, out *kubeadm.ExternalEtcd, s conversion.Scope) error { - return autoConvert_v1alpha2_ExternalEtcd_To_kubeadm_ExternalEtcd(in, out, s) -} - -func autoConvert_kubeadm_ExternalEtcd_To_v1alpha2_ExternalEtcd(in *kubeadm.ExternalEtcd, out *ExternalEtcd, s conversion.Scope) error { - out.Endpoints = *(*[]string)(unsafe.Pointer(&in.Endpoints)) - out.CAFile = in.CAFile - out.CertFile = in.CertFile - out.KeyFile = in.KeyFile - return nil -} - -// Convert_kubeadm_ExternalEtcd_To_v1alpha2_ExternalEtcd is an autogenerated conversion function. 
-func Convert_kubeadm_ExternalEtcd_To_v1alpha2_ExternalEtcd(in *kubeadm.ExternalEtcd, out *ExternalEtcd, s conversion.Scope) error { - return autoConvert_kubeadm_ExternalEtcd_To_v1alpha2_ExternalEtcd(in, out, s) -} - -func autoConvert_v1alpha2_HostPathMount_To_kubeadm_HostPathMount(in *HostPathMount, out *kubeadm.HostPathMount, s conversion.Scope) error { - out.Name = in.Name - out.HostPath = in.HostPath - out.MountPath = in.MountPath - out.Writable = in.Writable - out.PathType = corev1.HostPathType(in.PathType) - return nil -} - -// Convert_v1alpha2_HostPathMount_To_kubeadm_HostPathMount is an autogenerated conversion function. -func Convert_v1alpha2_HostPathMount_To_kubeadm_HostPathMount(in *HostPathMount, out *kubeadm.HostPathMount, s conversion.Scope) error { - return autoConvert_v1alpha2_HostPathMount_To_kubeadm_HostPathMount(in, out, s) -} - -func autoConvert_kubeadm_HostPathMount_To_v1alpha2_HostPathMount(in *kubeadm.HostPathMount, out *HostPathMount, s conversion.Scope) error { - out.Name = in.Name - out.HostPath = in.HostPath - out.MountPath = in.MountPath - out.Writable = in.Writable - out.PathType = corev1.HostPathType(in.PathType) - return nil -} - -// Convert_kubeadm_HostPathMount_To_v1alpha2_HostPathMount is an autogenerated conversion function. 
-func Convert_kubeadm_HostPathMount_To_v1alpha2_HostPathMount(in *kubeadm.HostPathMount, out *HostPathMount, s conversion.Scope) error { - return autoConvert_kubeadm_HostPathMount_To_v1alpha2_HostPathMount(in, out, s) -} - -func autoConvert_v1alpha2_InitConfiguration_To_kubeadm_InitConfiguration(in *InitConfiguration, out *kubeadm.InitConfiguration, s conversion.Scope) error { - out.BootstrapTokens = *(*[]kubeadm.BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens)) - if err := Convert_v1alpha2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { - return err - } - // WARNING: in.API requires manual conversion: does not exist in peer-type - // WARNING: in.KubeProxy requires manual conversion: does not exist in peer-type - // WARNING: in.Etcd requires manual conversion: does not exist in peer-type - // WARNING: in.KubeletConfiguration requires manual conversion: does not exist in peer-type - // WARNING: in.Networking requires manual conversion: does not exist in peer-type - // WARNING: in.KubernetesVersion requires manual conversion: does not exist in peer-type - // WARNING: in.APIServerExtraArgs requires manual conversion: does not exist in peer-type - // WARNING: in.ControllerManagerExtraArgs requires manual conversion: does not exist in peer-type - // WARNING: in.SchedulerExtraArgs requires manual conversion: does not exist in peer-type - // WARNING: in.APIServerExtraVolumes requires manual conversion: does not exist in peer-type - // WARNING: in.ControllerManagerExtraVolumes requires manual conversion: does not exist in peer-type - // WARNING: in.SchedulerExtraVolumes requires manual conversion: does not exist in peer-type - // WARNING: in.APIServerCertSANs requires manual conversion: does not exist in peer-type - // WARNING: in.CertificatesDir requires manual conversion: does not exist in peer-type - // WARNING: in.ImageRepository requires manual conversion: does not exist in peer-type - // 
WARNING: in.UnifiedControlPlaneImage requires manual conversion: does not exist in peer-type - // WARNING: in.AuditPolicyConfiguration requires manual conversion: does not exist in peer-type - // WARNING: in.FeatureGates requires manual conversion: does not exist in peer-type - // WARNING: in.ClusterName requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_kubeadm_InitConfiguration_To_v1alpha2_InitConfiguration(in *kubeadm.InitConfiguration, out *InitConfiguration, s conversion.Scope) error { - // WARNING: in.ClusterConfiguration requires manual conversion: does not exist in peer-type - out.BootstrapTokens = *(*[]BootstrapToken)(unsafe.Pointer(&in.BootstrapTokens)) - if err := Convert_kubeadm_NodeRegistrationOptions_To_v1alpha2_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { - return err - } - // WARNING: in.APIEndpoint requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_v1alpha2_JoinConfiguration_To_kubeadm_JoinConfiguration(in *JoinConfiguration, out *kubeadm.JoinConfiguration, s conversion.Scope) error { - if err := Convert_v1alpha2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { - return err - } - out.CACertPath = in.CACertPath - out.DiscoveryFile = in.DiscoveryFile - out.DiscoveryToken = in.DiscoveryToken - out.DiscoveryTokenAPIServers = *(*[]string)(unsafe.Pointer(&in.DiscoveryTokenAPIServers)) - out.DiscoveryTimeout = (*v1.Duration)(unsafe.Pointer(in.DiscoveryTimeout)) - out.TLSBootstrapToken = in.TLSBootstrapToken - out.Token = in.Token - out.ClusterName = in.ClusterName - out.DiscoveryTokenCACertHashes = *(*[]string)(unsafe.Pointer(&in.DiscoveryTokenCACertHashes)) - out.DiscoveryTokenUnsafeSkipCAVerification = in.DiscoveryTokenUnsafeSkipCAVerification - out.ControlPlane = in.ControlPlane - // WARNING: in.AdvertiseAddress requires manual conversion: does not exist in 
peer-type - // WARNING: in.BindPort requires manual conversion: does not exist in peer-type - out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) - return nil -} - -func autoConvert_kubeadm_JoinConfiguration_To_v1alpha2_JoinConfiguration(in *kubeadm.JoinConfiguration, out *JoinConfiguration, s conversion.Scope) error { - if err := Convert_kubeadm_NodeRegistrationOptions_To_v1alpha2_NodeRegistrationOptions(&in.NodeRegistration, &out.NodeRegistration, s); err != nil { - return err - } - out.CACertPath = in.CACertPath - out.DiscoveryFile = in.DiscoveryFile - out.DiscoveryToken = in.DiscoveryToken - out.DiscoveryTokenAPIServers = *(*[]string)(unsafe.Pointer(&in.DiscoveryTokenAPIServers)) - out.DiscoveryTimeout = (*v1.Duration)(unsafe.Pointer(in.DiscoveryTimeout)) - out.TLSBootstrapToken = in.TLSBootstrapToken - out.Token = in.Token - out.ClusterName = in.ClusterName - out.DiscoveryTokenCACertHashes = *(*[]string)(unsafe.Pointer(&in.DiscoveryTokenCACertHashes)) - out.DiscoveryTokenUnsafeSkipCAVerification = in.DiscoveryTokenUnsafeSkipCAVerification - out.ControlPlane = in.ControlPlane - // WARNING: in.APIEndpoint requires manual conversion: does not exist in peer-type - out.FeatureGates = *(*map[string]bool)(unsafe.Pointer(&in.FeatureGates)) - return nil -} - -func autoConvert_v1alpha2_LocalEtcd_To_kubeadm_LocalEtcd(in *LocalEtcd, out *kubeadm.LocalEtcd, s conversion.Scope) error { - out.Image = in.Image - out.DataDir = in.DataDir - out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs)) - out.ServerCertSANs = *(*[]string)(unsafe.Pointer(&in.ServerCertSANs)) - out.PeerCertSANs = *(*[]string)(unsafe.Pointer(&in.PeerCertSANs)) - return nil -} - -// Convert_v1alpha2_LocalEtcd_To_kubeadm_LocalEtcd is an autogenerated conversion function. 
-func Convert_v1alpha2_LocalEtcd_To_kubeadm_LocalEtcd(in *LocalEtcd, out *kubeadm.LocalEtcd, s conversion.Scope) error { - return autoConvert_v1alpha2_LocalEtcd_To_kubeadm_LocalEtcd(in, out, s) -} - -func autoConvert_kubeadm_LocalEtcd_To_v1alpha2_LocalEtcd(in *kubeadm.LocalEtcd, out *LocalEtcd, s conversion.Scope) error { - out.Image = in.Image - out.DataDir = in.DataDir - out.ExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.ExtraArgs)) - out.ServerCertSANs = *(*[]string)(unsafe.Pointer(&in.ServerCertSANs)) - out.PeerCertSANs = *(*[]string)(unsafe.Pointer(&in.PeerCertSANs)) - return nil -} - -// Convert_kubeadm_LocalEtcd_To_v1alpha2_LocalEtcd is an autogenerated conversion function. -func Convert_kubeadm_LocalEtcd_To_v1alpha2_LocalEtcd(in *kubeadm.LocalEtcd, out *LocalEtcd, s conversion.Scope) error { - return autoConvert_kubeadm_LocalEtcd_To_v1alpha2_LocalEtcd(in, out, s) -} - -func autoConvert_v1alpha2_Networking_To_kubeadm_Networking(in *Networking, out *kubeadm.Networking, s conversion.Scope) error { - out.ServiceSubnet = in.ServiceSubnet - out.PodSubnet = in.PodSubnet - out.DNSDomain = in.DNSDomain - return nil -} - -// Convert_v1alpha2_Networking_To_kubeadm_Networking is an autogenerated conversion function. -func Convert_v1alpha2_Networking_To_kubeadm_Networking(in *Networking, out *kubeadm.Networking, s conversion.Scope) error { - return autoConvert_v1alpha2_Networking_To_kubeadm_Networking(in, out, s) -} - -func autoConvert_kubeadm_Networking_To_v1alpha2_Networking(in *kubeadm.Networking, out *Networking, s conversion.Scope) error { - out.ServiceSubnet = in.ServiceSubnet - out.PodSubnet = in.PodSubnet - out.DNSDomain = in.DNSDomain - return nil -} - -// Convert_kubeadm_Networking_To_v1alpha2_Networking is an autogenerated conversion function. 
-func Convert_kubeadm_Networking_To_v1alpha2_Networking(in *kubeadm.Networking, out *Networking, s conversion.Scope) error { - return autoConvert_kubeadm_Networking_To_v1alpha2_Networking(in, out, s) -} - -func autoConvert_v1alpha2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(in *NodeRegistrationOptions, out *kubeadm.NodeRegistrationOptions, s conversion.Scope) error { - out.Name = in.Name - out.CRISocket = in.CRISocket - out.Taints = *(*[]corev1.Taint)(unsafe.Pointer(&in.Taints)) - out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs)) - return nil -} - -// Convert_v1alpha2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions is an autogenerated conversion function. -func Convert_v1alpha2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(in *NodeRegistrationOptions, out *kubeadm.NodeRegistrationOptions, s conversion.Scope) error { - return autoConvert_v1alpha2_NodeRegistrationOptions_To_kubeadm_NodeRegistrationOptions(in, out, s) -} - -func autoConvert_kubeadm_NodeRegistrationOptions_To_v1alpha2_NodeRegistrationOptions(in *kubeadm.NodeRegistrationOptions, out *NodeRegistrationOptions, s conversion.Scope) error { - out.Name = in.Name - out.CRISocket = in.CRISocket - out.Taints = *(*[]corev1.Taint)(unsafe.Pointer(&in.Taints)) - out.KubeletExtraArgs = *(*map[string]string)(unsafe.Pointer(&in.KubeletExtraArgs)) - return nil -} - -// Convert_kubeadm_NodeRegistrationOptions_To_v1alpha2_NodeRegistrationOptions is an autogenerated conversion function. 
-func Convert_kubeadm_NodeRegistrationOptions_To_v1alpha2_NodeRegistrationOptions(in *kubeadm.NodeRegistrationOptions, out *NodeRegistrationOptions, s conversion.Scope) error { - return autoConvert_kubeadm_NodeRegistrationOptions_To_v1alpha2_NodeRegistrationOptions(in, out, s) -} diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go deleted file mode 100644 index bcbdbcd190c..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.deepcopy.go +++ /dev/null @@ -1,441 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - v1alpha1 "k8s.io/kube-proxy/config/v1alpha1" - v1beta1 "k8s.io/kubelet/config/v1beta1" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *API) DeepCopyInto(out *API) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new API. -func (in *API) DeepCopy() *API { - if in == nil { - return nil - } - out := new(API) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *AuditPolicyConfiguration) DeepCopyInto(out *AuditPolicyConfiguration) { - *out = *in - if in.LogMaxAge != nil { - in, out := &in.LogMaxAge, &out.LogMaxAge - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditPolicyConfiguration. -func (in *AuditPolicyConfiguration) DeepCopy() *AuditPolicyConfiguration { - if in == nil { - return nil - } - out := new(AuditPolicyConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BootstrapToken) DeepCopyInto(out *BootstrapToken) { - *out = *in - if in.Token != nil { - in, out := &in.Token, &out.Token - *out = new(BootstrapTokenString) - **out = **in - } - if in.TTL != nil { - in, out := &in.TTL, &out.TTL - *out = new(v1.Duration) - **out = **in - } - if in.Expires != nil { - in, out := &in.Expires, &out.Expires - *out = (*in).DeepCopy() - } - if in.Usages != nil { - in, out := &in.Usages, &out.Usages - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapToken. -func (in *BootstrapToken) DeepCopy() *BootstrapToken { - if in == nil { - return nil - } - out := new(BootstrapToken) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BootstrapTokenString) DeepCopyInto(out *BootstrapTokenString) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapTokenString. 
-func (in *BootstrapTokenString) DeepCopy() *BootstrapTokenString { - if in == nil { - return nil - } - out := new(BootstrapTokenString) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Etcd) DeepCopyInto(out *Etcd) { - *out = *in - if in.Local != nil { - in, out := &in.Local, &out.Local - *out = new(LocalEtcd) - (*in).DeepCopyInto(*out) - } - if in.External != nil { - in, out := &in.External, &out.External - *out = new(ExternalEtcd) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Etcd. -func (in *Etcd) DeepCopy() *Etcd { - if in == nil { - return nil - } - out := new(Etcd) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalEtcd) DeepCopyInto(out *ExternalEtcd) { - *out = *in - if in.Endpoints != nil { - in, out := &in.Endpoints, &out.Endpoints - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalEtcd. -func (in *ExternalEtcd) DeepCopy() *ExternalEtcd { - if in == nil { - return nil - } - out := new(ExternalEtcd) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPathMount) DeepCopyInto(out *HostPathMount) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathMount. -func (in *HostPathMount) DeepCopy() *HostPathMount { - if in == nil { - return nil - } - out := new(HostPathMount) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InitConfiguration) DeepCopyInto(out *InitConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.BootstrapTokens != nil { - in, out := &in.BootstrapTokens, &out.BootstrapTokens - *out = make([]BootstrapToken, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.NodeRegistration.DeepCopyInto(&out.NodeRegistration) - out.API = in.API - in.KubeProxy.DeepCopyInto(&out.KubeProxy) - in.Etcd.DeepCopyInto(&out.Etcd) - in.KubeletConfiguration.DeepCopyInto(&out.KubeletConfiguration) - out.Networking = in.Networking - if in.APIServerExtraArgs != nil { - in, out := &in.APIServerExtraArgs, &out.APIServerExtraArgs - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ControllerManagerExtraArgs != nil { - in, out := &in.ControllerManagerExtraArgs, &out.ControllerManagerExtraArgs - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.SchedulerExtraArgs != nil { - in, out := &in.SchedulerExtraArgs, &out.SchedulerExtraArgs - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.APIServerExtraVolumes != nil { - in, out := &in.APIServerExtraVolumes, &out.APIServerExtraVolumes - *out = make([]HostPathMount, len(*in)) - copy(*out, *in) - } - if in.ControllerManagerExtraVolumes != nil { - in, out := &in.ControllerManagerExtraVolumes, &out.ControllerManagerExtraVolumes - *out = make([]HostPathMount, len(*in)) - copy(*out, *in) - } - if in.SchedulerExtraVolumes != nil { - in, out := &in.SchedulerExtraVolumes, &out.SchedulerExtraVolumes - *out = make([]HostPathMount, len(*in)) - copy(*out, *in) - } - if in.APIServerCertSANs != nil { - in, out := &in.APIServerCertSANs, &out.APIServerCertSANs - *out = make([]string, len(*in)) - copy(*out, *in) - } - in.AuditPolicyConfiguration.DeepCopyInto(&out.AuditPolicyConfiguration) - if in.FeatureGates != nil { - in, out := &in.FeatureGates, 
&out.FeatureGates - *out = make(map[string]bool, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitConfiguration. -func (in *InitConfiguration) DeepCopy() *InitConfiguration { - if in == nil { - return nil - } - out := new(InitConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InitConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JoinConfiguration) DeepCopyInto(out *JoinConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.NodeRegistration.DeepCopyInto(&out.NodeRegistration) - if in.DiscoveryTokenAPIServers != nil { - in, out := &in.DiscoveryTokenAPIServers, &out.DiscoveryTokenAPIServers - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DiscoveryTimeout != nil { - in, out := &in.DiscoveryTimeout, &out.DiscoveryTimeout - *out = new(v1.Duration) - **out = **in - } - if in.DiscoveryTokenCACertHashes != nil { - in, out := &in.DiscoveryTokenCACertHashes, &out.DiscoveryTokenCACertHashes - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.FeatureGates != nil { - in, out := &in.FeatureGates, &out.FeatureGates - *out = make(map[string]bool, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JoinConfiguration. 
-func (in *JoinConfiguration) DeepCopy() *JoinConfiguration { - if in == nil { - return nil - } - out := new(JoinConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *JoinConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeProxy) DeepCopyInto(out *KubeProxy) { - *out = *in - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = new(v1alpha1.KubeProxyConfiguration) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeProxy. -func (in *KubeProxy) DeepCopy() *KubeProxy { - if in == nil { - return nil - } - out := new(KubeProxy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) { - *out = *in - if in.BaseConfig != nil { - in, out := &in.BaseConfig, &out.BaseConfig - *out = new(v1beta1.KubeletConfiguration) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfiguration. -func (in *KubeletConfiguration) DeepCopy() *KubeletConfiguration { - if in == nil { - return nil - } - out := new(KubeletConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *LocalEtcd) DeepCopyInto(out *LocalEtcd) { - *out = *in - if in.ExtraArgs != nil { - in, out := &in.ExtraArgs, &out.ExtraArgs - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ServerCertSANs != nil { - in, out := &in.ServerCertSANs, &out.ServerCertSANs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.PeerCertSANs != nil { - in, out := &in.PeerCertSANs, &out.PeerCertSANs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalEtcd. -func (in *LocalEtcd) DeepCopy() *LocalEtcd { - if in == nil { - return nil - } - out := new(LocalEtcd) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Networking) DeepCopyInto(out *Networking) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. -func (in *Networking) DeepCopy() *Networking { - if in == nil { - return nil - } - out := new(Networking) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeRegistrationOptions) DeepCopyInto(out *NodeRegistrationOptions) { - *out = *in - if in.Taints != nil { - in, out := &in.Taints, &out.Taints - *out = make([]corev1.Taint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.KubeletExtraArgs != nil { - in, out := &in.KubeletExtraArgs, &out.KubeletExtraArgs - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRegistrationOptions. 
-func (in *NodeRegistrationOptions) DeepCopy() *NodeRegistrationOptions { - if in == nil { - return nil - } - out := new(NodeRegistrationOptions) - in.DeepCopyInto(out) - return out -} diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.defaults.go deleted file mode 100644 index dd4522ee412..00000000000 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/zz_generated.defaults.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" - v1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1" - v1alpha1 "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. 
-func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&InitConfiguration{}, func(obj interface{}) { SetObjectDefaults_InitConfiguration(obj.(*InitConfiguration)) }) - scheme.AddTypeDefaultingFunc(&JoinConfiguration{}, func(obj interface{}) { SetObjectDefaults_JoinConfiguration(obj.(*JoinConfiguration)) }) - return nil -} - -func SetObjectDefaults_InitConfiguration(in *InitConfiguration) { - SetDefaults_InitConfiguration(in) - for i := range in.BootstrapTokens { - a := &in.BootstrapTokens[i] - SetDefaults_BootstrapToken(a) - } - SetDefaults_NodeRegistrationOptions(&in.NodeRegistration) - if in.KubeProxy.Config != nil { - v1alpha1.SetDefaults_KubeProxyConfiguration(in.KubeProxy.Config) - } - if in.KubeletConfiguration.BaseConfig != nil { - v1beta1.SetDefaults_KubeletConfiguration(in.KubeletConfiguration.BaseConfig) - } -} - -func SetObjectDefaults_JoinConfiguration(in *JoinConfiguration) { - SetDefaults_JoinConfiguration(in) - SetDefaults_NodeRegistrationOptions(&in.NodeRegistration) -} diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha3/defaults.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha3/defaults.go index da1ff468458..c833b82deeb 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha3/defaults.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha3/defaults.go @@ -33,7 +33,7 @@ const ( // DefaultClusterDNSIP defines default DNS IP DefaultClusterDNSIP = "10.96.0.10" // DefaultKubernetesVersion defines default kubernetes version - DefaultKubernetesVersion = "stable-1.11" + DefaultKubernetesVersion = "stable-1" // DefaultAPIBindPort defines default API port DefaultAPIBindPort = 6443 // DefaultCertificatesDir defines default certificate directory diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha3/doc.go b/cmd/kubeadm/app/apis/kubeadm/v1alpha3/doc.go index 5186e1fba14..7d1dff8c76c 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha3/doc.go +++ b/cmd/kubeadm/app/apis/kubeadm/v1alpha3/doc.go @@ -14,14 +14,54 @@ See the License for the specific language 
governing permissions and limitations under the License. */ +// +k8s:defaulter-gen=TypeMeta +// +groupName=kubeadm.k8s.io +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm + // Package v1alpha3 is the API (config file) for driving the kubeadm binary. // Some of these options are also available as command line flags, but -// the preferred way to configure kubeadm is to pass a YAML file in with the -// --config option. +// the preferred way to configure kubeadm is to pass a single YAML file with +// multiple configuration types in with the --config option. // -// A fully populated example of the schema: +// kubeadm defines several configuration types: +// * InitConfiguration +// * JoinConfiguration +// * ClusterConfiguration +// +// InitConfiguration and JoinConfiguration cannot share a single YAML file, +// however it is expected that InitConfiguration and ClusterConfiguration will +// share a single YAML file. +// +// A fully populated example of a single YAML file containing multiple +// configuration types to be used during a `kubeadm init` run. // apiVersion: kubeadm.k8s.io/v1alpha3 // kind: InitConfiguration +// bootstrapTokens: +// - token: "9a08jv.c0izixklcxtmnze7" +// description: "kubeadm bootstrap token" +// ttl: "24h" +// - token: "783bde.3f89s0fje9f38fhf" +// description: "another bootstrap token" +// usages: +// - signing +// groups: +// - system:anonymous +// nodeRegistration: +// name: "ec2-10-100-0-1" +// criSocket: "/var/run/dockershim.sock" +// taints: +// - key: "kubeadmNode" +// value: "master" +// effect: "NoSchedule" +// kubeletExtraArgs: +// cgroupDriver: "cgroupfs" +// apiEndpoint: +// advertiseAddress: "10.100.0.1" +// bindPort: 6443 +// --- +// apiVersion: kubeadm.k8s.io/v1alpha3 +// kind: ClusterConfiguration // etcd: // # one of local or external // local: @@ -45,20 +85,38 @@ limitations under the License. 
// podSubnet: "10.100.0.1/24" // dnsDomain: "cluster.local" // kubernetesVersion: "v1.12.0" -// ControlPlaneEndpoint: "10.100.0.1:6443" +// controlPlaneEndpoint: "10.100.0.1:6443" // apiServerExtraArgs: // authorization-mode: "Node,RBAC" // controlManagerExtraArgs: // node-cidr-mask-size: 20 // schedulerExtraArgs: // address: "10.100.0.1" +// apiServerExtraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// writable: true +// pathType: File +// controllerManagerExtraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// writable: true +// pathType: File +// schedulerExtraVolumes: +// - name: "some-volume" +// hostPath: "/etc/some-path" +// mountPath: "/etc/some-pod-path" +// writable: true +// pathType: File // apiServerCertSANs: // - "10.100.1.1" // - "ec2-10-100-0-1.compute-1.amazonaws.com" -// certificateDirectory: "/etc/kubernetes/pki" +// certificatesDir: "/etc/kubernetes/pki" // imageRepository: "k8s.gcr.io" // unifiedControlPlaneImage: "k8s.gcr.io/controlplane:v1.12.0" -// auditPolicyConfiguration: +// auditPolicy: // # https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy // path: "/var/log/audit/audit.json" // logDir: "/var/log/audit" @@ -66,33 +124,7 @@ limitations under the License. 
// featureGates: // selfhosting: false // clusterName: "example-cluster" -// bootstrapTokens: -// - token: "9a08jv.c0izixklcxtmnze7" -// description: "kubeadm bootstrap token" -// ttl: "24h" -// usages: -// - "authentication" -// - "signing" -// groups: -// - "system:bootstrappers:kubeadm:default-node-token" -// nodeRegistration: -// name: "ec2-10-100-0-1" -// criSocket: "/var/run/dockershim.sock" -// taints: -// - key: "kubeadmNode" -// value: "master" -// effect: "NoSchedule" -// kubeletExtraArgs: -// cgroupDriver: "cgroupfs" -// apiEndpoint: -// advertiseAddress: "10.100.0.1" -// bindPort: 6443 // // TODO: The BootstrapTokenString object should move out to either k8s.io/client-go or k8s.io/api in the future // (probably as part of Bootstrap Tokens going GA). It should not be staged under the kubeadm API as it is now. -// -// +k8s:defaulter-gen=TypeMeta -// +groupName=kubeadm.k8s.io -// +k8s:deepcopy-gen=package -// +k8s:conversion-gen=k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm package v1alpha3 // import "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3" diff --git a/cmd/kubeadm/app/cmd/BUILD b/cmd/kubeadm/app/cmd/BUILD index d0f731775b1..c28a079c36d 100644 --- a/cmd/kubeadm/app/cmd/BUILD +++ b/cmd/kubeadm/app/cmd/BUILD @@ -22,7 +22,6 @@ go_library( deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/scheme:go_default_library", - "//cmd/kubeadm/app/apis/kubeadm/v1alpha2:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/v1alpha3:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library", "//cmd/kubeadm/app/cmd/options:go_default_library", @@ -97,7 +96,6 @@ go_test( "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/features:go_default_library", "//cmd/kubeadm/app/preflight:go_default_library", - "//cmd/kubeadm/app/util/config:go_default_library", "//cmd/kubeadm/app/util/runtime:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/cmd/kubeadm/app/cmd/config.go b/cmd/kubeadm/app/cmd/config.go index a73989305df..ade117e6404 100644 --- a/cmd/kubeadm/app/cmd/config.go +++ b/cmd/kubeadm/app/cmd/config.go @@ -32,7 +32,6 @@ import ( clientset "k8s.io/client-go/kubernetes" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme" - kubeadmapiv1alpha2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha2" kubeadmapiv1alpha3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" phaseutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases" @@ -125,13 +124,13 @@ func NewCmdConfigPrintDefault(out io.Writer) *cobra.Command { func getDefaultAPIObjectBytes(apiObject string) ([]byte, error) { switch apiObject { - case constants.InitConfigurationKind, constants.MasterConfigurationKind: + case constants.InitConfigurationKind: return getDefaultInitConfigBytes(constants.InitConfigurationKind) case constants.ClusterConfigurationKind: return getDefaultInitConfigBytes(constants.ClusterConfigurationKind) - case constants.JoinConfigurationKind, constants.NodeConfigurationKind: + case constants.JoinConfigurationKind: return getDefaultNodeConfigBytes() default: @@ -154,8 +153,9 @@ func getSupportedAPIObjects() []string { } // getAllAPIObjectNames returns currently supported API object names and their historical aliases +// NB. currently there is no historical supported API objects, but we keep this function for future changes func getAllAPIObjectNames() []string { - historicAPIObjectAliases := []string{constants.MasterConfigurationKind} + historicAPIObjectAliases := []string{} objects := getSupportedAPIObjects() objects = append(objects, historicAPIObjectAliases...) 
return objects @@ -228,7 +228,6 @@ func NewCmdConfigMigrate(out io.Writer) *cobra.Command { locally in the CLI tool without ever touching anything in the cluster. In this version of kubeadm, the following API versions are supported: - %s - - %s Further, kubeadm can only write out config of version %q, but read both types. So regardless of what version you pass to the --old-config parameter here, the API object will be @@ -237,7 +236,7 @@ func NewCmdConfigMigrate(out io.Writer) *cobra.Command { In other words, the output of this command is what kubeadm actually would read internally if you submitted this file to "kubeadm init" - `), kubeadmapiv1alpha2.SchemeGroupVersion.String(), kubeadmapiv1alpha3.SchemeGroupVersion.String(), kubeadmapiv1alpha3.SchemeGroupVersion.String()), + `), kubeadmapiv1alpha3.SchemeGroupVersion.String(), kubeadmapiv1alpha3.SchemeGroupVersion.String()), Run: func(cmd *cobra.Command, args []string) { if len(oldCfgPath) == 0 { kubeadmutil.CheckErr(fmt.Errorf("The --old-config flag is mandatory")) @@ -380,7 +379,7 @@ func RunConfigView(out io.Writer, client clientset.Interface) error { return err } // No need to append \n as that already exists in the ConfigMap - fmt.Fprintf(out, "%s", cfgConfigMap.Data[constants.InitConfigurationConfigMapKey]) + fmt.Fprintf(out, "%s", cfgConfigMap.Data[constants.ClusterConfigurationConfigMapKey]) return nil } diff --git a/cmd/kubeadm/app/cmd/config_test.go b/cmd/kubeadm/app/cmd/config_test.go index f001e174e97..5c9fb66859c 100644 --- a/cmd/kubeadm/app/cmd/config_test.go +++ b/cmd/kubeadm/app/cmd/config_test.go @@ -29,7 +29,6 @@ import ( kubeadmapiv1alpha3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3" "k8s.io/kubernetes/cmd/kubeadm/app/cmd" "k8s.io/kubernetes/cmd/kubeadm/app/features" - "k8s.io/kubernetes/cmd/kubeadm/app/util/config" utilruntime "k8s.io/kubernetes/cmd/kubeadm/app/util/runtime" "k8s.io/utils/exec" fakeexec "k8s.io/utils/exec/testing" @@ -39,7 +38,7 @@ const ( defaultNumberOfImages = 8 
// dummyKubernetesVersion is just used for unit testing, in order to not make // kubeadm lookup dl.k8s.io to resolve what the latest stable release is - dummyKubernetesVersion = "v1.10.0" + dummyKubernetesVersion = "v1.11.0" ) func TestNewCmdConfigImagesList(t *testing.T) { @@ -65,12 +64,12 @@ func TestImagesListRunWithCustomConfigPath(t *testing.T) { name: "set k8s version", expectedImageCount: defaultNumberOfImages, expectedImageSubstrings: []string{ - ":v1.10.1", + ":v1.11.1", }, configContents: []byte(dedent.Dedent(` apiVersion: kubeadm.k8s.io/v1alpha3 kind: ClusterConfiguration - kubernetesVersion: v1.10.1 + kubernetesVersion: v1.11.1 `)), }, { @@ -232,28 +231,32 @@ func TestImagesPull(t *testing.T) { } func TestMigrate(t *testing.T) { - cfg := []byte(dedent.Dedent(` - # This is intentionally testing an old API version and the old kind naming and making sure the output is correct - apiVersion: kubeadm.k8s.io/v1alpha2 - kind: MasterConfiguration - kubernetesVersion: v1.10.0 - `)) - configFile, cleanup := tempConfig(t, cfg) - defer cleanup() + /* + TODO: refactor this to test v1alpha3 --> v1beta1 after introducing v1beta1 - var output bytes.Buffer - command := cmd.NewCmdConfigMigrate(&output) - if err := command.Flags().Set("old-config", configFile); err != nil { - t.Fatalf("failed to set old-config flag") - } - newConfigPath := filepath.Join(filepath.Dir(configFile), "new-migrated-config") - if err := command.Flags().Set("new-config", newConfigPath); err != nil { - t.Fatalf("failed to set new-config flag") - } - command.Run(nil, nil) - if _, err := config.ConfigFileAndDefaultsToInternalConfig(newConfigPath, &kubeadmapiv1alpha3.InitConfiguration{}); err != nil { - t.Fatalf("Could not read output back into internal type: %v", err) - } + cfg := []byte(dedent.Dedent(` + # This is intentionally testing an old API version and the old kind naming and making sure the output is correct + apiVersion: kubeadm.k8s.io/v1alpha2 + kind: MasterConfiguration + 
kubernetesVersion: v1.11.0 + `)) + configFile, cleanup := tempConfig(t, cfg) + defer cleanup() + + var output bytes.Buffer + command := cmd.NewCmdConfigMigrate(&output) + if err := command.Flags().Set("old-config", configFile); err != nil { + t.Fatalf("failed to set old-config flag") + } + newConfigPath := filepath.Join(filepath.Dir(configFile), "new-migrated-config") + if err := command.Flags().Set("new-config", newConfigPath); err != nil { + t.Fatalf("failed to set new-config flag") + } + command.Run(nil, nil) + if _, err := config.ConfigFileAndDefaultsToInternalConfig(newConfigPath, &kubeadmapiv1alpha3.InitConfiguration{}); err != nil { + t.Fatalf("Could not read output back into internal type: %v", err) + } + */ } // Returns the name of the file created and a cleanup callback diff --git a/cmd/kubeadm/app/cmd/join.go b/cmd/kubeadm/app/cmd/join.go index 4e22321d216..13bd385d78f 100644 --- a/cmd/kubeadm/app/cmd/join.go +++ b/cmd/kubeadm/app/cmd/join.go @@ -331,7 +331,7 @@ func (j *Join) Run(out io.Writer) error { } // run kubeadm init preflight checks for checking all the prequisites - glog.Infoln("[join] running pre-flight checks before initializing the new control plane instance") + fmt.Printf("[join] running pre-flight checks before initializing the new control plane instance\n") preflight.RunInitMasterChecks(utilsexec.New(), initConfiguration, j.ignorePreflightErrors) // Prepares the node for hosting a new control plane instance by writing necessary diff --git a/cmd/kubeadm/app/cmd/phases/BUILD b/cmd/kubeadm/app/cmd/phases/BUILD index fd843dbbf76..f40701b473f 100644 --- a/cmd/kubeadm/app/cmd/phases/BUILD +++ b/cmd/kubeadm/app/cmd/phases/BUILD @@ -25,6 +25,7 @@ go_library( "//cmd/kubeadm/app/apis/kubeadm/v1alpha3:go_default_library", "//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library", "//cmd/kubeadm/app/cmd/options:go_default_library", + "//cmd/kubeadm/app/cmd/phases/certs:go_default_library", "//cmd/kubeadm/app/cmd/util:go_default_library", 
"//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/features:go_default_library", @@ -48,8 +49,8 @@ go_library( "//cmd/kubeadm/app/util/config:go_default_library", "//cmd/kubeadm/app/util/kubeconfig:go_default_library", "//pkg/util/normalizer:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/bootstrap/token/api:go_default_library", diff --git a/cmd/kubeadm/app/cmd/phases/certs.go b/cmd/kubeadm/app/cmd/phases/certs.go index a9d83a59e47..96455c99b06 100644 --- a/cmd/kubeadm/app/cmd/phases/certs.go +++ b/cmd/kubeadm/app/cmd/phases/certs.go @@ -26,6 +26,7 @@ import ( kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme" kubeadmapiv1alpha3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" + certscmdphase "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/certs" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" @@ -120,7 +121,10 @@ func getCertsSubCommands(defaultKubernetesVersion string) []*cobra.Command { } addFlags(saCmd, &cfgPath, cfg, false) - subCmds := []*cobra.Command{allCmd, saCmd} + // "renew" command + renewCmd := certscmdphase.NewCmdCertsRenewal() + + subCmds := []*cobra.Command{allCmd, saCmd, renewCmd} certTree, err := certsphase.GetDefaultCertList().AsMap().CertTree() kubeadmutil.CheckErr(err) diff --git a/cmd/kubeadm/app/cmd/phases/certs/renew.go b/cmd/kubeadm/app/cmd/phases/certs/renew.go index 9f56ef45f10..a13fce0ad87 100644 --- a/cmd/kubeadm/app/cmd/phases/certs/renew.go +++ b/cmd/kubeadm/app/cmd/phases/certs/renew.go @@ 
-50,7 +50,7 @@ var ( func NewCmdCertsRenewal() *cobra.Command { cmd := &cobra.Command{ Use: "renew", - Short: "Renews certificates for a kubernetes cluster", + Short: "Renews certificates for a Kubernetes cluster", Long: cmdutil.MacroCommandLongDescription, RunE: cmdutil.SubCmdRunE("renew"), } @@ -110,7 +110,7 @@ func addFlags(cmd *cobra.Command, cfg *renewConfig) { options.AddConfigFlag(cmd.Flags(), &cfg.cfgPath) options.AddCertificateDirFlag(cmd.Flags(), &cfg.cfg.CertificatesDir) options.AddKubeConfigFlag(cmd.Flags(), &cfg.kubeconfigPath) - cmd.Flags().BoolVar(&cfg.useAPI, "use-api", cfg.useAPI, "Use the kubernetes certificate API to renew certificates") + cmd.Flags().BoolVar(&cfg.useAPI, "use-api", cfg.useAPI, "Use the Kubernetes certificate API to renew certificates") } // generateCertCommand takes mostly strings instead of structs to avoid using structs in a for loop diff --git a/cmd/kubeadm/app/cmd/phases/certs_test.go b/cmd/kubeadm/app/cmd/phases/certs_test.go index 0311ccdff7c..09fe2572f97 100644 --- a/cmd/kubeadm/app/cmd/phases/certs_test.go +++ b/cmd/kubeadm/app/cmd/phases/certs_test.go @@ -32,7 +32,7 @@ import ( ) // phaseTestK8sVersion is a fake kubernetes version to use when testing -const phaseTestK8sVersion = "v1.10.0" +const phaseTestK8sVersion = "v1.11.0" func TestCertsSubCommandsHasFlags(t *testing.T) { diff --git a/cmd/kubeadm/app/cmd/phases/controlplane_test.go b/cmd/kubeadm/app/cmd/phases/controlplane_test.go index c8c5d3b8e01..a9ad56b3af7 100644 --- a/cmd/kubeadm/app/cmd/phases/controlplane_test.go +++ b/cmd/kubeadm/app/cmd/phases/controlplane_test.go @@ -90,7 +90,7 @@ func TestControlPlaneCreateFilesWithFlags(t *testing.T) { { command: "all", additionalFlags: []string{ - "--kubernetes-version=v1.10.0", + "--kubernetes-version=v1.11.0", "--apiserver-advertise-address=1.2.3.4", "--apiserver-bind-port=6443", "--service-cidr=1.2.3.4/16", @@ -105,7 +105,7 @@ func TestControlPlaneCreateFilesWithFlags(t *testing.T) { { command: "apiserver", 
additionalFlags: []string{ - "--kubernetes-version=v1.10.0", + "--kubernetes-version=v1.11.0", "--apiserver-advertise-address=1.2.3.4", "--apiserver-bind-port=6443", "--service-cidr=1.2.3.4/16", @@ -115,7 +115,7 @@ func TestControlPlaneCreateFilesWithFlags(t *testing.T) { { command: "controller-manager", additionalFlags: []string{ - "--kubernetes-version=v1.10.0", + "--kubernetes-version=v1.11.0", "--pod-network-cidr=1.2.3.4/16", }, expectedFiles: []string{"kube-controller-manager.yaml"}, @@ -123,7 +123,7 @@ func TestControlPlaneCreateFilesWithFlags(t *testing.T) { { command: "scheduler", additionalFlags: []string{ - "--kubernetes-version=v1.10.0", + "--kubernetes-version=v1.11.0", }, expectedFiles: []string{"kube-scheduler.yaml"}, }, diff --git a/cmd/kubeadm/app/cmd/phases/kubelet.go b/cmd/kubeadm/app/cmd/phases/kubelet.go index 2c14dd7563a..c70c29a1aa8 100644 --- a/cmd/kubeadm/app/cmd/phases/kubelet.go +++ b/cmd/kubeadm/app/cmd/phases/kubelet.go @@ -21,6 +21,7 @@ import ( "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/util/version" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapiv1alpha3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" @@ -33,7 +34,6 @@ import ( configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig" "k8s.io/kubernetes/pkg/util/normalizer" - "k8s.io/kubernetes/pkg/util/version" utilsexec "k8s.io/utils/exec" ) @@ -81,7 +81,7 @@ var ( kubeadm alpha phase kubelet config download # Downloads the kubelet configuration from the ConfigMap in the cluster. Uses a specific desired kubelet version. 
- kubeadm alpha phase kubelet config download --kubelet-version v1.11.0 + kubeadm alpha phase kubelet config download --kubelet-version v1.12.0 `) kubeletConfigWriteToDiskLongDesc = normalizer.LongDesc(` @@ -104,7 +104,7 @@ var ( kubeletConfigEnableDynamicExample = normalizer.Examples(` # Enables dynamic kubelet configuration for a Node. - kubeadm alpha phase kubelet enable-dynamic-config --node-name node-1 --kubelet-version v1.11.0 + kubeadm alpha phase kubelet enable-dynamic-config --node-name node-1 --kubelet-version v1.12.0 WARNING: This feature is still experimental, and disabled by default. Enable only if you know what you are doing, as it may have surprising side-effects at this stage. diff --git a/cmd/kubeadm/app/cmd/reset_test.go b/cmd/kubeadm/app/cmd/reset_test.go index a4399397063..fa2ab84fbc6 100644 --- a/cmd/kubeadm/app/cmd/reset_test.go +++ b/cmd/kubeadm/app/cmd/reset_test.go @@ -68,22 +68,6 @@ func TestNewReset(t *testing.T) { NewReset(in, ignorePreflightErrorsSet, forceReset, certsDir, criSocketPath) } -func TestNewCmdReset(t *testing.T) { - var out io.Writer - var in io.Reader - cmd := NewCmdReset(in, out) - - tmpDir, err := ioutil.TempDir("", "kubeadm-reset-test") - if err != nil { - t.Errorf("Unable to create temporary directory: %v", err) - } - args := []string{"--ignore-preflight-errors=all", "--cert-dir=" + tmpDir, "--force"} - cmd.SetArgs(args) - if err := cmd.Execute(); err != nil { - t.Errorf("Cannot execute reset command: %v", err) - } -} - func TestConfigDirCleaner(t *testing.T) { tests := map[string]struct { resetDir string diff --git a/cmd/kubeadm/app/cmd/token_test.go b/cmd/kubeadm/app/cmd/token_test.go index 3d8dd15e93f..f27f8d5bd1c 100644 --- a/cmd/kubeadm/app/cmd/token_test.go +++ b/cmd/kubeadm/app/cmd/token_test.go @@ -182,7 +182,7 @@ func TestRunCreateToken(t *testing.T) { ClusterConfiguration: kubeadmapiv1alpha3.ClusterConfiguration{ // KubernetesVersion is not used, but we set this explicitly to avoid // the lookup of the 
version from the internet when executing ConfigFileAndDefaultsToInternalConfig - KubernetesVersion: "v1.10.0", + KubernetesVersion: "v1.11.0", }, BootstrapTokens: []kubeadmapiv1alpha3.BootstrapToken{ { diff --git a/cmd/kubeadm/app/cmd/upgrade/BUILD b/cmd/kubeadm/app/cmd/upgrade/BUILD index 73a1684fe16..085d18eedbb 100644 --- a/cmd/kubeadm/app/cmd/upgrade/BUILD +++ b/cmd/kubeadm/app/cmd/upgrade/BUILD @@ -32,11 +32,11 @@ go_library( "//cmd/kubeadm/app/util/kubeconfig:go_default_library", "//pkg/util/node:go_default_library", "//pkg/util/normalizer:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/discovery/fake:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/cmd/kubeadm/app/cmd/upgrade/apply.go b/cmd/kubeadm/app/cmd/upgrade/apply.go index c4d846346c0..1135a527311 100644 --- a/cmd/kubeadm/app/cmd/upgrade/apply.go +++ b/cmd/kubeadm/app/cmd/upgrade/apply.go @@ -24,6 +24,7 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapiv1alpha3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3" @@ -38,7 +39,6 @@ import ( configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun" etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd" - "k8s.io/kubernetes/pkg/util/version" ) const ( diff --git a/cmd/kubeadm/app/cmd/upgrade/diff.go b/cmd/kubeadm/app/cmd/upgrade/diff.go index 
343001da2c8..56686903f5e 100644 --- a/cmd/kubeadm/app/cmd/upgrade/diff.go +++ b/cmd/kubeadm/app/cmd/upgrade/diff.go @@ -25,6 +25,7 @@ import ( "github.com/pmezard/go-difflib/difflib" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/version" kubeadmapiv1alpha3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" @@ -32,7 +33,6 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" - "k8s.io/kubernetes/pkg/util/version" ) type diffFlags struct { diff --git a/cmd/kubeadm/app/cmd/upgrade/node.go b/cmd/kubeadm/app/cmd/upgrade/node.go index c68a295a42a..346e83b70f0 100644 --- a/cmd/kubeadm/app/cmd/upgrade/node.go +++ b/cmd/kubeadm/app/cmd/upgrade/node.go @@ -25,6 +25,7 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -36,7 +37,6 @@ import ( dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun" "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/normalizer" - "k8s.io/kubernetes/pkg/util/version" ) var ( @@ -48,11 +48,11 @@ var ( upgradeNodeConfigExample = normalizer.Examples(` # Downloads the kubelet configuration from the ConfigMap in the cluster. Uses a specific desired kubelet version. - kubeadm upgrade node config --kubelet-version v1.11.0 + kubeadm upgrade node config --kubelet-version v1.12.0 # Simulates the downloading of the kubelet configuration from the ConfigMap in the cluster with a specific desired # version. Does not change any state locally on the node. 
- kubeadm upgrade node config --kubelet-version v1.11.0 --dry-run + kubeadm upgrade node config --kubelet-version v1.12.0 --dry-run `) ) diff --git a/cmd/kubeadm/app/cmd/upgrade/plan.go b/cmd/kubeadm/app/cmd/upgrade/plan.go index f14c640ae50..f814361eef0 100644 --- a/cmd/kubeadm/app/cmd/upgrade/plan.go +++ b/cmd/kubeadm/app/cmd/upgrade/plan.go @@ -27,6 +27,7 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/util/version" kubeadmapiv1alpha3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -34,7 +35,6 @@ import ( kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd" - "k8s.io/kubernetes/pkg/util/version" ) type planFlags struct { diff --git a/cmd/kubeadm/app/cmd/upgrade/plan_test.go b/cmd/kubeadm/app/cmd/upgrade/plan_test.go index 9fcd83f4f11..acd30985e2c 100644 --- a/cmd/kubeadm/app/cmd/upgrade/plan_test.go +++ b/cmd/kubeadm/app/cmd/upgrade/plan_test.go @@ -147,7 +147,7 @@ _____________________________________________________________________ KubeVersion: "v1.9.0", KubeadmVersion: "v1.9.0", DNSType: "kube-dns", - DNSVersion: "1.14.10", + DNSVersion: "1.14.13", EtcdVersion: "3.1.12", }, }, @@ -163,7 +163,7 @@ API Server v1.8.3 v1.9.0 Controller Manager v1.8.3 v1.9.0 Scheduler v1.8.3 v1.9.0 Kube Proxy v1.8.3 v1.9.0 -Kube DNS 1.14.5 1.14.10 +Kube DNS 1.14.5 1.14.13 Etcd 3.0.17 3.1.12 You can now apply the upgrade by executing the following command: @@ -213,7 +213,7 @@ _____________________________________________________________________ KubeVersion: "v1.9.0", KubeadmVersion: "v1.9.0", DNSType: "kube-dns", - DNSVersion: "1.14.10", + DNSVersion: "1.14.13", EtcdVersion: "3.1.12", }, }, @@ -249,7 +249,7 @@ API Server v1.8.3 v1.9.0 Controller Manager v1.8.3 v1.9.0 Scheduler v1.8.3 v1.9.0 Kube Proxy v1.8.3 
v1.9.0 -Kube DNS 1.14.5 1.14.10 +Kube DNS 1.14.5 1.14.13 Etcd 3.0.17 3.1.12 You can now apply the upgrade by executing the following command: @@ -281,7 +281,7 @@ _____________________________________________________________________ KubeVersion: "v1.9.0-beta.1", KubeadmVersion: "v1.9.0-beta.1", DNSType: "kube-dns", - DNSVersion: "1.14.10", + DNSVersion: "1.14.13", EtcdVersion: "3.1.12", }, }, @@ -297,7 +297,7 @@ API Server v1.8.5 v1.9.0-beta.1 Controller Manager v1.8.5 v1.9.0-beta.1 Scheduler v1.8.5 v1.9.0-beta.1 Kube Proxy v1.8.5 v1.9.0-beta.1 -Kube DNS 1.14.5 1.14.10 +Kube DNS 1.14.5 1.14.13 Etcd 3.0.17 3.1.12 You can now apply the upgrade by executing the following command: @@ -329,7 +329,7 @@ _____________________________________________________________________ KubeVersion: "v1.9.0-rc.1", KubeadmVersion: "v1.9.0-rc.1", DNSType: "kube-dns", - DNSVersion: "1.14.10", + DNSVersion: "1.14.13", EtcdVersion: "3.1.12", }, }, @@ -345,7 +345,7 @@ API Server v1.8.5 v1.9.0-rc.1 Controller Manager v1.8.5 v1.9.0-rc.1 Scheduler v1.8.5 v1.9.0-rc.1 Kube Proxy v1.8.5 v1.9.0-rc.1 -Kube DNS 1.14.5 1.14.10 +Kube DNS 1.14.5 1.14.13 Etcd 3.0.17 3.1.12 You can now apply the upgrade by executing the following command: diff --git a/cmd/kubeadm/app/cmd/upgrade/testdata/diff_master_config.yaml b/cmd/kubeadm/app/cmd/upgrade/testdata/diff_master_config.yaml index b5e5611c0fe..c9ae6e2bd36 100644 --- a/cmd/kubeadm/app/cmd/upgrade/testdata/diff_master_config.yaml +++ b/cmd/kubeadm/app/cmd/upgrade/testdata/diff_master_config.yaml @@ -1,3 +1,3 @@ -apiVersion: kubeadm.k8s.io/v1alpha2 -kind: MasterConfiguration -kubernetesVersion: 1.11.0 +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +kubernetesVersion: 1.12.0 diff --git a/cmd/kubeadm/app/componentconfigs/BUILD b/cmd/kubeadm/app/componentconfigs/BUILD index 7fa9d69de0f..556fcaf4e28 100644 --- a/cmd/kubeadm/app/componentconfigs/BUILD +++ b/cmd/kubeadm/app/componentconfigs/BUILD @@ -20,13 +20,13 @@ go_library( 
"//pkg/kubelet/apis/config/validation:go_default_library", "//pkg/proxy/apis/config:go_default_library", "//pkg/proxy/apis/config/validation:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/kube-proxy/config/v1alpha1:go_default_library", "//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library", @@ -61,9 +61,9 @@ go_test( "//cmd/kubeadm/app/util/apiclient:go_default_library", "//pkg/kubelet/apis/config:go_default_library", "//pkg/proxy/apis/config:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", diff --git a/cmd/kubeadm/app/componentconfigs/config.go b/cmd/kubeadm/app/componentconfigs/config.go index df4443cd5c1..784ab523409 100644 --- a/cmd/kubeadm/app/componentconfigs/config.go +++ b/cmd/kubeadm/app/componentconfigs/config.go @@ -21,11 +21,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" kubeadmconstants 
"k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" - "k8s.io/kubernetes/pkg/util/version" ) // GetFromKubeletConfigMap returns the pointer to the ComponentConfig API object read from the kubelet-config-version diff --git a/cmd/kubeadm/app/componentconfigs/config_test.go b/cmd/kubeadm/app/componentconfigs/config_test.go index 9372e6f46ef..0408f32f18f 100644 --- a/cmd/kubeadm/app/componentconfigs/config_test.go +++ b/cmd/kubeadm/app/componentconfigs/config_test.go @@ -21,11 +21,11 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" clientsetfake "k8s.io/client-go/kubernetes/fake" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" - "k8s.io/kubernetes/pkg/util/version" ) var cfgFiles = map[string][]byte{ diff --git a/cmd/kubeadm/app/componentconfigs/registrations.go b/cmd/kubeadm/app/componentconfigs/registrations.go index 7542d7793c4..80870023a8c 100644 --- a/cmd/kubeadm/app/componentconfigs/registrations.go +++ b/cmd/kubeadm/app/componentconfigs/registrations.go @@ -20,6 +20,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" kubeproxyconfigv1alpha1 "k8s.io/kube-proxy/config/v1alpha1" kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" @@ -27,7 +28,6 @@ import ( kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" - "k8s.io/kubernetes/pkg/util/version" ) // AddToSchemeFunc is a function that adds known types and API GroupVersions to a scheme diff --git a/cmd/kubeadm/app/constants/BUILD 
b/cmd/kubeadm/app/constants/BUILD index 81dd79dab2e..00753bb266b 100644 --- a/cmd/kubeadm/app/constants/BUILD +++ b/cmd/kubeadm/app/constants/BUILD @@ -12,8 +12,8 @@ go_library( importpath = "k8s.io/kubernetes/cmd/kubeadm/app/constants", deps = [ "//pkg/registry/core/service/ipallocator:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/tools/bootstrap/token/api:go_default_library", ], ) @@ -35,5 +35,5 @@ go_test( name = "go_default_test", srcs = ["constants_test.go"], embed = [":go_default_library"], - deps = ["//pkg/util/version:go_default_library"], + deps = ["//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library"], ) diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index f8dc6dea91b..f0da8c4055f 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -26,9 +26,9 @@ import ( "time" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/version" bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" - "k8s.io/kubernetes/pkg/util/version" ) // KubernetesDir is the directory kubernetes owns for storing various configuration files @@ -192,10 +192,6 @@ const ( // TODO: Rename this to KubeadmConfigConfigMap InitConfigurationConfigMap = "kubeadm-config" - // InitConfigurationConfigMapKey specifies in what ConfigMap key the master configuration should be stored - // TODO: This was used in v1.11 with vi1alpha2 config and older. 
Remove in v1.13 - InitConfigurationConfigMapKey = "MasterConfiguration" - // ClusterConfigurationConfigMapKey specifies in what ConfigMap key the cluster configuration should be stored ClusterConfigurationConfigMapKey = "ClusterConfiguration" @@ -246,6 +242,9 @@ const ( // DefaultEtcdVersion indicates the default etcd version that kubeadm uses DefaultEtcdVersion = "3.2.24" + // PauseVersion indicates the default pause image version for kubeadm + PauseVersion = "3.1" + // Etcd defines variable used internally when referring to etcd component Etcd = "etcd" // KubeAPIServer defines variable used internally when referring to kube-apiserver component @@ -299,7 +298,7 @@ const ( LeaseEndpointReconcilerType = "lease" // KubeDNSVersion is the version of kube-dns to be deployed if it is used - KubeDNSVersion = "1.14.10" + KubeDNSVersion = "1.14.13" // CoreDNSVersion is the version of CoreDNS to be deployed if it is used CoreDNSVersion = "1.2.2" @@ -310,17 +309,9 @@ const ( // InitConfigurationKind is the string kind value for the InitConfiguration struct InitConfigurationKind = "InitConfiguration" - // MasterConfigurationKind is the string kind value for the v1alpha2-named MasterConfiguration struct - // In v1alpha3 and higher, this struct is now named InitConfiguration - MasterConfigurationKind = "MasterConfiguration" - // JoinConfigurationKind is the string kind value for the JoinConfiguration struct JoinConfigurationKind = "JoinConfiguration" - // NodeConfigurationKind is the string kind value for the v1alpha2-named NodeConfiguration struct - // In v1alpha3 and higher, this struct is now named JoinConfiguration - NodeConfigurationKind = "NodeConfiguration" - // YAMLDocumentSeparator is the separator for YAML documents // TODO: Find a better place for this constant YAMLDocumentSeparator = "---\n" @@ -352,10 +343,10 @@ var ( MasterComponents = []string{KubeAPIServer, KubeControllerManager, KubeScheduler} // MinimumControlPlaneVersion specifies the minimum control plane 
version kubeadm can deploy - MinimumControlPlaneVersion = version.MustParseSemantic("v1.10.0") + MinimumControlPlaneVersion = version.MustParseSemantic("v1.11.0") // MinimumKubeletVersion specifies the minimum version of kubelet which kubeadm supports - MinimumKubeletVersion = version.MustParseSemantic("v1.10.0") + MinimumKubeletVersion = version.MustParseSemantic("v1.11.0") // SupportedEtcdVersion lists officially supported etcd versions with corresponding kubernetes releases SupportedEtcdVersion = map[uint8]string{ diff --git a/cmd/kubeadm/app/constants/constants_test.go b/cmd/kubeadm/app/constants/constants_test.go index 317406f0fb0..1882a284103 100644 --- a/cmd/kubeadm/app/constants/constants_test.go +++ b/cmd/kubeadm/app/constants/constants_test.go @@ -22,7 +22,7 @@ import ( "strings" "testing" - "k8s.io/kubernetes/pkg/util/version" + "k8s.io/apimachinery/pkg/util/version" ) func TestGetStaticPodDirectory(t *testing.T) { diff --git a/cmd/kubeadm/app/features/BUILD b/cmd/kubeadm/app/features/BUILD index 801a284e66e..75a5e600954 100644 --- a/cmd/kubeadm/app/features/BUILD +++ b/cmd/kubeadm/app/features/BUILD @@ -11,7 +11,7 @@ go_library( srcs = ["features.go"], importpath = "k8s.io/kubernetes/cmd/kubeadm/app/features", deps = [ - "//pkg/util/version:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) @@ -34,7 +34,7 @@ go_test( srcs = ["features_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/util/version:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) diff --git a/cmd/kubeadm/app/features/features.go b/cmd/kubeadm/app/features/features.go index c87f24072f3..7b4ba857746 100644 --- a/cmd/kubeadm/app/features/features.go +++ b/cmd/kubeadm/app/features/features.go @@ -22,8 +22,8 @@ import ( "strconv" "strings" + 
"k8s.io/apimachinery/pkg/util/version" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/util/version" ) const ( diff --git a/cmd/kubeadm/app/features/features_test.go b/cmd/kubeadm/app/features/features_test.go index 2f0a0e5873b..a1b24dd23a3 100644 --- a/cmd/kubeadm/app/features/features_test.go +++ b/cmd/kubeadm/app/features/features_test.go @@ -20,11 +20,11 @@ import ( "reflect" "testing" + "k8s.io/apimachinery/pkg/util/version" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/util/version" ) -var TestMinVersion = version.MustParseSemantic("v1.10.0-alpha.1") +var TestMinVersion = version.MustParseSemantic("v1.11.0-alpha.1") func TestKnownFeatures(t *testing.T) { var someFeatures = FeatureList{ @@ -144,12 +144,12 @@ func TestValidateVersion(t *testing.T) { }, { //min version but correct value given requestedFeatures: map[string]bool{"feature2": true}, - requestedVersion: "v1.10.0", + requestedVersion: "v1.11.0", expectedError: false, }, { //min version and incorrect value given requestedFeatures: map[string]bool{"feature2": true}, - requestedVersion: "v1.9.2", + requestedVersion: "v1.10.2", expectedError: true, }, } diff --git a/cmd/kubeadm/app/images/images.go b/cmd/kubeadm/app/images/images.go index 053a7407f10..3d520cb3cee 100644 --- a/cmd/kubeadm/app/images/images.go +++ b/cmd/kubeadm/app/images/images.go @@ -18,7 +18,6 @@ package images import ( "fmt" - "runtime" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -31,11 +30,6 @@ func GetGenericImage(prefix, image, tag string) string { return fmt.Sprintf("%s/%s:%s", prefix, image, tag) } -// GetGenericArchImage generates and returns an image based on the current runtime arch -func GetGenericArchImage(prefix, image, tag string) string { - return fmt.Sprintf("%s/%s-%s:%s", prefix, image, runtime.GOARCH, tag) -} - // GetKubeControlPlaneImage generates and returns the image for the core Kubernetes components 
or returns the unified control plane image if specified func GetKubeControlPlaneImage(image string, cfg *kubeadmapi.ClusterConfiguration) string { if cfg.UnifiedControlPlaneImage != "" { @@ -68,7 +62,7 @@ func GetAllImages(cfg *kubeadmapi.ClusterConfiguration) []string { imgs = append(imgs, GetKubeControlPlaneImage(constants.KubeProxy, cfg)) // pause, etcd and kube-dns are not available on the ci image repository so use the default image repository. - imgs = append(imgs, GetGenericImage(cfg.ImageRepository, "pause", "3.1")) + imgs = append(imgs, GetGenericImage(cfg.ImageRepository, "pause", constants.PauseVersion)) // if etcd is not external then add the image as it will be required if cfg.Etcd.Local != nil { @@ -79,9 +73,9 @@ func GetAllImages(cfg *kubeadmapi.ClusterConfiguration) []string { if features.Enabled(cfg.FeatureGates, features.CoreDNS) { imgs = append(imgs, GetGenericImage(cfg.ImageRepository, constants.CoreDNS, constants.CoreDNSVersion)) } else { - imgs = append(imgs, GetGenericArchImage(cfg.ImageRepository, "k8s-dns-kube-dns", constants.KubeDNSVersion)) - imgs = append(imgs, GetGenericArchImage(cfg.ImageRepository, "k8s-dns-sidecar", constants.KubeDNSVersion)) - imgs = append(imgs, GetGenericArchImage(cfg.ImageRepository, "k8s-dns-dnsmasq-nanny", constants.KubeDNSVersion)) + imgs = append(imgs, GetGenericImage(cfg.ImageRepository, "k8s-dns-kube-dns", constants.KubeDNSVersion)) + imgs = append(imgs, GetGenericImage(cfg.ImageRepository, "k8s-dns-sidecar", constants.KubeDNSVersion)) + imgs = append(imgs, GetGenericImage(cfg.ImageRepository, "k8s-dns-dnsmasq-nanny", constants.KubeDNSVersion)) } return imgs diff --git a/cmd/kubeadm/app/images/images_test.go b/cmd/kubeadm/app/images/images_test.go index 2a2eb195f14..a7bba7477f5 100644 --- a/cmd/kubeadm/app/images/images_test.go +++ b/cmd/kubeadm/app/images/images_test.go @@ -18,7 +18,6 @@ package images import ( "fmt" - "runtime" "strings" "testing" @@ -45,19 +44,6 @@ func TestGetGenericImage(t *testing.T) 
{ } } -func TestGetGenericArchImage(t *testing.T) { - const ( - prefix = "foo" - image = "bar" - tag = "baz" - ) - expected := fmt.Sprintf("%s/%s-%s:%s", prefix, image, runtime.GOARCH, tag) - actual := GetGenericArchImage(prefix, image, tag) - if actual != expected { - t.Errorf("failed GetGenericArchImage:\n\texpected: %s\n\t actual: %s", expected, actual) - } -} - func TestGetKubeControlPlaneImage(t *testing.T) { var tests = []struct { image string diff --git a/cmd/kubeadm/app/phases/addons/dns/dns.go b/cmd/kubeadm/app/phases/addons/dns/dns.go index 5480c90c0b1..4f259600c8e 100644 --- a/cmd/kubeadm/app/phases/addons/dns/dns.go +++ b/cmd/kubeadm/app/phases/addons/dns/dns.go @@ -19,7 +19,6 @@ package dns import ( "encoding/json" "fmt" - "runtime" "strings" "github.com/mholt/caddy/caddyfile" @@ -97,9 +96,8 @@ func kubeDNSAddon(cfg *kubeadmapi.InitConfiguration, client clientset.Interface) } dnsDeploymentBytes, err := kubeadmutil.ParseTemplate(KubeDNSDeployment, - struct{ ImageRepository, Arch, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, MasterTaintKey string }{ + struct{ ImageRepository, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, MasterTaintKey string }{ ImageRepository: cfg.ImageRepository, - Arch: runtime.GOARCH, Version: kubeadmconstants.KubeDNSVersion, DNSBindAddr: dnsBindAddr, DNSProbeAddr: dnsProbeAddr, diff --git a/cmd/kubeadm/app/phases/addons/dns/dns_test.go b/cmd/kubeadm/app/phases/addons/dns/dns_test.go index 111f444f100..bcc637f0ee3 100644 --- a/cmd/kubeadm/app/phases/addons/dns/dns_test.go +++ b/cmd/kubeadm/app/phases/addons/dns/dns_test.go @@ -95,9 +95,8 @@ func TestCompileManifests(t *testing.T) { }{ { manifest: KubeDNSDeployment, - data: struct{ ImageRepository, Arch, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, MasterTaintKey string }{ + data: struct{ ImageRepository, Version, DNSBindAddr, DNSProbeAddr, DNSDomain, MasterTaintKey string }{ ImageRepository: "foo", - Arch: "foo", Version: "foo", DNSBindAddr: "foo", DNSProbeAddr: "foo", diff 
--git a/cmd/kubeadm/app/phases/addons/dns/manifests.go b/cmd/kubeadm/app/phases/addons/dns/manifests.go index 1cab230cd82..d5a03302fc5 100644 --- a/cmd/kubeadm/app/phases/addons/dns/manifests.go +++ b/cmd/kubeadm/app/phases/addons/dns/manifests.go @@ -50,7 +50,7 @@ spec: optional: true containers: - name: kubedns - image: {{ .ImageRepository }}/k8s-dns-kube-dns-{{ .Arch }}:{{ .Version }} + image: {{ .ImageRepository }}/k8s-dns-kube-dns:{{ .Version }} imagePullPolicy: IfNotPresent resources: # TODO: Set memory limits when we've profiled the container for large @@ -102,7 +102,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: {{ .ImageRepository }}/k8s-dns-dnsmasq-nanny-{{ .Arch }}:{{ .Version }} + image: {{ .ImageRepository }}/k8s-dns-dnsmasq-nanny:{{ .Version }} imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -143,7 +143,7 @@ spec: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: {{ .ImageRepository }}/k8s-dns-sidecar-{{ .Arch }}:{{ .Version }} + image: {{ .ImageRepository }}/k8s-dns-sidecar:{{ .Version }} imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -174,8 +174,6 @@ spec: operator: Exists - key: {{ .MasterTaintKey }} effect: NoSchedule - nodeSelector: - beta.kubernetes.io/arch: {{ .Arch }} ` // KubeDNSService is the kube-dns Service manifest diff --git a/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go b/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go index ff74040d82b..60f8a1f34ea 100644 --- a/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go +++ b/cmd/kubeadm/app/phases/addons/proxy/proxy_test.go @@ -183,7 +183,7 @@ func TestEnsureProxyAddon(t *testing.T) { PodSubnet: "5.6.7.8/24", }, ImageRepository: "someRepo", - KubernetesVersion: "v1.10.0", + KubernetesVersion: "v1.11.0", }, } diff --git a/cmd/kubeadm/app/phases/controlplane/BUILD b/cmd/kubeadm/app/phases/controlplane/BUILD index f7f925cbcdb..b0a46adc91a 100644 --- a/cmd/kubeadm/app/phases/controlplane/BUILD 
+++ b/cmd/kubeadm/app/phases/controlplane/BUILD @@ -20,9 +20,9 @@ go_test( "//cmd/kubeadm/app/phases/certs:go_default_library", "//cmd/kubeadm/test:go_default_library", "//pkg/kubeapiserver/authorizer/modes:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//vendor/k8s.io/utils/pointer:go_default_library", ], ) @@ -44,9 +44,9 @@ go_library( "//cmd/kubeadm/app/util:go_default_library", "//cmd/kubeadm/app/util/staticpod:go_default_library", "//pkg/kubeapiserver/authorizer/modes:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//vendor/github.com/golang/glog:go_default_library", ], ) diff --git a/cmd/kubeadm/app/phases/controlplane/manifests.go b/cmd/kubeadm/app/phases/controlplane/manifests.go index 19d6fd9590f..9039abc0490 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests.go @@ -27,6 +27,7 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/version" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmapiv1alpha3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -36,7 +37,6 @@ import ( kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" staticpodutil "k8s.io/kubernetes/cmd/kubeadm/app/util/staticpod" authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" - "k8s.io/kubernetes/pkg/util/version" ) // CreateInitStaticPodManifestFiles will write all static pod manifest files needed to bring up the control plane. 
@@ -292,10 +292,14 @@ func getControllerManagerCommand(cfg *kubeadmapi.InitConfiguration, k8sVersion * "cluster-signing-key-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.CAKeyName), "use-service-account-credentials": "true", "controllers": "*,bootstrapsigner,tokencleaner", - "authentication-kubeconfig": filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ControllerManagerKubeConfigFileName), - "authorization-kubeconfig": filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ControllerManagerKubeConfigFileName), - "client-ca-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.CACertName), - "requestheader-client-ca-file": filepath.Join(cfg.CertificatesDir, kubeadmconstants.FrontProxyCACertName), + } + + //add the extra arguments for v1.12+ + if k8sVersion.Major() >= 1 && k8sVersion.Minor() >= 12 { + defaultArguments["authentication-kubeconfig"] = filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ControllerManagerKubeConfigFileName) + defaultArguments["authorization-kubeconfig"] = filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ControllerManagerKubeConfigFileName) + defaultArguments["client-ca-file"] = filepath.Join(cfg.CertificatesDir, kubeadmconstants.CACertName) + defaultArguments["requestheader-client-ca-file"] = filepath.Join(cfg.CertificatesDir, kubeadmconstants.FrontProxyCACertName) } // If using external CA, pass empty string to controller manager instead of ca.key/ca.crt path, diff --git a/cmd/kubeadm/app/phases/controlplane/manifests_test.go b/cmd/kubeadm/app/phases/controlplane/manifests_test.go index a3662267be0..41e1945cd7b 100644 --- a/cmd/kubeadm/app/phases/controlplane/manifests_test.go +++ b/cmd/kubeadm/app/phases/controlplane/manifests_test.go @@ -26,12 +26,12 @@ import ( "testing" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/version" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" 
"k8s.io/kubernetes/cmd/kubeadm/app/features" "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes" - "k8s.io/kubernetes/pkg/util/version" testutil "k8s.io/kubernetes/cmd/kubeadm/test" utilpointer "k8s.io/utils/pointer" @@ -663,10 +663,10 @@ func TestGetControllerManagerCommand(t *testing.T) { expected []string }{ { - name: "custom certs dir", + name: "custom certs dir for v1.12.0-beta.2", cfg: &kubeadmapi.ClusterConfiguration{ CertificatesDir: testCertsDir, - KubernetesVersion: "v1.7.0", + KubernetesVersion: "v1.12.0-beta.2", }, expected: []string{ "kube-controller-manager", @@ -686,11 +686,11 @@ func TestGetControllerManagerCommand(t *testing.T) { }, }, { - name: "custom cloudprovider", + name: "custom cloudprovider for v1.12.0-beta.2", cfg: &kubeadmapi.ClusterConfiguration{ Networking: kubeadmapi.Networking{PodSubnet: "10.0.1.15/16"}, CertificatesDir: testCertsDir, - KubernetesVersion: "v1.7.0", + KubernetesVersion: "v1.12.0-beta.2", }, expected: []string{ "kube-controller-manager", @@ -713,12 +713,12 @@ func TestGetControllerManagerCommand(t *testing.T) { }, }, { - name: "custom extra-args", + name: "custom extra-args for v1.12.0-beta.2", cfg: &kubeadmapi.ClusterConfiguration{ Networking: kubeadmapi.Networking{PodSubnet: "10.0.1.15/16"}, ControllerManagerExtraArgs: map[string]string{"node-cidr-mask-size": "20"}, CertificatesDir: testCertsDir, - KubernetesVersion: "v1.7.0", + KubernetesVersion: "v1.12.0-beta.2", }, expected: []string{ "kube-controller-manager", @@ -741,11 +741,11 @@ func TestGetControllerManagerCommand(t *testing.T) { }, }, { - name: "custom IPv6 networking", + name: "custom IPv6 networking for v1.12.0-beta.2", cfg: &kubeadmapi.ClusterConfiguration{ Networking: kubeadmapi.Networking{PodSubnet: "2001:db8::/64"}, CertificatesDir: testCertsDir, - KubernetesVersion: "v1.7.0", + KubernetesVersion: "v1.12.0-beta.2", }, expected: []string{ "kube-controller-manager", @@ -767,6 +767,95 @@ func 
TestGetControllerManagerCommand(t *testing.T) { "--node-cidr-mask-size=80", }, }, + { + name: "custom certs dir for v1.11.3", + cfg: &kubeadmapi.ClusterConfiguration{ + CertificatesDir: testCertsDir, + KubernetesVersion: "v1.11.3", + }, + expected: []string{ + "kube-controller-manager", + "--address=127.0.0.1", + "--leader-elect=true", + "--kubeconfig=" + kubeadmconstants.KubernetesDir + "/controller-manager.conf", + "--root-ca-file=" + testCertsDir + "/ca.crt", + "--service-account-private-key-file=" + testCertsDir + "/sa.key", + "--cluster-signing-cert-file=" + testCertsDir + "/ca.crt", + "--cluster-signing-key-file=" + testCertsDir + "/ca.key", + "--use-service-account-credentials=true", + "--controllers=*,bootstrapsigner,tokencleaner", + }, + }, + { + name: "custom cloudprovider for v1.11.3", + cfg: &kubeadmapi.ClusterConfiguration{ + Networking: kubeadmapi.Networking{PodSubnet: "10.0.1.15/16"}, + CertificatesDir: testCertsDir, + KubernetesVersion: "v1.11.3", + }, + expected: []string{ + "kube-controller-manager", + "--address=127.0.0.1", + "--leader-elect=true", + "--kubeconfig=" + kubeadmconstants.KubernetesDir + "/controller-manager.conf", + "--root-ca-file=" + testCertsDir + "/ca.crt", + "--service-account-private-key-file=" + testCertsDir + "/sa.key", + "--cluster-signing-cert-file=" + testCertsDir + "/ca.crt", + "--cluster-signing-key-file=" + testCertsDir + "/ca.key", + "--use-service-account-credentials=true", + "--controllers=*,bootstrapsigner,tokencleaner", + "--allocate-node-cidrs=true", + "--cluster-cidr=10.0.1.15/16", + "--node-cidr-mask-size=24", + }, + }, + { + name: "custom extra-args for v1.11.3", + cfg: &kubeadmapi.ClusterConfiguration{ + Networking: kubeadmapi.Networking{PodSubnet: "10.0.1.15/16"}, + ControllerManagerExtraArgs: map[string]string{"node-cidr-mask-size": "20"}, + CertificatesDir: testCertsDir, + KubernetesVersion: "v1.11.3", + }, + expected: []string{ + "kube-controller-manager", + "--address=127.0.0.1", + "--leader-elect=true", 
+ "--kubeconfig=" + kubeadmconstants.KubernetesDir + "/controller-manager.conf", + "--root-ca-file=" + testCertsDir + "/ca.crt", + "--service-account-private-key-file=" + testCertsDir + "/sa.key", + "--cluster-signing-cert-file=" + testCertsDir + "/ca.crt", + "--cluster-signing-key-file=" + testCertsDir + "/ca.key", + "--use-service-account-credentials=true", + "--controllers=*,bootstrapsigner,tokencleaner", + "--allocate-node-cidrs=true", + "--cluster-cidr=10.0.1.15/16", + "--node-cidr-mask-size=20", + }, + }, + { + name: "custom IPv6 networking for v1.11.3", + cfg: &kubeadmapi.ClusterConfiguration{ + Networking: kubeadmapi.Networking{PodSubnet: "2001:db8::/64"}, + CertificatesDir: testCertsDir, + KubernetesVersion: "v1.11.3", + }, + expected: []string{ + "kube-controller-manager", + "--address=127.0.0.1", + "--leader-elect=true", + "--kubeconfig=" + kubeadmconstants.KubernetesDir + "/controller-manager.conf", + "--root-ca-file=" + testCertsDir + "/ca.crt", + "--service-account-private-key-file=" + testCertsDir + "/sa.key", + "--cluster-signing-cert-file=" + testCertsDir + "/ca.crt", + "--cluster-signing-key-file=" + testCertsDir + "/ca.key", + "--use-service-account-credentials=true", + "--controllers=*,bootstrapsigner,tokencleaner", + "--allocate-node-cidrs=true", + "--cluster-cidr=2001:db8::/64", + "--node-cidr-mask-size=80", + }, + }, } for _, rt := range tests { @@ -868,11 +957,11 @@ func TestGetControllerManagerCommandExternalCA(t *testing.T) { expectedArgFunc func(dir string) []string }{ { - name: "caKeyPresent-false", + name: "caKeyPresent-false for v1.12.0-beta.2", cfg: &kubeadmapi.InitConfiguration{ APIEndpoint: kubeadmapi.APIEndpoint{AdvertiseAddress: "1.2.3.4"}, ClusterConfiguration: kubeadmapi.ClusterConfiguration{ - KubernetesVersion: "v1.7.0", + KubernetesVersion: "v1.12.0-beta.2", Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, }, }, @@ -897,11 +986,11 @@ func TestGetControllerManagerCommandExternalCA(t 
*testing.T) { }, }, { - name: "caKeyPresent true", + name: "caKeyPresent true for v1.12.0-beta.2", cfg: &kubeadmapi.InitConfiguration{ APIEndpoint: kubeadmapi.APIEndpoint{AdvertiseAddress: "1.2.3.4"}, ClusterConfiguration: kubeadmapi.ClusterConfiguration{ - KubernetesVersion: "v1.7.0", + KubernetesVersion: "v1.12.0-beta.2", Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, }, }, @@ -925,6 +1014,56 @@ func TestGetControllerManagerCommandExternalCA(t *testing.T) { } }, }, + { + name: "caKeyPresent-false for v1.11.3", + cfg: &kubeadmapi.InitConfiguration{ + APIEndpoint: kubeadmapi.APIEndpoint{AdvertiseAddress: "1.2.3.4"}, + ClusterConfiguration: kubeadmapi.ClusterConfiguration{ + KubernetesVersion: "v1.11.3", + Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, + }, + }, + caKeyPresent: false, + expectedArgFunc: func(tmpdir string) []string { + return []string{ + "kube-controller-manager", + "--address=127.0.0.1", + "--leader-elect=true", + "--kubeconfig=" + kubeadmconstants.KubernetesDir + "/controller-manager.conf", + "--root-ca-file=" + tmpdir + "/ca.crt", + "--service-account-private-key-file=" + tmpdir + "/sa.key", + "--cluster-signing-cert-file=", + "--cluster-signing-key-file=", + "--use-service-account-credentials=true", + "--controllers=*,bootstrapsigner,tokencleaner", + } + }, + }, + { + name: "caKeyPresent true for v1.11.3", + cfg: &kubeadmapi.InitConfiguration{ + APIEndpoint: kubeadmapi.APIEndpoint{AdvertiseAddress: "1.2.3.4"}, + ClusterConfiguration: kubeadmapi.ClusterConfiguration{ + KubernetesVersion: "v1.11.3", + Networking: kubeadmapi.Networking{ServiceSubnet: "10.96.0.0/12", DNSDomain: "cluster.local"}, + }, + }, + caKeyPresent: true, + expectedArgFunc: func(tmpdir string) []string { + return []string{ + "kube-controller-manager", + "--address=127.0.0.1", + "--leader-elect=true", + "--kubeconfig=" + kubeadmconstants.KubernetesDir + "/controller-manager.conf", + 
"--root-ca-file=" + tmpdir + "/ca.crt", + "--service-account-private-key-file=" + tmpdir + "/sa.key", + "--cluster-signing-cert-file=" + tmpdir + "/ca.crt", + "--cluster-signing-key-file=" + tmpdir + "/ca.key", + "--use-service-account-credentials=true", + "--controllers=*,bootstrapsigner,tokencleaner", + } + }, + }, } for _, test := range tests { diff --git a/cmd/kubeadm/app/phases/kubelet/BUILD b/cmd/kubeadm/app/phases/kubelet/BUILD index c57dbac93f3..bc2f2b9f4b8 100644 --- a/cmd/kubeadm/app/phases/kubelet/BUILD +++ b/cmd/kubeadm/app/phases/kubelet/BUILD @@ -23,11 +23,11 @@ go_library( "//pkg/util/initsystem:go_default_library", "//pkg/util/node:go_default_library", "//pkg/util/procfs:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/rbac/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", @@ -47,10 +47,10 @@ go_test( "//cmd/kubeadm/app/constants:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/apis/config:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", diff --git a/cmd/kubeadm/app/phases/kubelet/config.go 
b/cmd/kubeadm/app/phases/kubelet/config.go index 87dea285976..d6390408e71 100644 --- a/cmd/kubeadm/app/phases/kubelet/config.go +++ b/cmd/kubeadm/app/phases/kubelet/config.go @@ -26,6 +26,7 @@ import ( rbac "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs" @@ -33,7 +34,6 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" rbachelper "k8s.io/kubernetes/pkg/apis/rbac/v1" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" - "k8s.io/kubernetes/pkg/util/version" ) // WriteConfigToDisk writes the kubelet config object down to a file diff --git a/cmd/kubeadm/app/phases/kubelet/config_test.go b/cmd/kubeadm/app/phases/kubelet/config_test.go index db6d55393c9..1949c472dff 100644 --- a/cmd/kubeadm/app/phases/kubelet/config_test.go +++ b/cmd/kubeadm/app/phases/kubelet/config_test.go @@ -22,11 +22,11 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" - "k8s.io/kubernetes/pkg/util/version" ) func TestCreateConfigMap(t *testing.T) { @@ -35,7 +35,7 @@ func TestCreateConfigMap(t *testing.T) { cfg := &kubeadmapi.InitConfiguration{ NodeRegistration: kubeadmapi.NodeRegistrationOptions{Name: nodeName}, ClusterConfiguration: kubeadmapi.ClusterConfiguration{ - KubernetesVersion: "v1.11.0", + KubernetesVersion: "v1.12.0", ComponentConfigs: kubeadmapi.ComponentConfigs{ Kubelet: &kubeletconfig.KubeletConfiguration{}, }, diff --git a/cmd/kubeadm/app/phases/kubelet/dynamic.go b/cmd/kubeadm/app/phases/kubelet/dynamic.go index 
f16131b075a..d8b381ff813 100644 --- a/cmd/kubeadm/app/phases/kubelet/dynamic.go +++ b/cmd/kubeadm/app/phases/kubelet/dynamic.go @@ -21,10 +21,10 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" - "k8s.io/kubernetes/pkg/util/version" ) // EnableDynamicConfigForNode updates the Node's ConfigSource to enable Dynamic Kubelet Configuration, depending on what version the kubelet is diff --git a/cmd/kubeadm/app/phases/kubelet/dynamic_test.go b/cmd/kubeadm/app/phases/kubelet/dynamic_test.go index 150eeda405b..c9d11e0629b 100644 --- a/cmd/kubeadm/app/phases/kubelet/dynamic_test.go +++ b/cmd/kubeadm/app/phases/kubelet/dynamic_test.go @@ -22,10 +22,10 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - "k8s.io/kubernetes/pkg/util/version" ) func TestEnableDynamicConfigForNode(t *testing.T) { diff --git a/cmd/kubeadm/app/phases/upgrade/BUILD b/cmd/kubeadm/app/phases/upgrade/BUILD index 74637b304ea..8007374bf91 100644 --- a/cmd/kubeadm/app/phases/upgrade/BUILD +++ b/cmd/kubeadm/app/phases/upgrade/BUILD @@ -25,6 +25,7 @@ go_library( "//cmd/kubeadm/app/phases/bootstraptoken/clusterinfo:go_default_library", "//cmd/kubeadm/app/phases/bootstraptoken/node:go_default_library", "//cmd/kubeadm/app/phases/certs:go_default_library", + "//cmd/kubeadm/app/phases/certs/renewal:go_default_library", "//cmd/kubeadm/app/phases/controlplane:go_default_library", "//cmd/kubeadm/app/phases/etcd:go_default_library", "//cmd/kubeadm/app/phases/kubelet:go_default_library", @@ -36,7 +37,6 @@ go_library( "//cmd/kubeadm/app/util/apiclient:go_default_library", 
"//cmd/kubeadm/app/util/dryrun:go_default_library", "//cmd/kubeadm/app/util/etcd:go_default_library", - "//pkg/util/version:go_default_library", "//pkg/version:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -45,6 +45,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/util/cert:go_default_library", ], @@ -79,16 +80,18 @@ go_test( "//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/phases/certs:go_default_library", + "//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library", "//cmd/kubeadm/app/phases/controlplane:go_default_library", "//cmd/kubeadm/app/phases/etcd:go_default_library", "//cmd/kubeadm/app/util/apiclient:go_default_library", "//cmd/kubeadm/app/util/config:go_default_library", "//cmd/kubeadm/app/util/etcd:go_default_library", "//cmd/kubeadm/test:go_default_library", - "//pkg/util/version:go_default_library", + "//cmd/kubeadm/test/certs:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/github.com/coreos/etcd/clientv3:go_default_library", "//vendor/github.com/coreos/etcd/pkg/transport:go_default_library", diff --git a/cmd/kubeadm/app/phases/upgrade/compute.go b/cmd/kubeadm/app/phases/upgrade/compute.go index 326951168fd..a9c7f13d34c 100644 --- 
a/cmd/kubeadm/app/phases/upgrade/compute.go +++ b/cmd/kubeadm/app/phases/upgrade/compute.go @@ -20,12 +20,12 @@ import ( "fmt" "strings" + versionutil "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/features" "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons/dns" etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd" - versionutil "k8s.io/kubernetes/pkg/util/version" ) // Upgrade defines an upgrade possibility to upgrade from a current version to a new one diff --git a/cmd/kubeadm/app/phases/upgrade/compute_test.go b/cmd/kubeadm/app/phases/upgrade/compute_test.go index 7c7e474f89f..b7b56e1e0f2 100644 --- a/cmd/kubeadm/app/phases/upgrade/compute_test.go +++ b/cmd/kubeadm/app/phases/upgrade/compute_test.go @@ -26,10 +26,10 @@ import ( apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + versionutil "k8s.io/apimachinery/pkg/util/version" clientsetfake "k8s.io/client-go/kubernetes/fake" "k8s.io/kubernetes/cmd/kubeadm/app/constants" etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd" - versionutil "k8s.io/kubernetes/pkg/util/version" ) type fakeVersionGetter struct { @@ -715,7 +715,7 @@ func TestGetAvailableUpgrades(t *testing.T) { KubeVersion: "v1.12.0", KubeadmVersion: "v1.12.0", DNSType: "kube-dns", - DNSVersion: "1.14.10", + DNSVersion: "1.14.13", EtcdVersion: "3.2.24", }, }, diff --git a/cmd/kubeadm/app/phases/upgrade/policy.go b/cmd/kubeadm/app/phases/upgrade/policy.go index 25c5d4a2035..0a296ecb74b 100644 --- a/cmd/kubeadm/app/phases/upgrade/policy.go +++ b/cmd/kubeadm/app/phases/upgrade/policy.go @@ -20,8 +20,8 @@ import ( "fmt" "strings" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/cmd/kubeadm/app/constants" - "k8s.io/kubernetes/pkg/util/version" ) const ( diff --git a/cmd/kubeadm/app/phases/upgrade/policy_test.go b/cmd/kubeadm/app/phases/upgrade/policy_test.go index 
ccf2861273b..696788d7247 100644 --- a/cmd/kubeadm/app/phases/upgrade/policy_test.go +++ b/cmd/kubeadm/app/phases/upgrade/policy_test.go @@ -19,7 +19,7 @@ package upgrade import ( "testing" - "k8s.io/kubernetes/pkg/util/version" + "k8s.io/apimachinery/pkg/util/version" ) func TestEnforceVersionPolicies(t *testing.T) { @@ -34,38 +34,38 @@ func TestEnforceVersionPolicies(t *testing.T) { { name: "minor upgrade", vg: &fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.10.3", - kubeadmVersion: "v1.10.5", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.11.3", + kubeadmVersion: "v1.11.5", }, - newK8sVersion: "v1.10.5", + newK8sVersion: "v1.11.5", }, { name: "major upgrade", vg: &fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.10.2", - kubeadmVersion: "v1.11.1", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.11.2", + kubeadmVersion: "v1.12.1", }, - newK8sVersion: "v1.11.0", + newK8sVersion: "v1.12.0", }, { name: "downgrade", vg: &fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.10.3", - kubeadmVersion: "v1.10.3", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.11.3", + kubeadmVersion: "v1.11.3", }, - newK8sVersion: "v1.10.2", + newK8sVersion: "v1.11.2", }, { name: "same version upgrade", vg: &fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.10.3", - kubeadmVersion: "v1.10.3", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.11.3", + kubeadmVersion: "v1.11.3", }, - newK8sVersion: "v1.10.3", + newK8sVersion: "v1.11.3", }, { name: "new version must be higher than v1.10.0", @@ -92,103 +92,103 @@ func TestEnforceVersionPolicies(t *testing.T) { { name: "downgrading two minor versions in one go is not supported", vg: &fakeVersionGetter{ - clusterVersion: "v1.12.3", - kubeletVersion: "v1.12.3", - kubeadmVersion: "v1.12.0", + clusterVersion: "v1.13.3", + kubeletVersion: "v1.13.3", + kubeadmVersion: "v1.13.0", }, - newK8sVersion: "v1.10.3", + newK8sVersion: "v1.11.3", expectedMandatoryErrs: 
1, // can't downgrade two minor versions expectedSkippableErrs: 1, // can't upgrade old k8s with newer kubeadm }, { name: "kubeadm version must be higher than the new kube version. However, patch version skews may be forced", vg: &fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.10.3", - kubeadmVersion: "v1.10.3", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.11.3", + kubeadmVersion: "v1.11.3", }, - newK8sVersion: "v1.10.5", + newK8sVersion: "v1.11.5", expectedSkippableErrs: 1, }, { name: "kubeadm version must be higher than the new kube version. Trying to upgrade k8s to a higher minor version than kubeadm itself should never be supported", vg: &fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.10.3", - kubeadmVersion: "v1.10.3", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.11.3", + kubeadmVersion: "v1.11.3", }, - newK8sVersion: "v1.11.0", + newK8sVersion: "v1.12.0", expectedMandatoryErrs: 1, }, { name: "the maximum skew between the cluster version and the kubelet versions should be one minor version. 
This may be forced through though.", vg: &fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.9.8", - kubeadmVersion: "v1.11.0", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.10.8", + kubeadmVersion: "v1.12.0", }, - newK8sVersion: "v1.11.0", + newK8sVersion: "v1.12.0", expectedSkippableErrs: 1, }, { name: "experimental upgrades supported if the flag is set", vg: &fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.10.3", - kubeadmVersion: "v1.11.0-beta.1", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.11.3", + kubeadmVersion: "v1.12.0-beta.1", }, - newK8sVersion: "v1.11.0-beta.1", + newK8sVersion: "v1.12.0-beta.1", allowExperimental: true, }, { name: "release candidate upgrades supported if the flag is set", vg: &fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.10.3", - kubeadmVersion: "v1.11.0-rc.1", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.11.3", + kubeadmVersion: "v1.12.0-rc.1", }, - newK8sVersion: "v1.11.0-rc.1", + newK8sVersion: "v1.12.0-rc.1", allowRCs: true, }, { name: "release candidate upgrades supported if the flag is set", vg: &fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.10.3", - kubeadmVersion: "v1.11.0-rc.1", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.11.3", + kubeadmVersion: "v1.12.0-rc.1", }, - newK8sVersion: "v1.11.0-rc.1", + newK8sVersion: "v1.12.0-rc.1", allowExperimental: true, }, { name: "the user should not be able to upgrade to an experimental version if they haven't opted into that", vg: &fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.10.3", - kubeadmVersion: "v1.11.0-beta.1", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.11.3", + kubeadmVersion: "v1.12.0-beta.1", }, - newK8sVersion: "v1.11.0-beta.1", + newK8sVersion: "v1.12.0-beta.1", allowRCs: true, expectedSkippableErrs: 1, }, { name: "the user should not be able to upgrade to an release candidate version if they haven't opted into that", vg: 
&fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.10.3", - kubeadmVersion: "v1.11.0-rc.1", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.11.3", + kubeadmVersion: "v1.12.0-rc.1", }, - newK8sVersion: "v1.11.0-rc.1", + newK8sVersion: "v1.12.0-rc.1", expectedSkippableErrs: 1, }, { name: "the user can't use a newer minor version of kubeadm to upgrade an older version of kubeadm", vg: &fakeVersionGetter{ - clusterVersion: "v1.10.3", - kubeletVersion: "v1.10.3", - kubeadmVersion: "v1.11.0", + clusterVersion: "v1.11.3", + kubeletVersion: "v1.11.3", + kubeadmVersion: "v1.12.0", }, - newK8sVersion: "v1.10.6", + newK8sVersion: "v1.11.6", expectedSkippableErrs: 1, // can't upgrade old k8s with newer kubeadm }, } diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade.go b/cmd/kubeadm/app/phases/upgrade/postupgrade.go index 08f95268cdd..1c813ac315c 100644 --- a/cmd/kubeadm/app/phases/upgrade/postupgrade.go +++ b/cmd/kubeadm/app/phases/upgrade/postupgrade.go @@ -26,6 +26,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" certutil "k8s.io/client-go/util/cert" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" @@ -43,7 +44,6 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/phases/uploadconfig" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun" - "k8s.io/kubernetes/pkg/util/version" ) var expiry = 180 * 24 * time.Hour diff --git a/cmd/kubeadm/app/phases/upgrade/prepull_test.go b/cmd/kubeadm/app/phases/upgrade/prepull_test.go index 2789d8e644c..9fd38b77c10 100644 --- a/cmd/kubeadm/app/phases/upgrade/prepull_test.go +++ b/cmd/kubeadm/app/phases/upgrade/prepull_test.go @@ -20,7 +20,7 @@ import ( "fmt" "testing" "time" - //"k8s.io/kubernetes/pkg/util/version" + //"k8s.io/apimachinery/pkg/util/version" ) // 
failedCreatePrepuller is a fake prepuller that errors for kube-controller-manager in the CreateFunc call diff --git a/cmd/kubeadm/app/phases/upgrade/selfhosted.go b/cmd/kubeadm/app/phases/upgrade/selfhosted.go index efd37af0876..b92af6978c1 100644 --- a/cmd/kubeadm/app/phases/upgrade/selfhosted.go +++ b/cmd/kubeadm/app/phases/upgrade/selfhosted.go @@ -23,13 +23,13 @@ import ( apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane" "k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" - "k8s.io/kubernetes/pkg/util/version" ) const ( diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods.go b/cmd/kubeadm/app/phases/upgrade/staticpods.go index 900b39c98bd..06518d4a1d4 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods.go @@ -22,15 +22,16 @@ import ( "strings" "time" + "k8s.io/apimachinery/pkg/util/version" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/constants" certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" + "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/renewal" controlplanephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane" etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd" "k8s.io/kubernetes/cmd/kubeadm/app/util" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd" - "k8s.io/kubernetes/pkg/util/version" ) const ( @@ -185,31 +186,8 @@ func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticP } } - if cfg.Etcd.Local != nil { - // ensure etcd certs are generated for etcd and kube-apiserver - if component == constants.Etcd || 
component == constants.KubeAPIServer { - caCert, caKey, err := certsphase.KubeadmCertEtcdCA.CreateAsCA(cfg) - if err != nil { - return fmt.Errorf("failed to upgrade the %s CA certificate and key: %v", constants.Etcd, err) - } - - if component == constants.Etcd { - if err := certsphase.KubeadmCertEtcdServer.CreateFromCA(cfg, caCert, caKey); err != nil { - return fmt.Errorf("failed to upgrade the %s certificate and key: %v", constants.Etcd, err) - } - if err := certsphase.KubeadmCertEtcdPeer.CreateFromCA(cfg, caCert, caKey); err != nil { - return fmt.Errorf("failed to upgrade the %s peer certificate and key: %v", constants.Etcd, err) - } - if err := certsphase.KubeadmCertEtcdHealthcheck.CreateFromCA(cfg, caCert, caKey); err != nil { - return fmt.Errorf("failed to upgrade the %s healthcheck certificate and key: %v", constants.Etcd, err) - } - } - if component == constants.KubeAPIServer { - if err := certsphase.KubeadmCertEtcdAPIClient.CreateFromCA(cfg, caCert, caKey); err != nil { - return fmt.Errorf("failed to upgrade the %s %s-client certificate and key: %v", constants.KubeAPIServer, constants.Etcd, err) - } - } - } + if err := renewCerts(cfg, component); err != nil { + return fmt.Errorf("failed to renew certificates for component %q: %v", component, err) } // The old manifest is here; in the /etc/kubernetes/manifests/ @@ -511,7 +489,7 @@ func rollbackOldManifests(oldManifests map[string]string, origErr error, pathMgr return fmt.Errorf("couldn't upgrade control plane. kubeadm has tried to recover everything into the earlier state. Errors faced: %v", errs) } -// rollbackEtcdData rolls back the the content of etcd folder if something went wrong. +// rollbackEtcdData rolls back the content of etcd folder if something went wrong. // When the folder contents are successfully rolled back, nil is returned, otherwise an error is returned. 
func rollbackEtcdData(cfg *kubeadmapi.InitConfiguration, pathMgr StaticPodPathManager) error { backupEtcdDir := pathMgr.BackupEtcdDir() @@ -524,3 +502,35 @@ func rollbackEtcdData(cfg *kubeadmapi.InitConfiguration, pathMgr StaticPodPathMa return nil } + +func renewCerts(cfg *kubeadmapi.InitConfiguration, component string) error { + if cfg.Etcd.Local != nil { + // ensure etcd certs are loaded for etcd and kube-apiserver + if component == constants.Etcd || component == constants.KubeAPIServer { + caCert, caKey, err := certsphase.LoadCertificateAuthority(cfg.CertificatesDir, certsphase.KubeadmCertEtcdCA.BaseName) + if err != nil { + return fmt.Errorf("failed to upgrade the %s CA certificate and key: %v", constants.Etcd, err) + } + renewer := renewal.NewFileRenewal(caCert, caKey) + + if component == constants.Etcd { + for _, cert := range []*certsphase.KubeadmCert{ + &certsphase.KubeadmCertEtcdServer, + &certsphase.KubeadmCertEtcdPeer, + &certsphase.KubeadmCertEtcdHealthcheck, + } { + if err := renewal.RenewExistingCert(cfg.CertificatesDir, cert.BaseName, renewer); err != nil { + return fmt.Errorf("failed to renew %s certificate and key: %v", cert.Name, err) + } + } + } + if component == constants.KubeAPIServer { + cert := certsphase.KubeadmCertEtcdAPIClient + if err := renewal.RenewExistingCert(cfg.CertificatesDir, cert.BaseName, renewer); err != nil { + return fmt.Errorf("failed to renew %s certificate and key: %v", cert.Name, err) + } + } + } + } + return nil +} diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go index 1eb41f35cb2..ea58b28f23e 100644 --- a/cmd/kubeadm/app/phases/upgrade/staticpods_test.go +++ b/cmd/kubeadm/app/phases/upgrade/staticpods_test.go @@ -20,6 +20,7 @@ import ( "crypto/sha256" "fmt" "io/ioutil" + "math/big" "os" "path/filepath" "strings" @@ -32,11 +33,14 @@ import ( "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" "k8s.io/kubernetes/cmd/kubeadm/app/constants" 
certsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" + "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil" controlplanephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane" etcdphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/etcd" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config" etcdutil "k8s.io/kubernetes/cmd/kubeadm/app/util/etcd" + testutil "k8s.io/kubernetes/cmd/kubeadm/test" + certstestutil "k8s.io/kubernetes/cmd/kubeadm/test/certs" ) const ( @@ -451,11 +455,27 @@ func TestStaticPodControlPlane(t *testing.T) { t.Fatalf("couldn't read temp file: %v", err) } - newcfg, err := getConfig("v1.10.0", tempCertsDir, tmpEtcdDataDir) + newcfg, err := getConfig("v1.11.0", tempCertsDir, tmpEtcdDataDir) if err != nil { t.Fatalf("couldn't create config: %v", err) } + // create the kubeadm etcd certs + caCert, caKey, err := certsphase.KubeadmCertEtcdCA.CreateAsCA(newcfg) + if err != nil { + t.Fatalf("couldn't create new CA certificate: %v", err) + } + for _, cert := range []*certsphase.KubeadmCert{ + &certsphase.KubeadmCertEtcdServer, + &certsphase.KubeadmCertEtcdPeer, + &certsphase.KubeadmCertEtcdHealthcheck, + &certsphase.KubeadmCertEtcdAPIClient, + } { + if err := cert.CreateFromCA(newcfg, caCert, caKey); err != nil { + t.Fatalf("couldn't create certificate %s: %v", cert.Name, err) + } + } + actualErr := StaticPodControlPlane( waiter, pathMgr, @@ -606,3 +626,113 @@ func TestCleanupDirs(t *testing.T) { }) } } + +func TestRenewCerts(t *testing.T) { + caCert, caKey := certstestutil.SetupCertificateAuthorithy(t) + t.Run("all certs exist, should be rotated", func(t *testing.T) { + }) + tests := []struct { + name string + component string + skipCreateCA bool + shouldErrorOnRenew bool + certsShouldExist []*certsphase.KubeadmCert + }{ + { + name: "all certs exist, should be rotated", + component: constants.Etcd, + certsShouldExist: []*certsphase.KubeadmCert{ + &certsphase.KubeadmCertEtcdServer, + 
&certsphase.KubeadmCertEtcdPeer, + &certsphase.KubeadmCertEtcdHealthcheck, + }, + }, + { + name: "just renew API cert", + component: constants.KubeAPIServer, + certsShouldExist: []*certsphase.KubeadmCert{ + &certsphase.KubeadmCertEtcdAPIClient, + }, + }, + { + name: "ignores other components", + skipCreateCA: true, + component: constants.KubeScheduler, + }, + { + name: "missing a cert to renew", + component: constants.Etcd, + shouldErrorOnRenew: true, + certsShouldExist: []*certsphase.KubeadmCert{ + &certsphase.KubeadmCertEtcdServer, + &certsphase.KubeadmCertEtcdPeer, + }, + }, + { + name: "no CA, cannot continue", + component: constants.Etcd, + skipCreateCA: true, + shouldErrorOnRenew: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Set up basic requisites + tmpDir := testutil.SetupTempDir(t) + defer os.RemoveAll(tmpDir) + + cfg := testutil.GetDefaultInternalConfig(t) + cfg.CertificatesDir = tmpDir + + if !test.skipCreateCA { + if err := pkiutil.WriteCertAndKey(tmpDir, constants.EtcdCACertAndKeyBaseName, caCert, caKey); err != nil { + t.Fatalf("couldn't write out CA: %v", err) + } + } + + // Create expected certs + for _, kubeCert := range test.certsShouldExist { + if err := kubeCert.CreateFromCA(cfg, caCert, caKey); err != nil { + t.Fatalf("couldn't renew certificate %q: %v", kubeCert.Name, err) + } + } + + // Load expected certs to check if serial numbers change + certMaps := make(map[*certsphase.KubeadmCert]big.Int) + for _, kubeCert := range test.certsShouldExist { + cert, err := pkiutil.TryLoadCertFromDisk(tmpDir, kubeCert.BaseName) + if err != nil { + t.Fatalf("couldn't load certificate %q: %v", kubeCert.Name, err) + } + certMaps[kubeCert] = *cert.SerialNumber + } + + // Renew everything + err := renewCerts(cfg, test.component) + if test.shouldErrorOnRenew { + if err == nil { + t.Fatal("expected renewal error, got nothing") + } + // expected error, got error + return + } + if err != nil { + t.Fatalf("couldn't 
renew certificates: %v", err) + } + + // See if the certificate serial numbers change + for kubeCert, cert := range certMaps { + newCert, err := pkiutil.TryLoadCertFromDisk(tmpDir, kubeCert.BaseName) + if err != nil { + t.Errorf("couldn't load new certificate %q: %v", kubeCert.Name, err) + continue + } + if cert.Cmp(newCert.SerialNumber) == 0 { + t.Errorf("certificate %v was not reissued", kubeCert.Name) + } + } + }) + + } +} diff --git a/cmd/kubeadm/app/phases/upgrade/versiongetter.go b/cmd/kubeadm/app/phases/upgrade/versiongetter.go index d9c9d69aace..27648056da4 100644 --- a/cmd/kubeadm/app/phases/upgrade/versiongetter.go +++ b/cmd/kubeadm/app/phases/upgrade/versiongetter.go @@ -22,9 +22,9 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + versionutil "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util" - versionutil "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/pkg/version" ) diff --git a/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go b/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go index 19fcb6c5e37..e0a99653108 100644 --- a/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go +++ b/cmd/kubeadm/app/phases/uploadconfig/uploadconfig_test.go @@ -72,7 +72,7 @@ func TestUploadConfiguration(t *testing.T) { AdvertiseAddress: "1.2.3.4", }, ClusterConfiguration: kubeadmapiv1alpha3.ClusterConfiguration{ - KubernetesVersion: "v1.10.10", + KubernetesVersion: "v1.11.10", }, BootstrapTokens: []kubeadmapiv1alpha3.BootstrapToken{ { diff --git a/cmd/kubeadm/app/preflight/BUILD b/cmd/kubeadm/app/preflight/BUILD index 287db1c2e79..4f2f35c3875 100644 --- a/cmd/kubeadm/app/preflight/BUILD +++ b/cmd/kubeadm/app/preflight/BUILD @@ -24,10 +24,10 @@ go_library( "//pkg/registry/core/service/ipallocator:go_default_library", "//pkg/util/initsystem:go_default_library", "//pkg/util/ipvs:go_default_library", - "//pkg/util/version:go_default_library", 
"//pkg/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//vendor/github.com/PuerkitoBio/purell:go_default_library", "//vendor/github.com/blang/semver:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/cmd/kubeadm/app/preflight/checks.go b/cmd/kubeadm/app/preflight/checks.go index 4c6a958b305..e3ff7ee0ceb 100644 --- a/cmd/kubeadm/app/preflight/checks.go +++ b/cmd/kubeadm/app/preflight/checks.go @@ -41,6 +41,7 @@ import ( netutil "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/sets" + versionutil "k8s.io/apimachinery/pkg/util/version" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/images" @@ -49,7 +50,6 @@ import ( "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/util/initsystem" ipvsutil "k8s.io/kubernetes/pkg/util/ipvs" - versionutil "k8s.io/kubernetes/pkg/util/version" kubeadmversion "k8s.io/kubernetes/pkg/version" utilsexec "k8s.io/utils/exec" ) @@ -501,7 +501,7 @@ func (subnet HTTPProxyCIDRCheck) Check() (warnings, errors []error) { return nil, nil } -// SystemVerificationCheck defines struct used for for running the system verification node check in test/e2e_node/system +// SystemVerificationCheck defines struct used for running the system verification node check in test/e2e_node/system type SystemVerificationCheck struct { IsDocker bool } diff --git a/cmd/kubeadm/app/preflight/checks_test.go b/cmd/kubeadm/app/preflight/checks_test.go index ec68c35840b..c8ecf76fb1d 100644 --- a/cmd/kubeadm/app/preflight/checks_test.go +++ b/cmd/kubeadm/app/preflight/checks_test.go @@ -639,13 +639,13 @@ func TestKubeletVersionCheck(t *testing.T) { expectErrors bool expectWarnings bool }{ - 
{"v1.11.2", "", false, false}, // check minimally supported version when there is no information about control plane - {"v1.8.3", "v1.8.8", true, false}, // too old kubelet (older than kubeadmconstants.MinimumKubeletVersion), should fail. - {"v1.10.0", "v1.10.5", false, false}, // kubelet within same major.minor as control plane - {"v1.10.5", "v1.10.1", false, false}, // kubelet is newer, but still within same major.minor as control plane - {"v1.10.0", "v1.11.1", false, false}, // kubelet is lower than control plane, but newer than minimally supported - {"v1.11.0-alpha.1", "v1.10.1", true, false}, // kubelet is newer (development build) than control plane, should fail. - {"v1.11.0", "v1.10.5", true, false}, // kubelet is newer (release) than control plane, should fail. + {"v1.12.2", "", false, false}, // check minimally supported version when there is no information about control plane + {"v1.9.3", "v1.9.8", true, false}, // too old kubelet (older than kubeadmconstants.MinimumKubeletVersion), should fail. + {"v1.11.0", "v1.11.5", false, false}, // kubelet within same major.minor as control plane + {"v1.11.5", "v1.11.1", false, false}, // kubelet is newer, but still within same major.minor as control plane + {"v1.11.0", "v1.12.1", false, false}, // kubelet is lower than control plane, but newer than minimally supported + {"v1.12.0-alpha.1", "v1.11.1", true, false}, // kubelet is newer (development build) than control plane, should fail. + {"v1.12.0", "v1.11.5", true, false}, // kubelet is newer (release) than control plane, should fail. 
} for _, tc := range cases { diff --git a/cmd/kubeadm/app/preflight/utils.go b/cmd/kubeadm/app/preflight/utils.go index 14bae5c6b13..3f94fd76af3 100644 --- a/cmd/kubeadm/app/preflight/utils.go +++ b/cmd/kubeadm/app/preflight/utils.go @@ -21,7 +21,7 @@ import ( "regexp" "strings" - "k8s.io/kubernetes/pkg/util/version" + "k8s.io/apimachinery/pkg/util/version" utilsexec "k8s.io/utils/exec" ) diff --git a/cmd/kubeadm/app/util/BUILD b/cmd/kubeadm/app/util/BUILD index 4dc0c068f85..eec96666bb3 100644 --- a/cmd/kubeadm/app/util/BUILD +++ b/cmd/kubeadm/app/util/BUILD @@ -24,7 +24,6 @@ go_library( deps = [ "//cmd/kubeadm/app/apis/kubeadm:go_default_library", "//cmd/kubeadm/app/constants:go_default_library", - "//pkg/util/version:go_default_library", "//pkg/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -32,6 +31,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", "//vendor/github.com/ghodss/yaml:go_default_library", diff --git a/cmd/kubeadm/app/util/config/BUILD b/cmd/kubeadm/app/util/config/BUILD index a38912c6737..266f83214b1 100644 --- a/cmd/kubeadm/app/util/config/BUILD +++ b/cmd/kubeadm/app/util/config/BUILD @@ -24,12 +24,12 @@ go_library( "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/util:go_default_library", "//pkg/util/node:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/tools/bootstrap/token/util:go_default_library", "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library", @@ -55,10 +55,10 @@ go_test( "//cmd/kubeadm/app/constants:go_default_library", "//cmd/kubeadm/app/util:go_default_library", "//cmd/kubeadm/app/util/apiclient:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//vendor/github.com/pmezard/go-difflib/difflib:go_default_library", diff --git a/cmd/kubeadm/app/util/config/cluster.go b/cmd/kubeadm/app/util/config/cluster.go index 7917381e7c4..92b9df3c9f0 100644 --- a/cmd/kubeadm/app/util/config/cluster.go +++ b/cmd/kubeadm/app/util/config/cluster.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" certutil "k8s.io/client-go/util/cert" @@ -34,7 +35,6 @@ import ( kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme" "k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs" "k8s.io/kubernetes/cmd/kubeadm/app/constants" - "k8s.io/kubernetes/pkg/util/version" ) // FetchConfigFromFileOrCluster fetches configuration required for upgrading 
your cluster from a file (which has precedence) or a ConfigMap in the cluster @@ -73,12 +73,10 @@ func loadInitConfigurationFromFile(cfgPath string) (*kubeadmapi.InitConfiguratio // Unmarshal the versioned configuration populated from the file, // convert it to the internal API types, then default and validate - // NB the file can be one of - // - a single YAML, with a v1alpha2.MasterConfiguration object (with embedded component configs) - // - multiple YAML, with a combination of - // - a YAML with a v1alpha3.InitConfiguration object - // - a YAML with a v1alpha3.ClusterConfiguration object (without embedded component configs) - // - separated YAML for components configs + // NB the file contains multiple YAML, with a combination of + // - a YAML with a InitConfiguration object + // - a YAML with a ClusterConfiguration object (without embedded component configs) + // - separated YAML for components configs initcfg, err := BytesToInternalConfig(configBytes) if err != nil { return nil, err @@ -95,35 +93,11 @@ func getInitConfigurationFromCluster(kubeconfigDir string, client clientset.Inte return nil, err } - // TODO: remove in V1.13 - // If InitConfigurationConfigMapKey exist, the kubeadm-config was created with v1.11 - if _, ok := configMap.Data[constants.InitConfigurationConfigMapKey]; ok { - return getInitConfigurationFromConfigMapV11(configMap.Data) - } - - return getInitConfigurationFromConfigMaps(kubeconfigDir, client, configMap.Data, newControlPlane) -} - -func getInitConfigurationFromConfigMapV11(data map[string]string) (*kubeadmapi.InitConfiguration, error) { - configBytes := []byte(data[constants.InitConfigurationConfigMapKey]) - - // Unmarshal the versioned configuration populated from the file, - // convert it to the internal API types, then default and validate - // NB the config map created with v11 is a single YAML, with a v1alpha2.MasterConfiguration object (with embedded component configs) - initcfg, err := BytesToInternalConfig(configBytes) - if 
err != nil { - return nil, err - } - - return initcfg, nil -} - -func getInitConfigurationFromConfigMaps(kubeconfigDir string, client clientset.Interface, data map[string]string, newControlPlane bool) (*kubeadmapi.InitConfiguration, error) { - // In case of cluster crated with v1.12 InitConfiguration is composed with data from different places + // InitConfiguration is composed with data from different places initcfg := &kubeadmapi.InitConfiguration{} // gets ClusterConfiguration from kubeadm-config - clusterConfigurationData, ok := data[constants.ClusterConfigurationConfigMapKey] + clusterConfigurationData, ok := configMap.Data[constants.ClusterConfigurationConfigMapKey] if !ok { return nil, fmt.Errorf("unexpected error when reading kubeadm-config ConfigMap: %s key value pair missing", constants.ClusterConfigurationConfigMapKey) } @@ -144,7 +118,7 @@ func getInitConfigurationFromConfigMaps(kubeconfigDir string, client clientset.I return nil, err } // gets the APIEndpoint for the current node from then ClusterStatus in the kubeadm-config ConfigMap - if err := getAPIEndpoint(data, initcfg.NodeRegistration.Name, &initcfg.APIEndpoint); err != nil { + if err := getAPIEndpoint(configMap.Data, initcfg.NodeRegistration.Name, &initcfg.APIEndpoint); err != nil { return nil, err } } diff --git a/cmd/kubeadm/app/util/config/cluster_test.go b/cmd/kubeadm/app/util/config/cluster_test.go index 77ba145f917..a1c13c4047c 100644 --- a/cmd/kubeadm/app/util/config/cluster_test.go +++ b/cmd/kubeadm/app/util/config/cluster_test.go @@ -26,26 +26,18 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" clientsetfake "k8s.io/client-go/kubernetes/fake" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" - "k8s.io/kubernetes/pkg/util/version" ) var k8sVersionString = 
"v1.12.0" var k8sVersion = version.MustParseGeneric(k8sVersionString) var nodeName = "mynode" var cfgFiles = map[string][]byte{ - "MasterConfiguration_v1alpha2": []byte(` -apiVersion: kubeadm.k8s.io/v1alpha2 -kind: MasterConfiguration -kubernetesVersion: ` + k8sVersionString + ` -api: - advertiseAddress: 1.2.3.4 - bindPort: 1234 -`), "InitConfiguration_v1alpha3": []byte(` apiVersion: kubeadm.k8s.io/v1alpha3 kind: InitConfiguration @@ -171,10 +163,7 @@ func TestLoadInitConfigurationFromFile(t *testing.T) { name string fileContents []byte }{ - { - name: "v1alpha2.MasterConfiguration", - fileContents: cfgFiles["MasterConfiguration_v1alpha2"], - }, + // TODO: implement v1beta1 tests after introducing v1beta1 { name: "v1alpha3.partial1", fileContents: cfgFiles["InitConfiguration_v1alpha3"], @@ -523,17 +512,6 @@ func TestGetInitConfigurationFromCluster(t *testing.T) { newControlPlane bool expectedError bool }{ - { //TODO: remove in V1.13 - name: "before v1.11", // single YAML, with a v1alpha2.MasterConfiguration object (with embedded component configs) - configMaps: []fakeConfigMap{ - { - name: kubeadmconstants.InitConfigurationConfigMap, - data: map[string]string{ - kubeadmconstants.InitConfigurationConfigMapKey: string(cfgFiles["MasterConfiguration_v1alpha2"]), - }, - }, - }, - }, { name: "invalid - No kubeadm-config ConfigMap", expectedError: true, diff --git a/cmd/kubeadm/app/util/config/common.go b/cmd/kubeadm/app/util/config/common.go index 3573f2e9789..0e5ec40b6b9 100644 --- a/cmd/kubeadm/app/util/config/common.go +++ b/cmd/kubeadm/app/util/config/common.go @@ -26,12 +26,12 @@ import ( "k8s.io/apimachinery/pkg/runtime" netutil "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/version" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" kubeadmscheme "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/scheme" kubeadmapiv1alpha3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha3" "k8s.io/kubernetes/cmd/kubeadm/app/constants" kubeadmutil 
"k8s.io/kubernetes/cmd/kubeadm/app/util" - "k8s.io/kubernetes/pkg/util/version" ) // AnyConfigFileAndDefaultsToInternal reads either a InitConfiguration or JoinConfiguration and unmarshals it @@ -79,11 +79,11 @@ func DetectUnsupportedVersion(b []byte) error { // tell them how to upgrade. The support matrix will look something like this now and in the future: // v1.10 and earlier: v1alpha1 // v1.11: v1alpha1 read-only, writes only v1alpha2 config - // v1.12: v1alpha2 read-only, writes only v1beta1 config. Warns if the user tries to use v1alpha1 - // v1.13 and v1.14: v1beta1 read-only, writes only v1 config. Warns if the user tries to use v1alpha1 or v1alpha2. - // v1.15: v1 is the only supported format. + // v1.12: v1alpha2 read-only, writes only v1alpha3 config. Warns if the user tries to use v1alpha1 + // v1.13: v1alpha3 read-only, writes only v1beta1 config. Warns if the user tries to use v1alpha1 or v1alpha2 oldKnownAPIVersions := map[string]string{ "kubeadm.k8s.io/v1alpha1": "v1.11", + "kubeadm.k8s.io/v1alpha2": "v1.12", } // If we find an old API version in this gvk list, error out and tell the user why this doesn't work knownKinds := map[string]bool{} @@ -94,7 +94,7 @@ func DetectUnsupportedVersion(b []byte) error { knownKinds[gvk.Kind] = true } // InitConfiguration, MasterConfiguration and NodeConfiguration are mutually exclusive, error if more than one are specified - mutuallyExclusive := []string{constants.InitConfigurationKind, constants.MasterConfigurationKind, constants.JoinConfigurationKind, constants.NodeConfigurationKind} + mutuallyExclusive := []string{constants.InitConfigurationKind, constants.JoinConfigurationKind} foundOne := false for _, kind := range mutuallyExclusive { if knownKinds[kind] { diff --git a/cmd/kubeadm/app/util/config/common_test.go b/cmd/kubeadm/app/util/config/common_test.go index c945a1a9dd6..ce671391a9d 100644 --- a/cmd/kubeadm/app/util/config/common_test.go +++ b/cmd/kubeadm/app/util/config/common_test.go @@ -84,10 +84,12 @@ 
func TestDetectUnsupportedVersion(t *testing.T) { { name: "Master_v1alpha2", fileContents: files["Master_v1alpha2"], + expectedErr: true, }, { name: "Node_v1alpha2", fileContents: files["Node_v1alpha2"], + expectedErr: true, }, { name: "Init_v1alpha3", @@ -97,16 +99,6 @@ func TestDetectUnsupportedVersion(t *testing.T) { name: "Join_v1alpha3", fileContents: files["Join_v1alpha3"], }, - { - name: "DuplicateMaster", - fileContents: bytes.Join([][]byte{files["Master_v1alpha2"], files["Master_v1alpha2"]}, []byte(constants.YAMLDocumentSeparator)), - expectedErr: true, - }, - { - name: "DuplicateNode", - fileContents: bytes.Join([][]byte{files["Node_v1alpha2"], files["Node_v1alpha2"]}, []byte(constants.YAMLDocumentSeparator)), - expectedErr: true, - }, { name: "DuplicateInit", fileContents: bytes.Join([][]byte{files["Init_v1alpha3"], files["Init_v1alpha3"]}, []byte(constants.YAMLDocumentSeparator)), @@ -132,31 +124,7 @@ func TestDetectUnsupportedVersion(t *testing.T) { fileContents: bytes.Join([][]byte{files["Foo"], files["Master_v1alpha1"]}, []byte(constants.YAMLDocumentSeparator)), expectedErr: true, }, - { - name: "MustNotMixMasterNode", - fileContents: bytes.Join([][]byte{files["Master_v1alpha2"], files["Node_v1alpha2"]}, []byte(constants.YAMLDocumentSeparator)), - expectedErr: true, - }, - { - name: "MustNotMixMasterJoin", - fileContents: bytes.Join([][]byte{files["Master_v1alpha2"], files["Join_v1alpha3"]}, []byte(constants.YAMLDocumentSeparator)), - expectedErr: true, - }, - { - name: "MustNotMixJoinNode", - fileContents: bytes.Join([][]byte{files["Join_v1alpha3"], files["Node_v1alpha2"]}, []byte(constants.YAMLDocumentSeparator)), - expectedErr: true, - }, - { - name: "MustNotMixInitMaster", - fileContents: bytes.Join([][]byte{files["Init_v1alpha3"], files["Master_v1alpha2"]}, []byte(constants.YAMLDocumentSeparator)), - expectedErr: true, - }, - { - name: "MustNotMixInitNode", - fileContents: bytes.Join([][]byte{files["Init_v1alpha3"], files["Node_v1alpha2"]}, 
[]byte(constants.YAMLDocumentSeparator)), - expectedErr: true, - }, + // TODO: implement mustnotMix v1alpha3 v1beta1 after introducing v1beta1 { name: "MustNotMixInitJoin", fileContents: bytes.Join([][]byte{files["Init_v1alpha3"], files["Join_v1alpha3"]}, []byte(constants.YAMLDocumentSeparator)), diff --git a/cmd/kubeadm/app/util/config/masterconfig_test.go b/cmd/kubeadm/app/util/config/masterconfig_test.go index f35f9f158dd..de57d4b1e90 100644 --- a/cmd/kubeadm/app/util/config/masterconfig_test.go +++ b/cmd/kubeadm/app/util/config/masterconfig_test.go @@ -30,12 +30,12 @@ import ( ) const ( - master_v1alpha2YAML = "testdata/conversion/master/v1alpha2.yaml" - master_v1alpha3YAML = "testdata/conversion/master/v1alpha3.yaml" - master_internalYAML = "testdata/conversion/master/internal.yaml" - master_incompleteYAML = "testdata/defaulting/master/incomplete.yaml" - master_defaultedYAML = "testdata/defaulting/master/defaulted.yaml" - master_invalidYAML = "testdata/validation/invalid_mastercfg.yaml" + master_v1alpha3YAML = "testdata/conversion/master/v1alpha3.yaml" + //TODO master_v1beta1YAML = "testdata/conversion/master/v1beta1.yaml" after introducing v1beta1 + master_internalYAML = "testdata/conversion/master/internal.yaml" + //TODO master_incompleteYAML = "testdata/defaulting/master/incomplete.yaml" (using v1alpha3) after introducing v1beta1 + master_defaultedYAML = "testdata/defaulting/master/defaulted.yaml" + master_invalidYAML = "testdata/validation/invalid_mastercfg.yaml" ) func diff(expected, actual []byte) string { @@ -59,24 +59,14 @@ func TestConfigFileAndDefaultsToInternalConfig(t *testing.T) { }{ // These tests are reading one file, loading it using ConfigFileAndDefaultsToInternalConfig that all of kubeadm is using for unmarshal of our API types, // and then marshals the internal object to the expected groupVersion - { // v1alpha2 -> internal - name: "v1alpha2ToInternal", - in: master_v1alpha2YAML, - out: master_internalYAML, - groupVersion: 
kubeadm.SchemeGroupVersion, - }, { // v1alpha3 -> internal name: "v1alpha3ToInternal", in: master_v1alpha3YAML, out: master_internalYAML, groupVersion: kubeadm.SchemeGroupVersion, }, - { // v1alpha2 -> internal -> v1alpha3 - name: "v1alpha2Tov1alpha3", - in: master_v1alpha2YAML, - out: master_v1alpha3YAML, - groupVersion: kubeadmapiv1alpha3.SchemeGroupVersion, - }, + // TODO: implement v1beta1 <-> internal after introducing v1beta1 + // TODO: implement v1alpha3 -> internal -> v1beta1 after introducing v1beta1 { // v1alpha3 -> internal -> v1alpha3 name: "v1alpha3Tov1alpha3", in: master_v1alpha3YAML, @@ -85,13 +75,8 @@ func TestConfigFileAndDefaultsToInternalConfig(t *testing.T) { }, // These tests are reading one file that has only a subset of the fields populated, loading it using ConfigFileAndDefaultsToInternalConfig, // and then marshals the internal object to the expected groupVersion - { // v1alpha2 -> default -> validate -> internal -> v1alpha3 - name: "incompleteYAMLToDefaultedv1alpha3", - in: master_incompleteYAML, - out: master_defaultedYAML, - groupVersion: kubeadmapiv1alpha3.SchemeGroupVersion, - }, - { // v1alpha2 -> validation should fail + // TODO: implement v1alpha3 -> default -> validate -> v1beta1 -> v1alpha3 after introducing v1beta1 + { // v1alpha3 -> validation should fail name: "invalidYAMLShouldFail", in: master_invalidYAML, expectedErr: true, diff --git a/cmd/kubeadm/app/util/config/nodeconfig_test.go b/cmd/kubeadm/app/util/config/nodeconfig_test.go index cda3ac945da..c93882f148a 100644 --- a/cmd/kubeadm/app/util/config/nodeconfig_test.go +++ b/cmd/kubeadm/app/util/config/nodeconfig_test.go @@ -29,12 +29,12 @@ import ( ) const ( - node_v1alpha2YAML = "testdata/conversion/node/v1alpha2.yaml" - node_v1alpha3YAML = "testdata/conversion/node/v1alpha3.yaml" - node_internalYAML = "testdata/conversion/node/internal.yaml" - node_incompleteYAML = "testdata/defaulting/node/incomplete.yaml" - node_defaultedYAML = "testdata/defaulting/node/defaulted.yaml" 
- node_invalidYAML = "testdata/validation/invalid_nodecfg.yaml" + node_v1alpha3YAML = "testdata/conversion/node/v1alpha3.yaml" + //TODO node_v1beta1YAML = "testdata/conversion/node/v1beta1.yaml" after introducing v1beta1 + node_internalYAML = "testdata/conversion/node/internal.yaml" + //TODO node_incompleteYAML = "testdata/defaulting/node/incomplete.yaml" (using v1alpha3) after introducing v1beta1 + node_defaultedYAML = "testdata/defaulting/node/defaulted.yaml" + node_invalidYAML = "testdata/validation/invalid_nodecfg.yaml" ) func TestNodeConfigFileAndDefaultsToInternalConfig(t *testing.T) { @@ -45,24 +45,14 @@ func TestNodeConfigFileAndDefaultsToInternalConfig(t *testing.T) { }{ // These tests are reading one file, loading it using NodeConfigFileAndDefaultsToInternalConfig that all of kubeadm is using for unmarshal of our API types, // and then marshals the internal object to the expected groupVersion - { // v1alpha2 -> internal - name: "v1alpha2ToInternal", - in: node_v1alpha2YAML, - out: node_internalYAML, - groupVersion: kubeadm.SchemeGroupVersion, - }, { // v1alpha3 -> internal name: "v1alpha3ToInternal", in: node_v1alpha3YAML, out: node_internalYAML, groupVersion: kubeadm.SchemeGroupVersion, }, - { // v1alpha2 -> internal -> v1alpha3 - name: "v1alpha2Tov1alpha3", - in: node_v1alpha2YAML, - out: node_v1alpha3YAML, - groupVersion: kubeadmapiv1alpha3.SchemeGroupVersion, - }, + // TODO: implement v1beta1 <-> internal after introducing v1beta1 + // TODO: implement v1alpha3 -> internal -> v1beta1 after introducing v1beta1 { // v1alpha3 -> internal -> v1alpha3 name: "v1alpha3Tov1alpha3", in: node_v1alpha3YAML, @@ -71,13 +61,8 @@ func TestNodeConfigFileAndDefaultsToInternalConfig(t *testing.T) { }, // These tests are reading one file that has only a subset of the fields populated, loading it using NodeConfigFileAndDefaultsToInternalConfig, // and then marshals the internal object to the expected groupVersion - { // v1alpha2 -> default -> validate -> internal -> 
v1alpha3 - name: "incompleteYAMLToDefaulted", - in: node_incompleteYAML, - out: node_defaultedYAML, - groupVersion: kubeadmapiv1alpha3.SchemeGroupVersion, - }, - { // v1alpha2 -> validation should fail + // TODO: implement v1alpha3 -> default -> validate -> v1beta1 -> v1alpha3 after introducing v1beta1 + { // v1alpha3 -> validation should fail name: "invalidYAMLShouldFail", in: node_invalidYAML, expectedErr: true, diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml index c6f7b27a26e..9ad61455ddc 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/internal.yaml @@ -171,7 +171,7 @@ Etcd: ServerCertSANs: null FeatureGates: null ImageRepository: k8s.gcr.io -KubernetesVersion: v1.10.2 +KubernetesVersion: v1.11.2 Networking: DNSDomain: cluster.local PodSubnet: "" diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml deleted file mode 100644 index 01f4137dd71..00000000000 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha2.yaml +++ /dev/null @@ -1,150 +0,0 @@ -api: - advertiseAddress: 192.168.2.2 - bindPort: 6443 - controlPlaneEndpoint: "" -apiServerExtraArgs: - authorization-mode: Node,RBAC,Webhook -apiVersion: kubeadm.k8s.io/v1alpha2 -auditPolicy: - logDir: /var/log/kubernetes/audit - logMaxAge: 2 - path: "" -bootstrapTokens: -- groups: - - system:bootstrappers:kubeadm:default-node-token - token: s73ybu.6tw6wnqgp5z0wb77 - ttl: 24h0m0s - usages: - - signing - - authentication -certificatesDir: /etc/kubernetes/pki -clusterName: kubernetes -etcd: - local: - dataDir: /var/lib/etcd - image: "" -imageRepository: k8s.gcr.io -kind: MasterConfiguration -kubeProxy: - config: - bindAddress: 0.0.0.0 - clientConnection: - acceptContentTypes: "" - burst: 10 - contentType: 
application/vnd.kubernetes.protobuf - kubeconfig: /var/lib/kube-proxy/kubeconfig.conf - qps: 5 - clusterCIDR: "" - configSyncPeriod: 15m0s - conntrack: - max: null - maxPerCore: 32768 - min: 131072 - tcpCloseWaitTimeout: 1h0m0s - tcpEstablishedTimeout: 24h0m0s - enableProfiling: false - featureGates: - ServiceNodeExclusion: true - SupportIPVSProxyMode: true - healthzBindAddress: 0.0.0.0:10256 - hostnameOverride: "" - iptables: - masqueradeAll: false - masqueradeBit: 14 - minSyncPeriod: 0s - syncPeriod: 30s - ipvs: - excludeCIDRs: null - minSyncPeriod: 0s - scheduler: "" - syncPeriod: 30s - metricsBindAddress: 127.0.0.1:10249 - mode: iptables - nodePortAddresses: null - oomScoreAdj: -999 - portRange: "" - resourceContainer: /kube-proxy - udpIdleTimeout: 250ms -kubeletConfiguration: - baseConfig: - address: 1.2.3.4 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 2m0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 5m0s - cacheUnauthorizedTTL: 30s - cgroupDriver: cgroupfs - cgroupsPerQOS: true - clusterDNS: - - 10.96.0.10 - clusterDomain: cluster.local - configMapAndSecretChangeDetectionStrategy: Watch - containerLogMaxFiles: 5 - containerLogMaxSize: 10Mi - contentType: application/vnd.kubernetes.protobuf - cpuCFSQuota: true - cpuCFSQuotaPeriod: 0s - cpuManagerPolicy: none - cpuManagerReconcilePeriod: 10s - enableControllerAttachDetach: true - enableDebuggingHandlers: true - enforceNodeAllocatable: - - pods - eventBurst: 10 - eventRecordQPS: 5 - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 5m0s - failSwapOn: true - fileCheckFrequency: 20s - hairpinMode: promiscuous-bridge - healthzBindAddress: 127.0.0.1 - healthzPort: 10248 - httpCheckFrequency: 20s - imageGCHighThresholdPercent: 85 - imageGCLowThresholdPercent: 80 - imageMinimumGCAge: 2m0s - iptablesDropBit: 
15 - iptablesMasqueradeBit: 14 - kubeAPIBurst: 10 - kubeAPIQPS: 5 - makeIPTablesUtilChains: true - maxOpenFiles: 1000000 - maxPods: 110 - nodeStatusUpdateFrequency: 10s - oomScoreAdj: -999 - podPidsLimit: -1 - port: 10250 - registryBurst: 10 - registryPullQPS: 5 - resolvConf: /etc/resolv.conf - rotateCertificates: true - runtimeRequestTimeout: 2m0s - serializeImagePulls: true - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 4h0m0s - syncFrequency: 1m0s - volumeStatsAggPeriod: 1m0s -kubernetesVersion: v1.10.2 -networking: - dnsDomain: cluster.local - podSubnet: "" - serviceSubnet: 10.96.0.0/12 -nodeRegistration: - criSocket: /var/run/dockershim.sock - name: master-1 - taints: - - effect: NoSchedule - key: node-role.kubernetes.io/master -unifiedControlPlaneImage: "" diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml index f7be2f9bd71..4c9aadbd336 100644 --- a/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml +++ b/cmd/kubeadm/app/util/config/testdata/conversion/master/v1alpha3.yaml @@ -34,7 +34,7 @@ etcd: image: "" imageRepository: k8s.gcr.io kind: ClusterConfiguration -kubernetesVersion: v1.10.2 +kubernetesVersion: v1.11.2 networking: dnsDomain: cluster.local podSubnet: "" diff --git a/cmd/kubeadm/app/util/config/testdata/conversion/node/v1alpha2.yaml b/cmd/kubeadm/app/util/config/testdata/conversion/node/v1alpha2.yaml deleted file mode 100644 index 4b5e36e923f..00000000000 --- a/cmd/kubeadm/app/util/config/testdata/conversion/node/v1alpha2.yaml +++ /dev/null @@ -1,16 +0,0 @@ -advertiseAddress: 192.168.2.2 -apiVersion: kubeadm.k8s.io/v1alpha2 -caCertPath: /etc/kubernetes/pki/ca.crt -clusterName: kubernetes -discoveryFile: "" -discoveryTimeout: 5m0s -discoveryToken: abcdef.0123456789abcdef -discoveryTokenAPIServers: -- kube-apiserver:6443 -discoveryTokenUnsafeSkipCAVerification: true -kind: NodeConfiguration 
-nodeRegistration: - criSocket: /var/run/dockershim.sock - name: master-1 -tlsBootstrapToken: abcdef.0123456789abcdef -token: abcdef.0123456789abcdef diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml index 89f02bca5ce..78242aa2764 100644 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml +++ b/cmd/kubeadm/app/util/config/testdata/defaulting/master/defaulted.yaml @@ -32,7 +32,7 @@ etcd: image: "" imageRepository: my-company.com kind: ClusterConfiguration -kubernetesVersion: v1.10.2 +kubernetesVersion: v1.11.2 networking: dnsDomain: cluster.global podSubnet: 10.148.0.0/16 diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/master/incomplete.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/master/incomplete.yaml deleted file mode 100644 index 15b78937f3c..00000000000 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/master/incomplete.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: kubeadm.k8s.io/v1alpha2 -kind: MasterConfiguration -api: - advertiseAddress: 192.168.2.2 - bindPort: 6443 -bootstrapTokens: -- token: s73ybu.6tw6wnqgp5z0wb77 -certificatesDir: /var/lib/kubernetes/pki -clusterName: kubernetes -imageRepository: my-company.com -kubernetesVersion: v1.10.2 -networking: - dnsDomain: cluster.global - serviceSubnet: 10.196.0.0/12 - podSubnet: 10.148.0.0/16 -nodeRegistration: - criSocket: /var/run/criruntime.sock - name: master-1 diff --git a/cmd/kubeadm/app/util/config/testdata/defaulting/node/incomplete.yaml b/cmd/kubeadm/app/util/config/testdata/defaulting/node/incomplete.yaml deleted file mode 100644 index 149f8f2770f..00000000000 --- a/cmd/kubeadm/app/util/config/testdata/defaulting/node/incomplete.yaml +++ /dev/null @@ -1,9 +0,0 @@ -advertiseAddress: 192.168.2.2 -apiVersion: kubeadm.k8s.io/v1alpha2 -kind: NodeConfiguration -discoveryTokenAPIServers: -- kube-apiserver:6443 -discoveryTokenUnsafeSkipCAVerification: 
true -nodeRegistration: - name: thegopher -token: abcdef.0123456789abcdef diff --git a/cmd/kubeadm/app/util/config/testdata/validation/invalid_mastercfg.yaml b/cmd/kubeadm/app/util/config/testdata/validation/invalid_mastercfg.yaml index bc8482a5ca1..5579fd38d8e 100644 --- a/cmd/kubeadm/app/util/config/testdata/validation/invalid_mastercfg.yaml +++ b/cmd/kubeadm/app/util/config/testdata/validation/invalid_mastercfg.yaml @@ -1,16 +1,7 @@ -apiVersion: kubeadm.k8s.io/v1alpha2 -kind: MasterConfiguration -api: - bindPort: 0 -bootstrapTokens: -- token: s7bu.6tw6wn -certificatesDir: relativepath -clusterName: kubernetes -imageRepository: my-company.com -kubernetesVersion: v1.10.2 +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration networking: - dnsDomain: cluster.GLOBAL - serviceSubnet: 10.196.1000.0/100 -nodeRegistration: - criSocket: relativepath - name: MASTER + dnsDomain: INVALID-DOMAIN-!!!! + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +unifiedControlPlaneImage: "" \ No newline at end of file diff --git a/cmd/kubeadm/app/util/config/testdata/validation/invalid_nodecfg.yaml b/cmd/kubeadm/app/util/config/testdata/validation/invalid_nodecfg.yaml index 997c19ccbeb..34498e89527 100644 --- a/cmd/kubeadm/app/util/config/testdata/validation/invalid_nodecfg.yaml +++ b/cmd/kubeadm/app/util/config/testdata/validation/invalid_nodecfg.yaml @@ -1,12 +1,7 @@ apiVersion: kubeadm.k8s.io/v1alpha2 kind: NodeConfiguration -caCertPath: relativepath -discoveryFile: relativepath -discoveryTimeout: not-a-time -discoveryTokenAPIServers: -- INVALID_URL -discoveryTokenUnsafeSkipCAVerification: false -nodeRegistration: - criSocket: relativepath - name: NODE-1 -token: invalidtoken +apiEndpoint: + advertiseAddress: INVALID-ADDRESS-!!!! 
+ bindPort: 6443 +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: JoinConfiguration diff --git a/cmd/kubeadm/app/util/marshal.go b/cmd/kubeadm/app/util/marshal.go index a22f62ba62c..6e95dcc7151 100644 --- a/cmd/kubeadm/app/util/marshal.go +++ b/cmd/kubeadm/app/util/marshal.go @@ -156,10 +156,10 @@ func GroupVersionKindsHasClusterConfiguration(gvks ...schema.GroupVersionKind) b func GroupVersionKindsHasInitConfiguration(gvks ...schema.GroupVersionKind) bool { // Finding a MasterConfiguration kind is also okay, as it will decode and convert into an InitConfiguration struct eventually // TODO: When we remove support for the v1alpha2 API, remove support for MasterConfiguration - return GroupVersionKindsHasKind(gvks, constants.InitConfigurationKind) || GroupVersionKindsHasKind(gvks, constants.MasterConfigurationKind) + return GroupVersionKindsHasKind(gvks, constants.InitConfigurationKind) } // GroupVersionKindsHasJoinConfiguration returns whether the following gvk slice contains a JoinConfiguration object func GroupVersionKindsHasJoinConfiguration(gvks ...schema.GroupVersionKind) bool { - return GroupVersionKindsHasKind(gvks, constants.JoinConfigurationKind) || GroupVersionKindsHasKind(gvks, constants.NodeConfigurationKind) + return GroupVersionKindsHasKind(gvks, constants.JoinConfigurationKind) } diff --git a/cmd/kubeadm/app/util/staticpod/utils.go b/cmd/kubeadm/app/util/staticpod/utils.go index 9db5590d25f..8df649594af 100644 --- a/cmd/kubeadm/app/util/staticpod/utils.go +++ b/cmd/kubeadm/app/util/staticpod/utils.go @@ -67,6 +67,7 @@ func ComponentPod(container v1.Container, volumes map[string]v1.Volume) v1.Pod { Containers: []v1.Container{container}, PriorityClassName: "system-cluster-critical", HostNetwork: true, + DNSPolicy: v1.DNSClusterFirstWithHostNet, Volumes: VolumeMapToSlice(volumes), }, } diff --git a/cmd/kubeadm/app/util/staticpod/utils_test.go b/cmd/kubeadm/app/util/staticpod/utils_test.go index 4a2e3791d3e..ea9a1657d4e 100644 --- 
a/cmd/kubeadm/app/util/staticpod/utils_test.go +++ b/cmd/kubeadm/app/util/staticpod/utils_test.go @@ -380,6 +380,7 @@ func TestComponentPod(t *testing.T) { }, PriorityClassName: "system-cluster-critical", HostNetwork: true, + DNSPolicy: v1.DNSClusterFirstWithHostNet, Volumes: []v1.Volume{}, }, }, diff --git a/cmd/kubeadm/app/util/system/docker_validator.go b/cmd/kubeadm/app/util/system/docker_validator.go index d6895fa90d4..b31bacd208b 100644 --- a/cmd/kubeadm/app/util/system/docker_validator.go +++ b/cmd/kubeadm/app/util/system/docker_validator.go @@ -37,8 +37,8 @@ func (d *DockerValidator) Name() string { } const ( - dockerConfigPrefix = "DOCKER_" - maxDockerValidatedVersion = "17.03" + dockerConfigPrefix = "DOCKER_" + latestValidatedDockerVersion = "18.06" ) // TODO(random-liu): Add more validating items. @@ -78,9 +78,9 @@ func (d *DockerValidator) validateDockerInfo(spec *DockerSpec, info types.Info) if r.MatchString(info.ServerVersion) { d.Reporter.Report(dockerConfigPrefix+"VERSION", info.ServerVersion, good) w := fmt.Errorf( - "docker version is greater than the most recently validated version. Docker version: %s. Max validated version: %s", + "this Docker version is not on the list of validated versions: %s. Latest validated version: %s", info.ServerVersion, - maxDockerValidatedVersion, + latestValidatedDockerVersion, ) return w, nil } diff --git a/cmd/kubeadm/app/util/system/docker_validator_test.go b/cmd/kubeadm/app/util/system/docker_validator_test.go index 3ee315ef0db..058143e5674 100644 --- a/cmd/kubeadm/app/util/system/docker_validator_test.go +++ b/cmd/kubeadm/app/util/system/docker_validator_test.go @@ -28,7 +28,7 @@ func TestValidateDockerInfo(t *testing.T) { Reporter: DefaultReporter, } spec := &DockerSpec{ - Version: []string{`1\.1[1-3]\..*`, `17\.03\..*`}, // Requires [1.11, 17.03]. 
+ Version: []string{`1\.1[1-3]\..*`, `17\.0[3,6,9]\..*`, `18\.06\..*`}, GraphDriver: []string{"driver_1", "driver_2"}, } for _, test := range []struct { @@ -69,7 +69,17 @@ func TestValidateDockerInfo(t *testing.T) { { info: types.Info{Driver: "driver_2", ServerVersion: "17.06.0-ce"}, err: false, - warn: true, + warn: false, + }, + { + info: types.Info{Driver: "driver_2", ServerVersion: "17.09.0-ce"}, + err: false, + warn: false, + }, + { + info: types.Info{Driver: "driver_2", ServerVersion: "18.06.0-ce"}, + err: false, + warn: false, }, } { warn, err := v.validateDockerInfo(spec, test.info) diff --git a/cmd/kubeadm/app/util/system/kernel_validator.go b/cmd/kubeadm/app/util/system/kernel_validator.go index e0a91b52519..3288ea578dc 100644 --- a/cmd/kubeadm/app/util/system/kernel_validator.go +++ b/cmd/kubeadm/app/util/system/kernel_validator.go @@ -78,7 +78,6 @@ func (k *KernelValidator) Validate(spec SysSpec) (error, error) { // validateKernelVersion validates the kernel version. func (k *KernelValidator) validateKernelVersion(kSpec KernelSpec) error { - glog.V(1).Info("Validating kernel version") versionRegexps := kSpec.Versions for _, versionRegexp := range versionRegexps { r := regexp.MustCompile(versionRegexp) @@ -93,7 +92,6 @@ func (k *KernelValidator) validateKernelVersion(kSpec KernelSpec) error { // validateKernelConfig validates the kernel configurations. 
func (k *KernelValidator) validateKernelConfig(kSpec KernelSpec) error { - glog.V(1).Info("Validating kernel config") allConfig, err := k.getKernelConfig() if err != nil { return fmt.Errorf("failed to parse kernel config: %v", err) diff --git a/cmd/kubeadm/app/util/system/types_unix.go b/cmd/kubeadm/app/util/system/types_unix.go index be4b6b2606e..71a26c0e6e4 100644 --- a/cmd/kubeadm/app/util/system/types_unix.go +++ b/cmd/kubeadm/app/util/system/types_unix.go @@ -62,7 +62,7 @@ var DefaultSysSpec = SysSpec{ Cgroups: []string{"cpu", "cpuacct", "cpuset", "devices", "freezer", "memory"}, RuntimeSpec: RuntimeSpec{ DockerSpec: &DockerSpec{ - Version: []string{`1\.1[1-3]\..*`, `17\.03\..*`}, // Requires [1.11, 17.03] + Version: []string{`1\.1[1-3]\..*`, `17\.0[3,6,9]\..*`, `18\.06\..*`}, GraphDriver: []string{"aufs", "overlay", "overlay2", "devicemapper", "zfs"}, }, }, diff --git a/cmd/kubeadm/app/util/system/types_windows.go b/cmd/kubeadm/app/util/system/types_windows.go index 5047013e6a4..fb85049cdd7 100644 --- a/cmd/kubeadm/app/util/system/types_windows.go +++ b/cmd/kubeadm/app/util/system/types_windows.go @@ -38,7 +38,7 @@ var DefaultSysSpec = SysSpec{ Cgroups: []string{}, RuntimeSpec: RuntimeSpec{ DockerSpec: &DockerSpec{ - Version: []string{`17\.03\..*`}, //Requires [17.03] or later + Version: []string{`18\.06\..*`}, //Requires [18.06] or later GraphDriver: []string{"windowsfilter"}, }, }, diff --git a/cmd/kubeadm/app/util/system/validators.go b/cmd/kubeadm/app/util/system/validators.go index 7819da7b4a6..c1f1c15ae45 100644 --- a/cmd/kubeadm/app/util/system/validators.go +++ b/cmd/kubeadm/app/util/system/validators.go @@ -17,7 +17,7 @@ limitations under the License. 
package system import ( - "github.com/golang/glog" + "fmt" "k8s.io/apimachinery/pkg/util/errors" ) @@ -41,7 +41,7 @@ func Validate(spec SysSpec, validators []Validator) (error, error) { var warns []error for _, v := range validators { - glog.Infof("Validating %s...", v.Name()) + fmt.Printf("Validating %s...\n", v.Name()) warn, err := v.Validate(spec) errs = append(errs, err) warns = append(warns, warn) diff --git a/cmd/kubeadm/app/util/version.go b/cmd/kubeadm/app/util/version.go index 15cc900e992..98109d76264 100644 --- a/cmd/kubeadm/app/util/version.go +++ b/cmd/kubeadm/app/util/version.go @@ -27,7 +27,7 @@ import ( "github.com/golang/glog" netutil "k8s.io/apimachinery/pkg/util/net" - versionutil "k8s.io/kubernetes/pkg/util/version" + versionutil "k8s.io/apimachinery/pkg/util/version" pkgversion "k8s.io/kubernetes/pkg/version" ) diff --git a/cmd/kubeadm/test/cmd/init_test.go b/cmd/kubeadm/test/cmd/init_test.go index b150c54d8eb..f8dcf3c84ea 100644 --- a/cmd/kubeadm/test/cmd/init_test.go +++ b/cmd/kubeadm/test/cmd/init_test.go @@ -144,15 +144,16 @@ func TestCmdInitConfig(t *testing.T) { expected: false, }, { - name: "can load v1alpha2 config", + name: "can't load v1alpha2 config", args: "--config=testdata/init/v1alpha2.yaml", - expected: true, + expected: false, }, { name: "can load v1alpha3 config", args: "--config=testdata/init/v1alpha3.yaml", expected: true, }, + // TODO: implement v1beta1 tests after introducing v1beta1 { name: "don't allow mixed arguments", args: "--kubernetes-version=1.11.0 --config=testdata/init/v1alpha3.yaml", diff --git a/cmd/kubeadm/test/util.go b/cmd/kubeadm/test/util.go index 1b55bdec405..07bc630766a 100644 --- a/cmd/kubeadm/test/util.go +++ b/cmd/kubeadm/test/util.go @@ -66,7 +66,7 @@ func SetupInitConfigurationFile(t *testing.T, tmpdir string, cfg *kubeadmapi.Ini apiVersion: kubeadm.k8s.io/v1alpha3 kind: ClusterConfiguration certificatesDir: {{.CertificatesDir}} - kubernetesVersion: v1.10.0 + kubernetesVersion: v1.11.0 `))) f, err := 
os.Create(cfgPath) diff --git a/cmd/kubelet/app/BUILD b/cmd/kubelet/app/BUILD index aa1948c9dfb..f5f79cfb13f 100644 --- a/cmd/kubelet/app/BUILD +++ b/cmd/kubelet/app/BUILD @@ -69,7 +69,7 @@ go_library( "//pkg/version:go_default_library", "//pkg/version/verflag:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/aws_ebs:go_default_library", + "//pkg/volume/awsebs:go_default_library", "//pkg/volume/azure_dd:go_default_library", "//pkg/volume/azure_file:go_default_library", "//pkg/volume/cephfs:go_default_library", diff --git a/cmd/kubelet/app/plugins.go b/cmd/kubelet/app/plugins.go index 4f3c39223de..f651ef81e94 100644 --- a/cmd/kubelet/app/plugins.go +++ b/cmd/kubelet/app/plugins.go @@ -26,7 +26,7 @@ import ( "k8s.io/utils/exec" // Volume plugins "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/aws_ebs" + "k8s.io/kubernetes/pkg/volume/awsebs" "k8s.io/kubernetes/pkg/volume/azure_dd" "k8s.io/kubernetes/pkg/volume/azure_file" "k8s.io/kubernetes/pkg/volume/cephfs" @@ -71,7 +71,7 @@ func ProbeVolumePlugins() []volume.VolumePlugin { // // Kubelet does not currently need to configure volume plugins. // If/when it does, see kube-controller-manager/app/plugins.go for example of using volume.VolumeConfig - allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, awsebs.ProbeVolumePlugins()...) allPlugins = append(allPlugins, empty_dir.ProbeVolumePlugins()...) allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...) allPlugins = append(allPlugins, git_repo.ProbeVolumePlugins()...) 
diff --git a/docs/.generated_docs b/docs/.generated_docs index d6662f5c58f..294c91fd39f 100644 --- a/docs/.generated_docs +++ b/docs/.generated_docs @@ -30,6 +30,15 @@ docs/admin/kubeadm_alpha_phase_certs_etcd-peer.md docs/admin/kubeadm_alpha_phase_certs_etcd-server.md docs/admin/kubeadm_alpha_phase_certs_front-proxy-ca.md docs/admin/kubeadm_alpha_phase_certs_front-proxy-client.md +docs/admin/kubeadm_alpha_phase_certs_renew.md +docs/admin/kubeadm_alpha_phase_certs_renew_all.md +docs/admin/kubeadm_alpha_phase_certs_renew_apiserver-etcd-client.md +docs/admin/kubeadm_alpha_phase_certs_renew_apiserver-kubelet-client.md +docs/admin/kubeadm_alpha_phase_certs_renew_apiserver.md +docs/admin/kubeadm_alpha_phase_certs_renew_etcd-healthcheck-client.md +docs/admin/kubeadm_alpha_phase_certs_renew_etcd-peer.md +docs/admin/kubeadm_alpha_phase_certs_renew_etcd-server.md +docs/admin/kubeadm_alpha_phase_certs_renew_front-proxy-client.md docs/admin/kubeadm_alpha_phase_certs_sa.md docs/admin/kubeadm_alpha_phase_controlplane.md docs/admin/kubeadm_alpha_phase_controlplane_all.md @@ -115,6 +124,15 @@ docs/man/man1/kubeadm-alpha-phase-certs-etcd-peer.1 docs/man/man1/kubeadm-alpha-phase-certs-etcd-server.1 docs/man/man1/kubeadm-alpha-phase-certs-front-proxy-ca.1 docs/man/man1/kubeadm-alpha-phase-certs-front-proxy-client.1 +docs/man/man1/kubeadm-alpha-phase-certs-renew-all.1 +docs/man/man1/kubeadm-alpha-phase-certs-renew-apiserver-etcd-client.1 +docs/man/man1/kubeadm-alpha-phase-certs-renew-apiserver-kubelet-client.1 +docs/man/man1/kubeadm-alpha-phase-certs-renew-apiserver.1 +docs/man/man1/kubeadm-alpha-phase-certs-renew-etcd-healthcheck-client.1 +docs/man/man1/kubeadm-alpha-phase-certs-renew-etcd-peer.1 +docs/man/man1/kubeadm-alpha-phase-certs-renew-etcd-server.1 +docs/man/man1/kubeadm-alpha-phase-certs-renew-front-proxy-client.1 +docs/man/man1/kubeadm-alpha-phase-certs-renew.1 docs/man/man1/kubeadm-alpha-phase-certs-sa.1 docs/man/man1/kubeadm-alpha-phase-certs.1 
docs/man/man1/kubeadm-alpha-phase-controlplane-all.1 @@ -176,7 +194,6 @@ docs/man/man1/kubeadm-upgrade-plan.1 docs/man/man1/kubeadm-upgrade.1 docs/man/man1/kubeadm-version.1 docs/man/man1/kubeadm.1 -docs/man/man1/kubectl-alpha-diff.1 docs/man/man1/kubectl-alpha.1 docs/man/man1/kubectl-annotate.1 docs/man/man1/kubectl-api-resources.1 @@ -237,6 +254,7 @@ docs/man/man1/kubectl-create-serviceaccount.1 docs/man/man1/kubectl-create.1 docs/man/man1/kubectl-delete.1 docs/man/man1/kubectl-describe.1 +docs/man/man1/kubectl-diff.1 docs/man/man1/kubectl-drain.1 docs/man/man1/kubectl-edit.1 docs/man/man1/kubectl-exec.1 @@ -279,8 +297,6 @@ docs/man/man1/kubectl-wait.1 docs/man/man1/kubectl.1 docs/man/man1/kubelet.1 docs/user-guide/kubectl/kubectl.md -docs/user-guide/kubectl/kubectl_alpha.md -docs/user-guide/kubectl/kubectl_alpha_diff.md docs/user-guide/kubectl/kubectl_annotate.md docs/user-guide/kubectl/kubectl_api-resources.md docs/user-guide/kubectl/kubectl_api-versions.md @@ -340,6 +356,7 @@ docs/user-guide/kubectl/kubectl_create_service_nodeport.md docs/user-guide/kubectl/kubectl_create_serviceaccount.md docs/user-guide/kubectl/kubectl_delete.md docs/user-guide/kubectl/kubectl_describe.md +docs/user-guide/kubectl/kubectl_diff.md docs/user-guide/kubectl/kubectl_drain.md docs/user-guide/kubectl/kubectl_edit.md docs/user-guide/kubectl/kubectl_exec.md @@ -396,6 +413,7 @@ docs/yaml/kubectl/kubectl_cp.yaml docs/yaml/kubectl/kubectl_create.yaml docs/yaml/kubectl/kubectl_delete.yaml docs/yaml/kubectl/kubectl_describe.yaml +docs/yaml/kubectl/kubectl_diff.yaml docs/yaml/kubectl/kubectl_drain.yaml docs/yaml/kubectl/kubectl_edit.yaml docs/yaml/kubectl/kubectl_exec.yaml diff --git a/docs/man/man1/kubectl-alpha-diff.1 b/docs/admin/kubeadm_alpha_phase_certs_renew.md similarity index 100% rename from docs/man/man1/kubectl-alpha-diff.1 rename to docs/admin/kubeadm_alpha_phase_certs_renew.md diff --git a/docs/user-guide/kubectl/kubectl_alpha.md 
b/docs/admin/kubeadm_alpha_phase_certs_renew_all.md similarity index 100% rename from docs/user-guide/kubectl/kubectl_alpha.md rename to docs/admin/kubeadm_alpha_phase_certs_renew_all.md diff --git a/docs/user-guide/kubectl/kubectl_alpha_diff.md b/docs/admin/kubeadm_alpha_phase_certs_renew_apiserver-etcd-client.md similarity index 100% rename from docs/user-guide/kubectl/kubectl_alpha_diff.md rename to docs/admin/kubeadm_alpha_phase_certs_renew_apiserver-etcd-client.md diff --git a/docs/admin/kubeadm_alpha_phase_certs_renew_apiserver-kubelet-client.md b/docs/admin/kubeadm_alpha_phase_certs_renew_apiserver-kubelet-client.md new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/admin/kubeadm_alpha_phase_certs_renew_apiserver-kubelet-client.md @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/admin/kubeadm_alpha_phase_certs_renew_apiserver.md b/docs/admin/kubeadm_alpha_phase_certs_renew_apiserver.md new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/admin/kubeadm_alpha_phase_certs_renew_apiserver.md @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/admin/kubeadm_alpha_phase_certs_renew_etcd-healthcheck-client.md b/docs/admin/kubeadm_alpha_phase_certs_renew_etcd-healthcheck-client.md new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/admin/kubeadm_alpha_phase_certs_renew_etcd-healthcheck-client.md @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. 
diff --git a/docs/admin/kubeadm_alpha_phase_certs_renew_etcd-peer.md b/docs/admin/kubeadm_alpha_phase_certs_renew_etcd-peer.md new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/admin/kubeadm_alpha_phase_certs_renew_etcd-peer.md @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/admin/kubeadm_alpha_phase_certs_renew_etcd-server.md b/docs/admin/kubeadm_alpha_phase_certs_renew_etcd-server.md new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/admin/kubeadm_alpha_phase_certs_renew_etcd-server.md @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/admin/kubeadm_alpha_phase_certs_renew_front-proxy-client.md b/docs/admin/kubeadm_alpha_phase_certs_renew_front-proxy-client.md new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/admin/kubeadm_alpha_phase_certs_renew_front-proxy-client.md @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/api-reference/apps/v1/definitions.html b/docs/api-reference/apps/v1/definitions.html index b2ced0d6d4a..80670e0366b 100755 --- a/docs/api-reference/apps/v1/definitions.html +++ b/docs/api-reference/apps/v1/definitions.html @@ -4090,6 +4090,13 @@ When an object is created, the system will populate this list with the current s

string

+ +

enableServiceLinks

+

EnableServiceLinks indicates whether information about services should be injected into pod’s environment variables, matching the syntax of Docker links.

+

false

+

boolean

+

false

+ diff --git a/docs/api-reference/apps/v1beta1/definitions.html b/docs/api-reference/apps/v1beta1/definitions.html index 4dfe0ad579a..88288d917d9 100755 --- a/docs/api-reference/apps/v1beta1/definitions.html +++ b/docs/api-reference/apps/v1beta1/definitions.html @@ -4143,6 +4143,13 @@ The StatefulSet guarantees that a given network identity will always map to the

string

+ +

enableServiceLinks

+

EnableServiceLinks indicates whether information about services should be injected into pod’s environment variables, matching the syntax of Docker links.

+

false

+

boolean

+

false

+ diff --git a/docs/api-reference/apps/v1beta1/operations.html b/docs/api-reference/apps/v1beta1/operations.html index af86f9065e3..719d041c43e 100755 --- a/docs/api-reference/apps/v1beta1/operations.html +++ b/docs/api-reference/apps/v1beta1/operations.html @@ -2982,17 +2982,17 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

202

Accepted

-

v1beta1.DeploymentStatus

+

v1.Status

200

success

-

v1beta1.DeploymentStatus

+

v1.Status

201

Created

-

v1beta1.DeploymentStatus

+

v1.Status

diff --git a/docs/api-reference/apps/v1beta2/definitions.html b/docs/api-reference/apps/v1beta2/definitions.html index 4d664bb7e31..36c56eddf49 100755 --- a/docs/api-reference/apps/v1beta2/definitions.html +++ b/docs/api-reference/apps/v1beta2/definitions.html @@ -4759,6 +4759,13 @@ The StatefulSet guarantees that a given network identity will always map to the

string

+ +

enableServiceLinks

+

EnableServiceLinks indicates whether information about services should be injected into pod’s environment variables, matching the syntax of Docker links.

+

false

+

boolean

+

false

+ diff --git a/docs/api-reference/batch/v1/definitions.html b/docs/api-reference/batch/v1/definitions.html index dcd059bbc4d..e635111144c 100755 --- a/docs/api-reference/batch/v1/definitions.html +++ b/docs/api-reference/batch/v1/definitions.html @@ -3375,6 +3375,13 @@ When an object is created, the system will populate this list with the current s

string

+ +

enableServiceLinks

+

EnableServiceLinks indicates whether information about services should be injected into pod’s environment variables, matching the syntax of Docker links.

+

false

+

boolean

+

false

+ diff --git a/docs/api-reference/batch/v1beta1/definitions.html b/docs/api-reference/batch/v1beta1/definitions.html index c3bca7454ef..5da6593ca27 100755 --- a/docs/api-reference/batch/v1beta1/definitions.html +++ b/docs/api-reference/batch/v1beta1/definitions.html @@ -3409,6 +3409,13 @@ When an object is created, the system will populate this list with the current s

string

+ +

enableServiceLinks

+

EnableServiceLinks indicates whether information about services should be injected into pod’s environment variables, matching the syntax of Docker links.

+

false

+

boolean

+

false

+ diff --git a/docs/api-reference/batch/v2alpha1/definitions.html b/docs/api-reference/batch/v2alpha1/definitions.html index 51a06d431be..d298f38d022 100755 --- a/docs/api-reference/batch/v2alpha1/definitions.html +++ b/docs/api-reference/batch/v2alpha1/definitions.html @@ -3382,6 +3382,13 @@ When an object is created, the system will populate this list with the current s

string

+ +

enableServiceLinks

+

EnableServiceLinks indicates whether information about services should be injected into pod’s environment variables, matching the syntax of Docker links.

+

false

+

boolean

+

false

+ diff --git a/docs/api-reference/extensions/v1beta1/definitions.html b/docs/api-reference/extensions/v1beta1/definitions.html index 82dbeb493c1..864f2b1177c 100755 --- a/docs/api-reference/extensions/v1beta1/definitions.html +++ b/docs/api-reference/extensions/v1beta1/definitions.html @@ -4734,6 +4734,13 @@ When an object is created, the system will populate this list with the current s

string

+ +

enableServiceLinks

+

EnableServiceLinks indicates whether information about services should be injected into pod’s environment variables, matching the syntax of Docker links.

+

false

+

boolean

+

false

+ diff --git a/docs/api-reference/extensions/v1beta1/operations.html b/docs/api-reference/extensions/v1beta1/operations.html index 06dd839dd53..b880ff52a78 100755 --- a/docs/api-reference/extensions/v1beta1/operations.html +++ b/docs/api-reference/extensions/v1beta1/operations.html @@ -3541,17 +3541,17 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }

202

Accepted

-

v1beta1.DeploymentStatus

+

v1.Status

200

success

-

v1beta1.DeploymentStatus

+

v1.Status

201

Created

-

v1beta1.DeploymentStatus

+

v1.Status

diff --git a/docs/api-reference/v1/definitions.html b/docs/api-reference/v1/definitions.html index 1a664c1b577..c5460a21c3f 100755 --- a/docs/api-reference/v1/definitions.html +++ b/docs/api-reference/v1/definitions.html @@ -9780,6 +9780,13 @@ More info:

string

+ +

enableServiceLinks

+

EnableServiceLinks indicates whether information about services should be injected into pod’s environment variables, matching the syntax of Docker links.

+

false

+

boolean

+

false

+ diff --git a/docs/man/man1/kubeadm-alpha-phase-certs-renew-all.1 b/docs/man/man1/kubeadm-alpha-phase-certs-renew-all.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubeadm-alpha-phase-certs-renew-all.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/man/man1/kubeadm-alpha-phase-certs-renew-apiserver-etcd-client.1 b/docs/man/man1/kubeadm-alpha-phase-certs-renew-apiserver-etcd-client.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubeadm-alpha-phase-certs-renew-apiserver-etcd-client.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/man/man1/kubeadm-alpha-phase-certs-renew-apiserver-kubelet-client.1 b/docs/man/man1/kubeadm-alpha-phase-certs-renew-apiserver-kubelet-client.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubeadm-alpha-phase-certs-renew-apiserver-kubelet-client.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/man/man1/kubeadm-alpha-phase-certs-renew-apiserver.1 b/docs/man/man1/kubeadm-alpha-phase-certs-renew-apiserver.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubeadm-alpha-phase-certs-renew-apiserver.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. 
diff --git a/docs/man/man1/kubeadm-alpha-phase-certs-renew-etcd-healthcheck-client.1 b/docs/man/man1/kubeadm-alpha-phase-certs-renew-etcd-healthcheck-client.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubeadm-alpha-phase-certs-renew-etcd-healthcheck-client.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/man/man1/kubeadm-alpha-phase-certs-renew-etcd-peer.1 b/docs/man/man1/kubeadm-alpha-phase-certs-renew-etcd-peer.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubeadm-alpha-phase-certs-renew-etcd-peer.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/man/man1/kubeadm-alpha-phase-certs-renew-etcd-server.1 b/docs/man/man1/kubeadm-alpha-phase-certs-renew-etcd-server.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubeadm-alpha-phase-certs-renew-etcd-server.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/man/man1/kubeadm-alpha-phase-certs-renew-front-proxy-client.1 b/docs/man/man1/kubeadm-alpha-phase-certs-renew-front-proxy-client.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubeadm-alpha-phase-certs-renew-front-proxy-client.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. 
diff --git a/docs/man/man1/kubeadm-alpha-phase-certs-renew.1 b/docs/man/man1/kubeadm-alpha-phase-certs-renew.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubeadm-alpha-phase-certs-renew.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/man/man1/kubectl-diff.1 b/docs/man/man1/kubectl-diff.1 new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/man/man1/kubectl-diff.1 @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/user-guide/kubectl/kubectl_diff.md b/docs/user-guide/kubectl/kubectl_diff.md new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/user-guide/kubectl/kubectl_diff.md @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. diff --git a/docs/yaml/kubectl/kubectl_diff.yaml b/docs/yaml/kubectl/kubectl_diff.yaml new file mode 100644 index 00000000000..b6fd7a0f989 --- /dev/null +++ b/docs/yaml/kubectl/kubectl_diff.yaml @@ -0,0 +1,3 @@ +This file is autogenerated, but we've stopped checking such files into the +repository to reduce the need for rebases. Please run hack/generate-docs.sh to +populate this file. 
diff --git a/hack/.golint_failures b/hack/.golint_failures index f9e25ee4715..347bc3ea369 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -6,21 +6,12 @@ cmd/kube-apiserver/app cmd/kube-controller-manager/app cmd/kube-proxy/app cmd/kubeadm/app -cmd/kubeadm/app/apis/kubeadm/v1alpha2 cmd/kubeadm/app/apis/kubeadm/v1alpha3 cmd/kubeadm/app/util/config cmd/kubeadm/app/util/system cmd/kubelet/app cmd/kubelet/app/options cmd/kubemark -pkg/api/endpoints -pkg/api/ref -pkg/api/testapi -pkg/api/testing -pkg/api/testing/compat -pkg/api/v1/endpoints -pkg/api/v1/pod -pkg/api/v1/resource pkg/apis/abac pkg/apis/abac/latest pkg/apis/abac/v0 @@ -97,9 +88,6 @@ pkg/apis/storage/v1beta1 pkg/apis/storage/v1beta1/util pkg/auth/authorizer/abac pkg/capabilities -pkg/client/chaosclient -pkg/client/leaderelectionconfig -pkg/client/tests pkg/cloudprovider pkg/cloudprovider/providers/aws pkg/cloudprovider/providers/fake @@ -131,7 +119,6 @@ pkg/controller/podautoscaler pkg/controller/podautoscaler/metrics pkg/controller/podgc pkg/controller/replicaset -pkg/controller/replicaset/options pkg/controller/replication pkg/controller/resourcequota pkg/controller/route @@ -156,7 +143,6 @@ pkg/kubeapiserver/authenticator pkg/kubeapiserver/authorizer pkg/kubeapiserver/authorizer/modes pkg/kubeapiserver/options -pkg/kubeapiserver/server pkg/kubectl pkg/kubectl/apps pkg/kubectl/cmd @@ -211,12 +197,8 @@ pkg/kubelet/dockershim/network/hostport/testing pkg/kubelet/dockershim/network/kubenet pkg/kubelet/dockershim/network/testing pkg/kubelet/events -pkg/kubelet/images -pkg/kubelet/kuberuntime -pkg/kubelet/leaky pkg/kubelet/lifecycle pkg/kubelet/metrics -pkg/kubelet/pleg pkg/kubelet/pod pkg/kubelet/pod/testing pkg/kubelet/preemption @@ -262,7 +244,7 @@ pkg/proxy/userspace pkg/proxy/util pkg/proxy/winkernel pkg/proxy/winuserspace -pkg/quota/evaluator/core +pkg/quota/v1/evaluator/core pkg/registry/admissionregistration/initializerconfiguration/storage 
pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage pkg/registry/admissionregistration/rest @@ -401,7 +383,6 @@ pkg/util/tolerations pkg/util/workqueue/prometheus pkg/version/verflag pkg/volume -pkg/volume/aws_ebs pkg/volume/azure_dd pkg/volume/azure_file pkg/volume/cephfs diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index 9eb52a7d20b..3b01774febe 100755 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -523,14 +523,14 @@ kube::golang::delete_coverage_dummy_test() { # Arguments: a list of kubernetes packages to build. # Expected variables: ${build_args} should be set to an array of Go build arguments. # In addition, ${package} and ${platform} should have been set earlier, and if -# ${build_with_coverage} is set, coverage instrumentation will be enabled. +# ${KUBE_BUILD_WITH_COVERAGE} is set, coverage instrumentation will be enabled. # # Invokes Go to actually build some packages. If coverage is disabled, simply invokes # go install. If coverage is enabled, builds covered binaries using go test, temporarily # producing the required unit test files and then cleaning up after itself. # Non-covered binaries are then built using go install as usual. 
kube::golang::build_some_binaries() { - if [[ -n "${build_with_coverage:-}" ]]; then + if [[ -n "${KUBE_BUILD_WITH_COVERAGE:-}" ]]; then local -a uncovered=() for package in "$@"; do if kube::golang::is_instrumented_package "${package}"; then @@ -586,6 +586,7 @@ kube::golang::build_binaries_for_platform() { -installsuffix static ${goflags:+"${goflags[@]}"} -gcflags "${gogcflags:-}" + -asmflags "${goasmflags:-}" -ldflags "${goldflags:-}" ) CGO_ENABLED=0 kube::golang::build_some_binaries "${statics[@]}" @@ -595,6 +596,7 @@ kube::golang::build_binaries_for_platform() { build_args=( ${goflags:+"${goflags[@]}"} -gcflags "${gogcflags:-}" + -asmflags "${goasmflags:-}" -ldflags "${goldflags:-}" ) kube::golang::build_some_binaries "${nonstatics[@]}" @@ -608,6 +610,7 @@ kube::golang::build_binaries_for_platform() { go test -c \ ${goflags:+"${goflags[@]}"} \ -gcflags "${gogcflags:-}" \ + -asmflags "${goasmflags:-}" \ -ldflags "${goldflags:-}" \ -o "${outfile}" \ "${testpkg}" @@ -661,11 +664,11 @@ kube::golang::build_binaries() { host_platform=$(kube::golang::host_platform) # Use eval to preserve embedded quoted strings. - local goflags goldflags gogcflags build_with_coverage + local goflags goldflags goasmflags gogcflags eval "goflags=(${GOFLAGS:-})" - goldflags="${GOLDFLAGS:-} $(kube::version::ldflags)" - gogcflags="${GOGCFLAGS:-}" - build_with_coverage="${KUBE_BUILD_WITH_COVERAGE:-}" + goldflags="${GOLDFLAGS:-} -s -w $(kube::version::ldflags)" + goasmflags="-trimpath=${KUBE_ROOT}" + gogcflags="${GOGCFLAGS:-} -trimpath=${KUBE_ROOT}" local -a targets=() local arg diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index bcf988bdc10..4b797fa42bf 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -47,6 +47,8 @@ FIRST_SERVICE_CLUSTER_IP=${FIRST_SERVICE_CLUSTER_IP:-10.0.0.1} CGROUPS_PER_QOS=${CGROUPS_PER_QOS:-true} # name of the cgroup driver, i.e. 
cgroupfs or systemd CGROUP_DRIVER=${CGROUP_DRIVER:-""} +# if cgroups per qos is enabled, optionally change cgroup root +CGROUP_ROOT=${CGROUP_ROOT:-""} # owner of client certs, default to current user if not specified USER=${USER:-$(whoami)} @@ -141,6 +143,7 @@ fi set -e source "${KUBE_ROOT}/hack/lib/init.sh" +kube::util::ensure-gnu-sed function usage { echo "This script starts a local kube cluster. " @@ -231,6 +234,9 @@ ROOT_CA_FILE=${CERT_DIR}/server-ca.crt ROOT_CA_KEY=${CERT_DIR}/server-ca.key CLUSTER_SIGNING_CERT_FILE=${CLUSTER_SIGNING_CERT_FILE:-"${ROOT_CA_FILE}"} CLUSTER_SIGNING_KEY_FILE=${CLUSTER_SIGNING_KEY_FILE:-"${ROOT_CA_KEY}"} +# Reuse certs will skip generate new ca/cert files under CERT_DIR +# it's useful with PRESERVE_ETCD=true because new ca will make existed service account secrets invalided +REUSE_CERTS=${REUSE_CERTS:-false} # name of the cgroup driver, i.e. cgroupfs or systemd if [[ ${CONTAINER_RUNTIME} == "docker" ]]; then @@ -449,6 +455,39 @@ function set_service_accounts { fi } +function generate_certs { + # Create CA signers + if [[ "${ENABLE_SINGLE_CA_SIGNER:-}" = true ]]; then + kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"client auth","server auth"' + sudo cp "${CERT_DIR}/server-ca.key" "${CERT_DIR}/client-ca.key" + sudo cp "${CERT_DIR}/server-ca.crt" "${CERT_DIR}/client-ca.crt" + sudo cp "${CERT_DIR}/server-ca-config.json" "${CERT_DIR}/client-ca-config.json" + else + kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"server auth"' + kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" client '"client auth"' + fi + + # Create auth proxy client ca + kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header '"client auth"' + + # serving cert for kube-apiserver + kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-apiserver kubernetes.default kubernetes.default.svc "localhost" ${API_HOST_IP} 
${API_HOST} ${FIRST_SERVICE_CLUSTER_IP} + + # Create client certs signed with client-ca, given id, given CN and a number of groups + kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kubelet system:node:${HOSTNAME_OVERRIDE} system:nodes + kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-proxy system:kube-proxy system:nodes + kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' controller system:kube-controller-manager + kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' scheduler system:kube-scheduler + kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' admin system:admin system:masters + + # Create matching certificates for kube-aggregator + kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-aggregator api.kube-public.svc "localhost" ${API_HOST_IP} + kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header-ca auth-proxy system:auth-proxy + # TODO remove masters and add rolebinding + kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-aggregator system:kube-aggregator system:masters + kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-aggregator +} + function start_apiserver { security_admission="" if [[ -n "${DENY_SECURITY_CONTEXT_ADMISSION}" ]]; then @@ -485,7 +524,7 @@ function start_apiserver { fi priv_arg="" if [[ -n "${ALLOW_PRIVILEGED}" ]]; then - priv_arg="--allow-privileged " + priv_arg="--allow-privileged=${ALLOW_PRIVILEGED} " fi if [[ ${ENABLE_ADMISSION_PLUGINS} == *"Initializers"* ]]; then @@ -514,37 +553,11 @@ function start_apiserver { node_port_range="--service-node-port-range=${NODE_PORT_RANGE}" fi - # Create CA signers - if [[ "${ENABLE_SINGLE_CA_SIGNER:-}" = true ]]; then - 
kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"client auth","server auth"' - sudo cp "${CERT_DIR}/server-ca.key" "${CERT_DIR}/client-ca.key" - sudo cp "${CERT_DIR}/server-ca.crt" "${CERT_DIR}/client-ca.crt" - sudo cp "${CERT_DIR}/server-ca-config.json" "${CERT_DIR}/client-ca-config.json" - else - kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"server auth"' - kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" client '"client auth"' + if [[ "${REUSE_CERTS}" != true ]]; then + # Create Certs + generate_certs fi - # Create auth proxy client ca - kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header '"client auth"' - - # serving cert for kube-apiserver - kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-apiserver kubernetes.default kubernetes.default.svc "localhost" ${API_HOST_IP} ${API_HOST} ${FIRST_SERVICE_CLUSTER_IP} - - # Create client certs signed with client-ca, given id, given CN and a number of groups - kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kubelet system:node:${HOSTNAME_OVERRIDE} system:nodes - kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-proxy system:kube-proxy system:nodes - kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' controller system:kube-controller-manager - kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' scheduler system:kube-scheduler - kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' admin system:admin system:masters - - # Create matching certificates for kube-aggregator - kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-aggregator api.kube-public.svc "localhost" ${API_HOST_IP} - kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 
request-header-ca auth-proxy system:auth-proxy - # TODO remove masters and add rolebinding - kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-aggregator system:kube-aggregator system:masters - kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-aggregator - cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}" if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then cloud_config_arg="--cloud-provider=external" @@ -639,6 +652,7 @@ function start_controller_manager { --use-service-account-credentials \ --controllers="${KUBE_CONTROLLERS}" \ --leader-elect=false \ + --cert-dir="$CERT_DIR" \ --master="https://${API_HOST}:${API_SECURE_PORT}" >"${CTLRMGR_LOG}" 2>&1 & CTLRMGR_PID=$! } @@ -679,7 +693,7 @@ function start_kubelet { priv_arg="" if [[ -n "${ALLOW_PRIVILEGED}" ]]; then - priv_arg="--allow-privileged " + priv_arg="--allow-privileged=${ALLOW_PRIVILEGED} " fi cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}" @@ -749,6 +763,7 @@ function start_kubelet { --enable-controller-attach-detach="${ENABLE_CONTROLLER_ATTACH_DETACH}" --cgroups-per-qos="${CGROUPS_PER_QOS}" --cgroup-driver="${CGROUP_DRIVER}" + --cgroup-root="${CGROUP_ROOT}" --eviction-hard="${EVICTION_HARD}" --eviction-soft="${EVICTION_SOFT}" --eviction-pressure-transition-period="${EVICTION_PRESSURE_TRANSITION_PERIOD}" @@ -861,7 +876,7 @@ EOF # foo: true # bar: false for gate in $(echo ${FEATURE_GATES} | tr ',' ' '); do - echo $gate | sed -e 's/\(.*\)=\(.*\)/ \1: \2/' + echo $gate | ${SED} -e 's/\(.*\)=\(.*\)/ \1: \2/' done fi >>/tmp/kube-proxy.yaml @@ -884,9 +899,8 @@ EOF function start_kubedns { if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then cp "${KUBE_ROOT}/cluster/addons/dns/kube-dns/kube-dns.yaml.in" kube-dns.yaml - sed -i -e "s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" kube-dns.yaml - sed -i -e "s/{{ pillar\['dns_server'\] 
}}/${DNS_SERVER_IP}/g" kube-dns.yaml - + ${SED} -i -e "s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g" kube-dns.yaml + ${SED} -i -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" kube-dns.yaml # TODO update to dns role once we have one. # use kubectl to create kubedns addon ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kube-dns.yaml diff --git a/hack/make-rules/test-e2e-node.sh b/hack/make-rules/test-e2e-node.sh index 2e5c95ae264..c82bfa30599 100755 --- a/hack/make-rules/test-e2e-node.sh +++ b/hack/make-rules/test-e2e-node.sh @@ -96,7 +96,7 @@ if [ $remote = true ] ; then test_suite=${TEST_SUITE:-"default"} # Get the compute zone - zone=$(gcloud info --format='value(config.properties.compute.zone)') + zone=${ZONE:-"$(gcloud info --format='value(config.properties.compute.zone)')"} if [[ $zone == "" ]]; then echo "Could not find gcloud compute/zone when running: \`gcloud info --format='value(config.properties.compute.zone)'\`" exit 1 diff --git a/hack/testdata/pod-changed.yaml b/hack/testdata/pod-changed.yaml new file mode 100644 index 00000000000..69476100d3f --- /dev/null +++ b/hack/testdata/pod-changed.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pod + labels: + name: test-pod-label +spec: + containers: + - name: kubernetes-pause + image: k8s.gcr.io/pause:3.0 diff --git a/hack/update-gofmt.sh b/hack/update-gofmt.sh index c3220e3bbda..d5d40097389 100755 --- a/hack/update-gofmt.sh +++ b/hack/update-gofmt.sh @@ -31,6 +31,7 @@ find_files() { find . -not \( \ \( \ -wholename './output' \ + -o -wholename './.git' \ -o -wholename './_output' \ -o -wholename './_gopath' \ -o -wholename './release' \ diff --git a/hack/verify-gofmt.sh b/hack/verify-gofmt.sh index ff8797d18db..9dd47884437 100755 --- a/hack/verify-gofmt.sh +++ b/hack/verify-gofmt.sh @@ -36,6 +36,7 @@ find_files() { find . 
-not \( \ \( \ -wholename './output' \ + -o -wholename './.git' \ -o -wholename './_output' \ -o -wholename './_gopath' \ -o -wholename './release' \ diff --git a/pkg/api/endpoints/util.go b/pkg/api/endpoints/util.go index c834a18f3f1..2c076f9afd1 100644 --- a/pkg/api/endpoints/util.go +++ b/pkg/api/endpoints/util.go @@ -171,6 +171,7 @@ func (sl addrsReady) Less(i, j int) bool { return lessAddrReady(sl[i], sl[j]) } +// LessEndpointAddress compares IP addresses lexicographically and returns true if first argument is lesser than second func LessEndpointAddress(a, b *api.EndpointAddress) bool { ipComparison := bytes.Compare([]byte(a.IP), []byte(b.IP)) if ipComparison != 0 { @@ -190,8 +191,8 @@ func LessEndpointAddress(a, b *api.EndpointAddress) bool { func SortSubsets(subsets []api.EndpointSubset) []api.EndpointSubset { for i := range subsets { ss := &subsets[i] - sort.Sort(addrsByIpAndUID(ss.Addresses)) - sort.Sort(addrsByIpAndUID(ss.NotReadyAddresses)) + sort.Sort(addrsByIPAndUID(ss.Addresses)) + sort.Sort(addrsByIPAndUID(ss.NotReadyAddresses)) sort.Sort(portsByHash(ss.Ports)) } sort.Sort(subsetsByHash(subsets)) @@ -214,11 +215,11 @@ func (sl subsetsByHash) Less(i, j int) bool { return bytes.Compare(h1, h2) < 0 } -type addrsByIpAndUID []api.EndpointAddress +type addrsByIPAndUID []api.EndpointAddress -func (sl addrsByIpAndUID) Len() int { return len(sl) } -func (sl addrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl addrsByIpAndUID) Less(i, j int) bool { +func (sl addrsByIPAndUID) Len() int { return len(sl) } +func (sl addrsByIPAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } +func (sl addrsByIPAndUID) Less(i, j int) bool { return LessEndpointAddress(&sl[i], &sl[j]) } diff --git a/pkg/api/ref/ref.go b/pkg/api/ref/ref.go index d6576750ad1..d1a4c10de7b 100644 --- a/pkg/api/ref/ref.go +++ b/pkg/api/ref/ref.go @@ -28,8 +28,8 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" ) +// Errors that could be returned by GetReference. 
var ( - // Errors that could be returned by GetReference. ErrNilObject = errors.New("can't reference a nil object") ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") ) @@ -80,12 +80,12 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*api.ObjectRefere if len(selfLink) == 0 { return nil, ErrNoSelfLink } - selfLinkUrl, err := url.Parse(selfLink) + selfLinkURL, err := url.Parse(selfLink) if err != nil { return nil, err } // example paths: ///* - parts := strings.Split(selfLinkUrl.Path, "/") + parts := strings.Split(selfLinkURL.Path, "/") if len(parts) < 3 { return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) } diff --git a/pkg/api/testapi/testapi.go b/pkg/api/testapi/testapi.go index 138d8241314..31d419620c9 100644 --- a/pkg/api/testapi/testapi.go +++ b/pkg/api/testapi/testapi.go @@ -53,6 +53,7 @@ import ( "k8s.io/kubernetes/pkg/apis/settings" "k8s.io/kubernetes/pkg/apis/storage" + // Initialize install packages _ "k8s.io/kubernetes/pkg/apis/admission/install" _ "k8s.io/kubernetes/pkg/apis/admissionregistration/install" _ "k8s.io/kubernetes/pkg/apis/apps/install" @@ -74,6 +75,7 @@ import ( _ "k8s.io/kubernetes/pkg/apis/storage/install" ) +// Variables to store GroupName var ( Groups = make(map[string]TestGroup) Default TestGroup @@ -90,6 +92,7 @@ var ( storageSerializer runtime.SerializerInfo ) +// TestGroup contains GroupVersion to uniquely identify the API type TestGroup struct { externalGroupVersion schema.GroupVersion } @@ -276,6 +279,7 @@ func init() { Admission = Groups[admission.GroupName] } +// GroupVersion makes copy of schema.GroupVersion func (g TestGroup) GroupVersion() *schema.GroupVersion { copyOfGroupVersion := g.externalGroupVersion return ©OfGroupVersion @@ -290,6 +294,7 @@ func (g TestGroup) Codec() runtime.Codec { return legacyscheme.Codecs.CodecForVersions(serializer.Serializer, legacyscheme.Codecs.UniversalDeserializer(), 
schema.GroupVersions{g.externalGroupVersion}, nil) } +// StorageMediaType finds media type set by KUBE_TEST_API_STORAGE_TYPE env var used to store objects in storage func StorageMediaType() string { return os.Getenv("KUBE_TEST_API_STORAGE_TYPE") } @@ -322,14 +327,13 @@ func (g TestGroup) SelfLink(resource, name string) string { return fmt.Sprintf("/api/%s/%s", g.externalGroupVersion.Version, resource) } return fmt.Sprintf("/api/%s/%s/%s", g.externalGroupVersion.Version, resource, name) - } else { - // TODO: will need a /apis prefix once we have proper multi-group - // support - if name == "" { - return fmt.Sprintf("/apis/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource) - } - return fmt.Sprintf("/apis/%s/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource, name) } + // TODO: will need a /apis prefix once we have proper multi-group + // support + if name == "" { + return fmt.Sprintf("/apis/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource) + } + return fmt.Sprintf("/apis/%s/%s/%s/%s", g.externalGroupVersion.Group, g.externalGroupVersion.Version, resource, name) } // ResourcePathWithPrefix returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name. diff --git a/pkg/api/testing/compat/compatibility_tester.go b/pkg/api/testing/compat/compatibility_tester.go index 68b4bc9a852..941b139c768 100644 --- a/pkg/api/testing/compat/compatibility_tester.go +++ b/pkg/api/testing/compat/compatibility_tester.go @@ -31,11 +31,10 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" ) -// Based on: https://github.com/openshift/origin/blob/master/pkg/api/compatibility_test.go -// // TestCompatibility reencodes the input using the codec for the given // version and checks for the presence of the expected keys and absent // keys in the resulting JSON. 
+// Based on: https://github.com/openshift/origin/blob/master/pkg/api/compatibility_test.go func TestCompatibility( t *testing.T, version schema.GroupVersion, diff --git a/pkg/api/testing/conversion.go b/pkg/api/testing/conversion.go index 67f138305de..229438faf49 100644 --- a/pkg/api/testing/conversion.go +++ b/pkg/api/testing/conversion.go @@ -24,7 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" ) -// TestSelectableFieldLabelConversions verifies that given resource have field +// TestSelectableFieldLabelConversionsOfKind verifies that given resource have field // label conversion defined for each its selectable field. // fields contains selectable fields of the resource. // labelMap maps deprecated labels to their canonical names. diff --git a/pkg/api/testing/fuzzer.go b/pkg/api/testing/fuzzer.go index 85d3d264bc2..2a2bb3175ba 100644 --- a/pkg/api/testing/fuzzer.go +++ b/pkg/api/testing/fuzzer.go @@ -88,6 +88,7 @@ func overrideGenericFuncs(codecs runtimeserializer.CodecFactory) []interface{} { } } +// FuzzerFuncs is a list of fuzzer functions var FuzzerFuncs = fuzzer.MergeFuzzerFuncs( genericfuzzer.Funcs, overrideGenericFuncs, diff --git a/pkg/api/testing/pod_specs.go b/pkg/api/testing/pod_specs.go index 688788dcefd..9c3d0dcb7bf 100644 --- a/pkg/api/testing/pod_specs.go +++ b/pkg/api/testing/pod_specs.go @@ -24,22 +24,26 @@ import ( // DeepEqualSafePodSpec returns a PodSpec which is ready to be used with apiequality.Semantic.DeepEqual func DeepEqualSafePodSpec() api.PodSpec { grace := int64(30) + enableServiceLinks := v1.DefaultEnableServiceLinks return api.PodSpec{ RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, TerminationGracePeriodSeconds: &grace, SecurityContext: &api.PodSecurityContext{}, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, } } // V1DeepEqualSafePodSpec returns a PodSpec which is ready to be used with apiequality.Semantic.DeepEqual func V1DeepEqualSafePodSpec() v1.PodSpec { 
grace := int64(30) + enableServiceLinks := v1.DefaultEnableServiceLinks return v1.PodSpec{ RestartPolicy: v1.RestartPolicyAlways, DNSPolicy: v1.DNSClusterFirst, TerminationGracePeriodSeconds: &grace, SecurityContext: &v1.PodSecurityContext{}, + EnableServiceLinks: &enableServiceLinks, } } diff --git a/pkg/api/testing/serialization_test.go b/pkg/api/testing/serialization_test.go index e608916c70e..2f34df00fe6 100644 --- a/pkg/api/testing/serialization_test.go +++ b/pkg/api/testing/serialization_test.go @@ -65,7 +65,7 @@ func fuzzInternalObject(t *testing.T, forVersion schema.GroupVersion, item runti return item } -func Convert_v1beta1_ReplicaSet_to_api_ReplicationController(in *v1beta1.ReplicaSet, out *api.ReplicationController, s conversion.Scope) error { +func ConvertV1beta1ReplicaSetToAPIReplicationController(in *v1beta1.ReplicaSet, out *api.ReplicationController, s conversion.Scope) error { intermediate1 := &extensions.ReplicaSet{} if err := k8s_v1beta1.Convert_v1beta1_ReplicaSet_To_extensions_ReplicaSet(in, intermediate1, s); err != nil { return err @@ -80,7 +80,7 @@ func Convert_v1beta1_ReplicaSet_to_api_ReplicationController(in *v1beta1.Replica } func TestSetControllerConversion(t *testing.T) { - if err := legacyscheme.Scheme.AddConversionFuncs(Convert_v1beta1_ReplicaSet_to_api_ReplicationController); err != nil { + if err := legacyscheme.Scheme.AddConversionFuncs(ConvertV1beta1ReplicaSetToAPIReplicationController); err != nil { t.Fatal(err) } @@ -214,6 +214,7 @@ func TestRoundTripTypes(t *testing.T) { // decoded without information loss or mutation. 
func TestEncodePtr(t *testing.T) { grace := int64(30) + enableServiceLinks := v1.DefaultEnableServiceLinks pod := &api.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"name": "foo"}, @@ -224,8 +225,9 @@ func TestEncodePtr(t *testing.T) { TerminationGracePeriodSeconds: &grace, - SecurityContext: &api.PodSecurityContext{}, - SchedulerName: api.DefaultSchedulerName, + SecurityContext: &api.PodSecurityContext{}, + SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, } obj := runtime.Object(pod) diff --git a/pkg/api/v1/endpoints/util.go b/pkg/api/v1/endpoints/util.go index db551c68a6f..d0af83be274 100644 --- a/pkg/api/v1/endpoints/util.go +++ b/pkg/api/v1/endpoints/util.go @@ -172,6 +172,7 @@ func (sl addrsReady) Less(i, j int) bool { return lessAddrReady(sl[i], sl[j]) } +// LessEndpointAddress compares IP addresses lexicographically and returns true if first argument is lesser than second func LessEndpointAddress(a, b *v1.EndpointAddress) bool { ipComparison := bytes.Compare([]byte(a.IP), []byte(b.IP)) if ipComparison != 0 { @@ -191,8 +192,8 @@ func LessEndpointAddress(a, b *v1.EndpointAddress) bool { func SortSubsets(subsets []v1.EndpointSubset) []v1.EndpointSubset { for i := range subsets { ss := &subsets[i] - sort.Sort(addrsByIpAndUID(ss.Addresses)) - sort.Sort(addrsByIpAndUID(ss.NotReadyAddresses)) + sort.Sort(addrsByIPAndUID(ss.Addresses)) + sort.Sort(addrsByIPAndUID(ss.NotReadyAddresses)) sort.Sort(portsByHash(ss.Ports)) } sort.Sort(subsetsByHash(subsets)) @@ -215,11 +216,11 @@ func (sl subsetsByHash) Less(i, j int) bool { return bytes.Compare(h1, h2) < 0 } -type addrsByIpAndUID []v1.EndpointAddress +type addrsByIPAndUID []v1.EndpointAddress -func (sl addrsByIpAndUID) Len() int { return len(sl) } -func (sl addrsByIpAndUID) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] } -func (sl addrsByIpAndUID) Less(i, j int) bool { +func (sl addrsByIPAndUID) Len() int { return len(sl) } +func (sl addrsByIPAndUID) Swap(i, j int) { 
sl[i], sl[j] = sl[j], sl[i] } +func (sl addrsByIPAndUID) Less(i, j int) bool { return LessEndpointAddress(&sl[i], &sl[j]) } diff --git a/pkg/api/v1/pod/util.go b/pkg/api/v1/pod/util.go index 416221d520e..558e8a48c1a 100644 --- a/pkg/api/v1/pod/util.go +++ b/pkg/api/v1/pod/util.go @@ -238,13 +238,13 @@ func IsPodReady(pod *v1.Pod) bool { return IsPodReadyConditionTrue(pod.Status) } -// IsPodReady returns true if a pod is ready; false otherwise. +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. func IsPodReadyConditionTrue(status v1.PodStatus) bool { condition := GetPodReadyCondition(status) return condition != nil && condition.Status == v1.ConditionTrue } -// Extracts the pod ready condition from the given status and returns that. +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. // Returns nil if the condition is not present. func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition { _, condition := GetPodCondition(&status, v1.PodReady) @@ -274,7 +274,7 @@ func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodC return -1, nil } -// Updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the +// UpdatePodCondition updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the // status has changed. // Returns true if pod condition has changed or has been added. func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool { @@ -286,20 +286,19 @@ func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool { // We are adding new pod condition. status.Conditions = append(status.Conditions, *condition) return true - } else { - // We are updating an existing condition, so we need to check if it has changed. 
- if condition.Status == oldCondition.Status { - condition.LastTransitionTime = oldCondition.LastTransitionTime - } - - isEqual := condition.Status == oldCondition.Status && - condition.Reason == oldCondition.Reason && - condition.Message == oldCondition.Message && - condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) && - condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) - - status.Conditions[conditionIndex] = *condition - // Return true if one of the fields have changed. - return !isEqual } + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) && + condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) + + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields have changed. + return !isEqual } diff --git a/pkg/api/v1/resource/helpers.go b/pkg/api/v1/resource/helpers.go index 6f7c59f587d..d30ab7ed86e 100644 --- a/pkg/api/v1/resource/helpers.go +++ b/pkg/api/v1/resource/helpers.go @@ -68,7 +68,7 @@ func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) { return } -// finds and returns the request for a specific resource. +// GetResourceRequest finds and returns the request for a specific resource. func GetResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 { if resource == v1.ResourcePods { return 1 diff --git a/pkg/apis/abac/v0/doc.go b/pkg/apis/abac/v0/doc.go index bd73e3f49a4..b8d89497834 100644 --- a/pkg/apis/abac/v0/doc.go +++ b/pkg/apis/abac/v0/doc.go @@ -17,4 +17,5 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +groupName=abac.authorization.kubernetes.io + package v0 // import "k8s.io/kubernetes/pkg/apis/abac/v0" diff --git a/pkg/apis/abac/v1beta1/doc.go b/pkg/apis/abac/v1beta1/doc.go index 3fa41e6b849..62ac4e21d6f 100644 --- a/pkg/apis/abac/v1beta1/doc.go +++ b/pkg/apis/abac/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=abac.authorization.kubernetes.io + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/abac/v1beta1" diff --git a/pkg/apis/admission/doc.go b/pkg/apis/admission/doc.go index 64273253a95..bbf4d2e92d2 100644 --- a/pkg/apis/admission/doc.go +++ b/pkg/apis/admission/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=admission.k8s.io + package admission // import "k8s.io/kubernetes/pkg/apis/admission" diff --git a/pkg/apis/admission/v1beta1/doc.go b/pkg/apis/admission/v1beta1/doc.go index 1bb4d198e48..b86242990bc 100644 --- a/pkg/apis/admission/v1beta1/doc.go +++ b/pkg/apis/admission/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/admission/v1beta1 // +groupName=admission.k8s.io + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/admission/v1beta1" diff --git a/pkg/apis/admissionregistration/doc.go b/pkg/apis/admissionregistration/doc.go index 7b76bb29b24..8c2f04230ad 100644 --- a/pkg/apis/admissionregistration/doc.go +++ b/pkg/apis/admissionregistration/doc.go @@ -15,10 +15,10 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +groupName=admissionregistration.k8s.io // Package admissionregistration is the internal version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration // InitializerConfiguration, ValidatingWebhookConfiguration, and MutatingWebhookConfiguration are for the // new dynamic admission controller configuration. 
-// +groupName=admissionregistration.k8s.io package admissionregistration // import "k8s.io/kubernetes/pkg/apis/admissionregistration" diff --git a/pkg/apis/admissionregistration/v1alpha1/doc.go b/pkg/apis/admissionregistration/v1alpha1/doc.go index 43fd23dabf6..833526f715c 100644 --- a/pkg/apis/admissionregistration/v1alpha1/doc.go +++ b/pkg/apis/admissionregistration/v1alpha1/doc.go @@ -18,10 +18,10 @@ limitations under the License. // +k8s:conversion-gen-external-types=k8s.io/api/admissionregistration/v1alpha1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/admissionregistration/v1alpha1 +// +groupName=admissionregistration.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration // InitializerConfiguration, ValidatingWebhookConfiguration, and MutatingWebhookConfiguration are for the // new dynamic admission controller configuration. -// +groupName=admissionregistration.k8s.io package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1" diff --git a/pkg/apis/admissionregistration/v1beta1/doc.go b/pkg/apis/admissionregistration/v1beta1/doc.go index cf03718ed26..873110d12dd 100644 --- a/pkg/apis/admissionregistration/v1beta1/doc.go +++ b/pkg/apis/admissionregistration/v1beta1/doc.go @@ -18,10 +18,10 @@ limitations under the License. // +k8s:conversion-gen-external-types=k8s.io/api/admissionregistration/v1beta1 // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/admissionregistration/v1beta1 +// +groupName=admissionregistration.k8s.io // Package v1beta1 is the v1beta1 version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration // InitializerConfiguration, ValidatingWebhookConfiguration, and MutatingWebhookConfiguration are for the // new dynamic admission controller configuration. 
-// +groupName=admissionregistration.k8s.io package v1beta1 // import "k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1" diff --git a/pkg/apis/apps/v1/defaults_test.go b/pkg/apis/apps/v1/defaults_test.go index 484cbf713a0..04322523272 100644 --- a/pkg/apis/apps/v1/defaults_test.go +++ b/pkg/apis/apps/v1/defaults_test.go @@ -39,6 +39,7 @@ func TestSetDefaultDaemonSetSpec(t *testing.T) { defaultLabels := map[string]string{"foo": "bar"} maxUnavailable := intstr.FromInt(1) period := int64(v1.DefaultTerminationGracePeriodSeconds) + enableServiceLinks := v1.DefaultEnableServiceLinks defaultTemplate := v1.PodTemplateSpec{ Spec: v1.PodSpec{ DNSPolicy: v1.DNSClusterFirst, @@ -46,6 +47,7 @@ func TestSetDefaultDaemonSetSpec(t *testing.T) { SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &period, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, ObjectMeta: metav1.ObjectMeta{ Labels: defaultLabels, @@ -58,6 +60,7 @@ func TestSetDefaultDaemonSetSpec(t *testing.T) { SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &period, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, } tests := []struct { @@ -175,6 +178,7 @@ func TestSetDefaultStatefulSet(t *testing.T) { var defaultReplicas int32 = 1 period := int64(v1.DefaultTerminationGracePeriodSeconds) + enableServiceLinks := v1.DefaultEnableServiceLinks defaultTemplate := v1.PodTemplateSpec{ Spec: v1.PodSpec{ DNSPolicy: v1.DNSClusterFirst, @@ -182,6 +186,7 @@ func TestSetDefaultStatefulSet(t *testing.T) { SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &period, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, ObjectMeta: metav1.ObjectMeta{ Labels: defaultLabels, @@ -286,6 +291,7 @@ func TestSetDefaultDeployment(t *testing.T) { defaultIntOrString := intstr.FromString("25%") differentIntOrString := intstr.FromInt(5) period := 
int64(v1.DefaultTerminationGracePeriodSeconds) + enableServiceLinks := v1.DefaultEnableServiceLinks defaultTemplate := v1.PodTemplateSpec{ Spec: v1.PodSpec{ DNSPolicy: v1.DNSClusterFirst, @@ -293,6 +299,7 @@ func TestSetDefaultDeployment(t *testing.T) { SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &period, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, } tests := []struct { diff --git a/pkg/apis/apps/v1beta1/defaults_test.go b/pkg/apis/apps/v1beta1/defaults_test.go index be10a078f5b..210339607ca 100644 --- a/pkg/apis/apps/v1beta1/defaults_test.go +++ b/pkg/apis/apps/v1beta1/defaults_test.go @@ -38,6 +38,7 @@ func TestSetDefaultDeployment(t *testing.T) { defaultIntOrString := intstr.FromString("25%") differentIntOrString := intstr.FromInt(5) period := int64(v1.DefaultTerminationGracePeriodSeconds) + enableServiceLinks := v1.DefaultEnableServiceLinks defaultTemplate := v1.PodTemplateSpec{ Spec: v1.PodSpec{ DNSPolicy: v1.DNSClusterFirst, @@ -45,6 +46,7 @@ func TestSetDefaultDeployment(t *testing.T) { SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &period, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, } tests := []struct { diff --git a/pkg/apis/apps/v1beta2/defaults_test.go b/pkg/apis/apps/v1beta2/defaults_test.go index 9edf217ea41..4258f20d896 100644 --- a/pkg/apis/apps/v1beta2/defaults_test.go +++ b/pkg/apis/apps/v1beta2/defaults_test.go @@ -39,6 +39,7 @@ func TestSetDefaultDaemonSetSpec(t *testing.T) { defaultLabels := map[string]string{"foo": "bar"} maxUnavailable := intstr.FromInt(1) period := int64(v1.DefaultTerminationGracePeriodSeconds) + enableServiceLinks := v1.DefaultEnableServiceLinks defaultTemplate := v1.PodTemplateSpec{ Spec: v1.PodSpec{ DNSPolicy: v1.DNSClusterFirst, @@ -46,6 +47,7 @@ func TestSetDefaultDaemonSetSpec(t *testing.T) { SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &period, 
SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, ObjectMeta: metav1.ObjectMeta{ Labels: defaultLabels, @@ -58,6 +60,7 @@ func TestSetDefaultDaemonSetSpec(t *testing.T) { SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &period, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, } tests := []struct { @@ -175,6 +178,7 @@ func TestSetDefaultStatefulSet(t *testing.T) { var defaultReplicas int32 = 1 period := int64(v1.DefaultTerminationGracePeriodSeconds) + enableServiceLinks := v1.DefaultEnableServiceLinks defaultTemplate := v1.PodTemplateSpec{ Spec: v1.PodSpec{ DNSPolicy: v1.DNSClusterFirst, @@ -182,6 +186,7 @@ func TestSetDefaultStatefulSet(t *testing.T) { SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &period, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, ObjectMeta: metav1.ObjectMeta{ Labels: defaultLabels, @@ -286,6 +291,7 @@ func TestSetDefaultDeployment(t *testing.T) { defaultIntOrString := intstr.FromString("25%") differentIntOrString := intstr.FromInt(5) period := int64(v1.DefaultTerminationGracePeriodSeconds) + enableServiceLinks := v1.DefaultEnableServiceLinks defaultTemplate := v1.PodTemplateSpec{ Spec: v1.PodSpec{ DNSPolicy: v1.DNSClusterFirst, @@ -293,6 +299,7 @@ func TestSetDefaultDeployment(t *testing.T) { SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &period, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, } tests := []struct { diff --git a/pkg/apis/authentication/doc.go b/pkg/apis/authentication/doc.go index 0afbdd3a3a6..b86561616ec 100644 --- a/pkg/apis/authentication/doc.go +++ b/pkg/apis/authentication/doc.go @@ -16,4 +16,5 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io + package authentication // import "k8s.io/kubernetes/pkg/apis/authentication" diff --git a/pkg/apis/authentication/v1/doc.go b/pkg/apis/authentication/v1/doc.go index 50ec02077ff..6c4eabafa26 100644 --- a/pkg/apis/authentication/v1/doc.go +++ b/pkg/apis/authentication/v1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +groupName=authentication.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authentication/v1 + package v1 // import "k8s.io/kubernetes/pkg/apis/authentication/v1" diff --git a/pkg/apis/authentication/v1beta1/doc.go b/pkg/apis/authentication/v1beta1/doc.go index 7f7a5ffa3be..07c1f498330 100644 --- a/pkg/apis/authentication/v1beta1/doc.go +++ b/pkg/apis/authentication/v1beta1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +groupName=authentication.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authentication/v1beta1 + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/authentication/v1beta1" diff --git a/pkg/apis/authorization/doc.go b/pkg/apis/authorization/doc.go index 5cb3094aba8..896049861f6 100644 --- a/pkg/apis/authorization/doc.go +++ b/pkg/apis/authorization/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=authorization.k8s.io + package authorization // import "k8s.io/kubernetes/pkg/apis/authorization" diff --git a/pkg/apis/authorization/v1/doc.go b/pkg/apis/authorization/v1/doc.go index 11b7605c898..6b4259dffec 100644 --- a/pkg/apis/authorization/v1/doc.go +++ b/pkg/apis/authorization/v1/doc.go @@ -20,4 +20,5 @@ limitations under the License. 
// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authorization/v1 // +groupName=authorization.k8s.io + package v1 // import "k8s.io/kubernetes/pkg/apis/authorization/v1" diff --git a/pkg/apis/authorization/v1beta1/doc.go b/pkg/apis/authorization/v1beta1/doc.go index a958fa36550..3300d9ce20b 100644 --- a/pkg/apis/authorization/v1beta1/doc.go +++ b/pkg/apis/authorization/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/authorization/v1beta1 // +groupName=authorization.k8s.io + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/authorization/v1beta1" diff --git a/pkg/apis/certificates/doc.go b/pkg/apis/certificates/doc.go index 65aad6491eb..c752aacaf57 100644 --- a/pkg/apis/certificates/doc.go +++ b/pkg/apis/certificates/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=certificates.k8s.io + package certificates // import "k8s.io/kubernetes/pkg/apis/certificates" diff --git a/pkg/apis/certificates/v1beta1/doc.go b/pkg/apis/certificates/v1beta1/doc.go index d5f13dfff3a..8ba037c2592 100644 --- a/pkg/apis/certificates/v1beta1/doc.go +++ b/pkg/apis/certificates/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/certificates/v1beta1 // +groupName=certificates.k8s.io + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/certificates/v1beta1" diff --git a/pkg/apis/coordination/doc.go b/pkg/apis/coordination/doc.go index c8e6b5ce605..8cce2eda252 100644 --- a/pkg/apis/coordination/doc.go +++ b/pkg/apis/coordination/doc.go @@ -17,4 +17,5 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +groupName=coordination.k8s.io + package coordination // import "k8s.io/kubernetes/pkg/apis/coordination" diff --git a/pkg/apis/coordination/v1beta1/doc.go b/pkg/apis/coordination/v1beta1/doc.go index 406dbe4b33d..da30f7fc2be 100644 --- a/pkg/apis/coordination/v1beta1/doc.go +++ b/pkg/apis/coordination/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/coordination/v1beta1 // +groupName=coordination.k8s.io + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/coordination/v1beta1" diff --git a/pkg/apis/core/fuzzer/BUILD b/pkg/apis/core/fuzzer/BUILD index c1f231f6227..c30003bbdd7 100644 --- a/pkg/apis/core/fuzzer/BUILD +++ b/pkg/apis/core/fuzzer/BUILD @@ -11,6 +11,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/apis/core/fuzzer", deps = [ "//pkg/apis/core:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/apis/core/fuzzer/fuzzer.go b/pkg/apis/core/fuzzer/fuzzer.go index 7a2c9b7daf9..cc18ed395d8 100644 --- a/pkg/apis/core/fuzzer/fuzzer.go +++ b/pkg/apis/core/fuzzer/fuzzer.go @@ -23,6 +23,7 @@ import ( fuzz "github.com/google/gofuzz" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -84,6 +85,10 @@ var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} { if s.SchedulerName == "" { s.SchedulerName = core.DefaultSchedulerName } + if s.EnableServiceLinks == nil { + enableServiceLinks := corev1.DefaultEnableServiceLinks + s.EnableServiceLinks = &enableServiceLinks + } }, func(j *core.PodPhase, c fuzz.Continue) { statuses := []core.PodPhase{core.PodPending, core.PodRunning, core.PodFailed, 
core.PodUnknown} diff --git a/pkg/apis/core/helper/helpers.go b/pkg/apis/core/helper/helpers.go index 486122298c2..10c33f66bd1 100644 --- a/pkg/apis/core/helper/helpers.go +++ b/pkg/apis/core/helper/helpers.go @@ -537,28 +537,3 @@ func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool { return false } - -// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements -// labels.Selector. -func ScopedResourceSelectorRequirementsAsSelector(ssr core.ScopedResourceSelectorRequirement) (labels.Selector, error) { - selector := labels.NewSelector() - var op selection.Operator - switch ssr.Operator { - case core.ScopeSelectorOpIn: - op = selection.In - case core.ScopeSelectorOpNotIn: - op = selection.NotIn - case core.ScopeSelectorOpExists: - op = selection.Exists - case core.ScopeSelectorOpDoesNotExist: - op = selection.DoesNotExist - default: - return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator) - } - r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values) - if err != nil { - return nil, err - } - selector = selector.Add(*r) - return selector, nil -} diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go index 702ffa4d432..72c8a966127 100644 --- a/pkg/apis/core/types.go +++ b/pkg/apis/core/types.go @@ -2597,6 +2597,11 @@ type PodSpec struct { // This is an alpha feature and may change in the future. // +optional RuntimeClassName *string + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // If not specified, the default is true. 
+ // +optional + EnableServiceLinks *bool } // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the diff --git a/pkg/apis/core/v1/defaults.go b/pkg/apis/core/v1/defaults.go index 1c7e2b32d66..3edf9170017 100644 --- a/pkg/apis/core/v1/defaults.go +++ b/pkg/apis/core/v1/defaults.go @@ -182,6 +182,10 @@ func SetDefaults_PodSpec(obj *v1.PodSpec) { if obj.SchedulerName == "" { obj.SchedulerName = v1.DefaultSchedulerName } + if obj.EnableServiceLinks == nil { + enableServiceLinks := v1.DefaultEnableServiceLinks + obj.EnableServiceLinks = &enableServiceLinks + } } func SetDefaults_Probe(obj *v1.Probe) { if obj.TimeoutSeconds == 0 { diff --git a/pkg/apis/core/v1/defaults_test.go b/pkg/apis/core/v1/defaults_test.go index 4e4d6da5f0f..9c5da00b3b8 100644 --- a/pkg/apis/core/v1/defaults_test.go +++ b/pkg/apis/core/v1/defaults_test.go @@ -1370,3 +1370,11 @@ func TestSetDefaultHostPathVolumeSource(t *testing.T) { t.Errorf("Expected v1.HostPathVolumeSource default type %v, got %v", expectedType, defaultType) } } + +func TestSetDefaultEnableServiceLinks(t *testing.T) { + pod := &v1.Pod{} + output := roundTrip(t, runtime.Object(pod)).(*v1.Pod) + if output.Spec.EnableServiceLinks == nil || *output.Spec.EnableServiceLinks != v1.DefaultEnableServiceLinks { + t.Errorf("Expected enableServiceLinks value: %+v\ngot: %+v\n", v1.DefaultEnableServiceLinks, *output.Spec.EnableServiceLinks) + } +} diff --git a/pkg/apis/core/v1/helper/helpers.go b/pkg/apis/core/v1/helper/helpers.go index bf6c001b780..fa11a6b36a9 100644 --- a/pkg/apis/core/v1/helper/helpers.go +++ b/pkg/apis/core/v1/helper/helpers.go @@ -500,3 +500,28 @@ func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { return "" } + +// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements +// labels.Selector. 
+func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorRequirement) (labels.Selector, error) { + selector := labels.NewSelector() + var op selection.Operator + switch ssr.Operator { + case v1.ScopeSelectorOpIn: + op = selection.In + case v1.ScopeSelectorOpNotIn: + op = selection.NotIn + case v1.ScopeSelectorOpExists: + op = selection.Exists + case v1.ScopeSelectorOpDoesNotExist: + op = selection.DoesNotExist + default: + return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator) + } + r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + return selector, nil +} diff --git a/pkg/apis/core/v1/zz_generated.conversion.go b/pkg/apis/core/v1/zz_generated.conversion.go index 7bfbcf0f119..45f9454fd8d 100644 --- a/pkg/apis/core/v1/zz_generated.conversion.go +++ b/pkg/apis/core/v1/zz_generated.conversion.go @@ -5550,6 +5550,7 @@ func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s out.DNSConfig = (*core.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) out.ReadinessGates = *(*[]core.PodReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) out.RuntimeClassName = (*string)(unsafe.Pointer(in.RuntimeClassName)) + out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks)) return nil } @@ -5616,6 +5617,7 @@ func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s out.DNSConfig = (*v1.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) out.ReadinessGates = *(*[]v1.PodReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) out.RuntimeClassName = (*string)(unsafe.Pointer(in.RuntimeClassName)) + out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks)) return nil } diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index ab81fe45d33..db06578dd54 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -5124,50 
+5124,16 @@ func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *core.Namespace) return allErrs } -// Construct lookup map of old subset IPs to NodeNames. -func updateEpAddrToNodeNameMap(ipToNodeName map[string]string, addresses []core.EndpointAddress) { - for n := range addresses { - if addresses[n].NodeName == nil { - continue - } - ipToNodeName[addresses[n].IP] = *addresses[n].NodeName - } -} - -// Build a map across all subsets of IP -> NodeName -func buildEndpointAddressNodeNameMap(subsets []core.EndpointSubset) map[string]string { - ipToNodeName := make(map[string]string) - for i := range subsets { - updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].Addresses) - updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].NotReadyAddresses) - } - return ipToNodeName -} - -func validateEpAddrNodeNameTransition(addr *core.EndpointAddress, ipToNodeName map[string]string, fldPath *field.Path) field.ErrorList { - errList := field.ErrorList{} - existingNodeName, found := ipToNodeName[addr.IP] - if !found { - return errList - } - if addr.NodeName == nil || *addr.NodeName == existingNodeName { - return errList - } - // NodeName entry found for this endpoint IP, but user is attempting to change NodeName - return append(errList, field.Forbidden(fldPath, fmt.Sprintf("Cannot change NodeName for %s to %s", addr.IP, *addr.NodeName))) -} - // ValidateEndpoints tests if required fields are set. func ValidateEndpoints(endpoints *core.Endpoints) field.ErrorList { allErrs := ValidateObjectMeta(&endpoints.ObjectMeta, true, ValidateEndpointsName, field.NewPath("metadata")) allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(endpoints.Annotations, field.NewPath("annotations"))...) - allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, []core.EndpointSubset{}, field.NewPath("subsets"))...) + allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, field.NewPath("subsets"))...) 
return allErrs } -func validateEndpointSubsets(subsets []core.EndpointSubset, oldSubsets []core.EndpointSubset, fldPath *field.Path) field.ErrorList { +func validateEndpointSubsets(subsets []core.EndpointSubset, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - ipToNodeName := buildEndpointAddressNodeNameMap(oldSubsets) for i := range subsets { ss := &subsets[i] idxPath := fldPath.Index(i) @@ -5178,10 +5144,10 @@ func validateEndpointSubsets(subsets []core.EndpointSubset, oldSubsets []core.En allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`")) } for addr := range ss.Addresses { - allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr), ipToNodeName)...) + allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr))...) } for addr := range ss.NotReadyAddresses { - allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr), ipToNodeName)...) + allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr))...) } for port := range ss.Ports { allErrs = append(allErrs, validateEndpointPort(&ss.Ports[port], len(ss.Ports) > 1, idxPath.Child("ports").Index(port))...) 
@@ -5191,7 +5157,7 @@ func validateEndpointSubsets(subsets []core.EndpointSubset, oldSubsets []core.En return allErrs } -func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path, ipToNodeName map[string]string) field.ErrorList { +func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for _, msg := range validation.IsValidIP(address.IP) { allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, msg)) @@ -5205,10 +5171,6 @@ func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path, allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), *address.NodeName, msg)) } } - allErrs = append(allErrs, validateEpAddrNodeNameTransition(address, ipToNodeName, fldPath.Child("nodeName"))...) - if len(allErrs) > 0 { - return allErrs - } allErrs = append(allErrs, validateNonSpecialIP(address.IP, fldPath.Child("ip"))...) return allErrs } @@ -5260,9 +5222,11 @@ func validateEndpointPort(port *core.EndpointPort, requireName bool, fldPath *fi } // ValidateEndpointsUpdate tests to make sure an endpoints update can be applied. +// NodeName changes are allowed during update to accommodate the case where nodeIP or PodCIDR is reused. +// An existing endpoint ip will have a different nodeName if this happens. func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *core.Endpoints) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newEndpoints.ObjectMeta, &oldEndpoints.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, validateEndpointSubsets(newEndpoints.Subsets, oldEndpoints.Subsets, field.NewPath("subsets"))...) + allErrs = append(allErrs, validateEndpointSubsets(newEndpoints.Subsets, field.NewPath("subsets"))...) allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(newEndpoints.Annotations, field.NewPath("annotations"))...) 
return allErrs } diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go index 64c75344277..aca1a5b3370 100644 --- a/pkg/apis/core/validation/validation_test.go +++ b/pkg/apis/core/validation/validation_test.go @@ -12965,11 +12965,12 @@ func newNodeNameEndpoint(nodeName string) *core.Endpoints { func TestEndpointAddressNodeNameUpdateRestrictions(t *testing.T) { oldEndpoint := newNodeNameEndpoint("kubernetes-node-setup-by-backend") updatedEndpoint := newNodeNameEndpoint("kubernetes-changed-nodename") - // Check that NodeName cannot be changed during update (if already set) + // Check that NodeName can be changed during update, this is to accommodate the case where nodeIP or PodCIDR is reused. + // The same ip will now have a different nodeName. errList := ValidateEndpoints(updatedEndpoint) errList = append(errList, ValidateEndpointsUpdate(updatedEndpoint, oldEndpoint)...) - if len(errList) == 0 { - t.Error("Endpoint should not allow changing of Subset.Addresses.NodeName on update") + if len(errList) != 0 { + t.Error("Endpoint should allow changing of Subset.Addresses.NodeName on update") } } diff --git a/pkg/apis/core/zz_generated.deepcopy.go b/pkg/apis/core/zz_generated.deepcopy.go index 26fe6fa0321..808dad27495 100644 --- a/pkg/apis/core/zz_generated.deepcopy.go +++ b/pkg/apis/core/zz_generated.deepcopy.go @@ -3556,6 +3556,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(string) **out = **in } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } return } diff --git a/pkg/apis/events/doc.go b/pkg/apis/events/doc.go index 15095ad3a29..a2a963f7fcb 100644 --- a/pkg/apis/events/doc.go +++ b/pkg/apis/events/doc.go @@ -15,4 +15,5 @@ limitations under the License. 
*/ // +groupName=events.k8s.io + package events // import "k8s.io/kubernetes/pkg/apis/events" diff --git a/pkg/apis/events/v1beta1/doc.go b/pkg/apis/events/v1beta1/doc.go index 174190914fa..17d89c67972 100644 --- a/pkg/apis/events/v1beta1/doc.go +++ b/pkg/apis/events/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/events/v1beta1 // +groupName=events.k8s.io + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/events/v1beta1" diff --git a/pkg/apis/extensions/v1beta1/defaults_test.go b/pkg/apis/extensions/v1beta1/defaults_test.go index 2d0de110370..fa9b1b6166e 100644 --- a/pkg/apis/extensions/v1beta1/defaults_test.go +++ b/pkg/apis/extensions/v1beta1/defaults_test.go @@ -40,6 +40,7 @@ import ( func TestSetDefaultDaemonSetSpec(t *testing.T) { defaultLabels := map[string]string{"foo": "bar"} period := int64(v1.DefaultTerminationGracePeriodSeconds) + enableServiceLinks := v1.DefaultEnableServiceLinks defaultTemplate := v1.PodTemplateSpec{ Spec: v1.PodSpec{ DNSPolicy: v1.DNSClusterFirst, @@ -47,6 +48,7 @@ func TestSetDefaultDaemonSetSpec(t *testing.T) { SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &period, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, ObjectMeta: metav1.ObjectMeta{ Labels: defaultLabels, @@ -59,6 +61,7 @@ func TestSetDefaultDaemonSetSpec(t *testing.T) { SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &period, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, } tests := []struct { @@ -164,6 +167,7 @@ func TestSetDefaultDeployment(t *testing.T) { defaultIntOrString := intstr.FromInt(1) differentIntOrString := intstr.FromInt(5) period := int64(v1.DefaultTerminationGracePeriodSeconds) + enableServiceLinks := v1.DefaultEnableServiceLinks defaultTemplate := v1.PodTemplateSpec{ Spec: v1.PodSpec{ DNSPolicy: v1.DNSClusterFirst, @@ -171,6 +175,7 @@ func 
TestSetDefaultDeployment(t *testing.T) { SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &period, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, } tests := []struct { diff --git a/pkg/apis/imagepolicy/doc.go b/pkg/apis/imagepolicy/doc.go index a0af1868e13..bfcdee2a481 100644 --- a/pkg/apis/imagepolicy/doc.go +++ b/pkg/apis/imagepolicy/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=imagepolicy.k8s.io + package imagepolicy // import "k8s.io/kubernetes/pkg/apis/imagepolicy" diff --git a/pkg/apis/imagepolicy/v1alpha1/doc.go b/pkg/apis/imagepolicy/v1alpha1/doc.go index b517ce45438..3d596bd022f 100644 --- a/pkg/apis/imagepolicy/v1alpha1/doc.go +++ b/pkg/apis/imagepolicy/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/imagepolicy/v1alpha1 // +groupName=imagepolicy.k8s.io + package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1" diff --git a/pkg/apis/networking/doc.go b/pkg/apis/networking/doc.go index 8b013e34867..218cd6c2f78 100644 --- a/pkg/apis/networking/doc.go +++ b/pkg/apis/networking/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=networking.k8s.io + package networking // import "k8s.io/kubernetes/pkg/apis/networking" diff --git a/pkg/apis/networking/v1/doc.go b/pkg/apis/networking/v1/doc.go index f53cbf3dc65..4a2e671e711 100644 --- a/pkg/apis/networking/v1/doc.go +++ b/pkg/apis/networking/v1/doc.go @@ -20,4 +20,5 @@ limitations under the License. 
// +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/networking/v1 // +groupName=networking.k8s.io + package v1 // import "k8s.io/kubernetes/pkg/apis/networking/v1" diff --git a/pkg/apis/rbac/doc.go b/pkg/apis/rbac/doc.go index bebcb771d12..ea2309eea74 100644 --- a/pkg/apis/rbac/doc.go +++ b/pkg/apis/rbac/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=rbac.authorization.k8s.io + package rbac // import "k8s.io/kubernetes/pkg/apis/rbac" diff --git a/pkg/apis/rbac/v1/doc.go b/pkg/apis/rbac/v1/doc.go index 5608caba705..b96dc30bc51 100644 --- a/pkg/apis/rbac/v1/doc.go +++ b/pkg/apis/rbac/v1/doc.go @@ -21,4 +21,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=rbac.authorization.k8s.io + package v1 // import "k8s.io/kubernetes/pkg/apis/rbac/v1" diff --git a/pkg/apis/rbac/v1alpha1/doc.go b/pkg/apis/rbac/v1alpha1/doc.go index 365f3881436..67aacff933b 100644 --- a/pkg/apis/rbac/v1alpha1/doc.go +++ b/pkg/apis/rbac/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/rbac/v1alpha1 // +groupName=rbac.authorization.k8s.io + package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1" diff --git a/pkg/apis/rbac/v1beta1/doc.go b/pkg/apis/rbac/v1beta1/doc.go index 7ba759013a2..854453e8df1 100644 --- a/pkg/apis/rbac/v1beta1/doc.go +++ b/pkg/apis/rbac/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/rbac/v1beta1 // +groupName=rbac.authorization.k8s.io + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/rbac/v1beta1" diff --git a/pkg/apis/scheduling/doc.go b/pkg/apis/scheduling/doc.go index f2745c227e2..bab0ae332af 100644 --- a/pkg/apis/scheduling/doc.go +++ b/pkg/apis/scheduling/doc.go @@ -16,4 +16,5 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +groupName=scheduling.k8s.io + package scheduling // import "k8s.io/kubernetes/pkg/apis/scheduling" diff --git a/pkg/apis/scheduling/v1alpha1/doc.go b/pkg/apis/scheduling/v1alpha1/doc.go index e2bf21c7697..f3149504705 100644 --- a/pkg/apis/scheduling/v1alpha1/doc.go +++ b/pkg/apis/scheduling/v1alpha1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +groupName=scheduling.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/scheduling/v1alpha1 + package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1" diff --git a/pkg/apis/scheduling/v1beta1/doc.go b/pkg/apis/scheduling/v1beta1/doc.go index 117381f65f4..ba57c832261 100644 --- a/pkg/apis/scheduling/v1beta1/doc.go +++ b/pkg/apis/scheduling/v1beta1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +groupName=scheduling.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/scheduling/v1beta1 + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/scheduling/v1beta1" diff --git a/pkg/apis/settings/doc.go b/pkg/apis/settings/doc.go index 6093e3e8241..297432dceb4 100644 --- a/pkg/apis/settings/doc.go +++ b/pkg/apis/settings/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=settings.k8s.io + package settings // import "k8s.io/kubernetes/pkg/apis/settings" diff --git a/pkg/apis/settings/v1alpha1/doc.go b/pkg/apis/settings/v1alpha1/doc.go index 4422bb3e77b..998e91f833c 100644 --- a/pkg/apis/settings/v1alpha1/doc.go +++ b/pkg/apis/settings/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. 
// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/settings/v1alpha1 // +groupName=settings.k8s.io + package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/settings/v1alpha1" diff --git a/pkg/apis/storage/doc.go b/pkg/apis/storage/doc.go index 5220b981e47..52b2c2d822c 100644 --- a/pkg/apis/storage/doc.go +++ b/pkg/apis/storage/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=storage.k8s.io + package storage // import "k8s.io/kubernetes/pkg/apis/storage" diff --git a/pkg/apis/storage/v1/doc.go b/pkg/apis/storage/v1/doc.go index 617aa14c1aa..4019b756571 100644 --- a/pkg/apis/storage/v1/doc.go +++ b/pkg/apis/storage/v1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +groupName=storage.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/storage/v1 + package v1 diff --git a/pkg/apis/storage/v1alpha1/doc.go b/pkg/apis/storage/v1alpha1/doc.go index 07c766c3a28..e3df1a90696 100644 --- a/pkg/apis/storage/v1alpha1/doc.go +++ b/pkg/apis/storage/v1alpha1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +groupName=storage.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/storage/v1alpha1 + package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/storage/v1alpha1" diff --git a/pkg/apis/storage/v1beta1/doc.go b/pkg/apis/storage/v1beta1/doc.go index a5b0ca68ab8..ed4fe193a11 100644 --- a/pkg/apis/storage/v1beta1/doc.go +++ b/pkg/apis/storage/v1beta1/doc.go @@ -19,4 +19,5 @@ limitations under the License. 
// +groupName=storage.k8s.io // +k8s:defaulter-gen=TypeMeta // +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/storage/v1beta1 + package v1beta1 // import "k8s.io/kubernetes/pkg/apis/storage/v1beta1" diff --git a/pkg/client/chaosclient/chaosclient.go b/pkg/client/chaosclient/chaosclient.go index eeb2c7f05d0..c985891d9dd 100644 --- a/pkg/client/chaosclient/chaosclient.go +++ b/pkg/client/chaosclient/chaosclient.go @@ -62,9 +62,11 @@ type ChaosNotifier interface { // error. type ChaosFunc func(req *http.Request) (bool, *http.Response, error) +// Intercept calls the nested method `Intercept` func (fn ChaosFunc) Intercept(req *http.Request) (bool, *http.Response, error) { return fn.Intercept(req) } + func (fn ChaosFunc) String() string { return runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() } @@ -141,7 +143,7 @@ type Error struct { error } -// C returns the nested error +// Intercept returns the nested error func (e Error) Intercept(_ *http.Request) (bool, *http.Response, error) { return true, nil, e.error } diff --git a/pkg/client/leaderelectionconfig/config.go b/pkg/client/leaderelectionconfig/config.go index 310f1313795..cc9ae7efac9 100644 --- a/pkg/client/leaderelectionconfig/config.go +++ b/pkg/client/leaderelectionconfig/config.go @@ -24,6 +24,7 @@ import ( ) const ( + // DefaultLeaseDuration defines a default duration of lease. DefaultLeaseDuration = 15 * time.Second ) diff --git a/pkg/client/tests/doc.go b/pkg/client/tests/doc.go index 46cb11fe851..342982e5fad 100644 --- a/pkg/client/tests/doc.go +++ b/pkg/client/tests/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// This package runs tests against the client which require an internal client +// Package tests runs tests against the client which require an internal client package tests diff --git a/pkg/cloudprovider/providers/aws/tags_test.go b/pkg/cloudprovider/providers/aws/tags_test.go index c745451431b..a8b55b80293 100644 --- a/pkg/cloudprovider/providers/aws/tags_test.go +++ b/pkg/cloudprovider/providers/aws/tags_test.go @@ -57,6 +57,12 @@ func TestFindClusterID(t *testing.T) { }, ExpectedNew: "a", }, + { + Tags: map[string]string{ + TagNameKubernetesClusterPrefix + "a": "shared", + }, + ExpectedNew: "a", + }, { Tags: map[string]string{ TagNameKubernetesClusterPrefix + "a": "", @@ -108,3 +114,68 @@ func TestFindClusterID(t *testing.T) { } } } + +func TestHasClusterTag(t *testing.T) { + awsServices := NewFakeAWSServices(TestClusterId) + c, err := newAWSCloud(CloudConfig{}, awsServices) + if err != nil { + t.Errorf("Error building aws cloud: %v", err) + return + } + grid := []struct { + Tags map[string]string + Expected bool + }{ + { + Tags: map[string]string{}, + }, + { + Tags: map[string]string{ + TagNameKubernetesClusterLegacy: TestClusterId, + }, + Expected: true, + }, + { + Tags: map[string]string{ + TagNameKubernetesClusterLegacy: "a", + }, + Expected: false, + }, + { + Tags: map[string]string{ + TagNameKubernetesClusterPrefix + TestClusterId: "owned", + }, + Expected: true, + }, + { + Tags: map[string]string{ + TagNameKubernetesClusterPrefix + TestClusterId: "", + }, + Expected: true, + }, + { + Tags: map[string]string{ + TagNameKubernetesClusterLegacy: "a", + TagNameKubernetesClusterPrefix + TestClusterId: "shared", + }, + Expected: true, + }, + { + Tags: map[string]string{ + TagNameKubernetesClusterPrefix + TestClusterId: "shared", + TagNameKubernetesClusterPrefix + "b": "shared", + }, + Expected: true, + }, + } + for _, g := range grid { + var ec2Tags []*ec2.Tag + for k, v := range g.Tags { + ec2Tags = append(ec2Tags, &ec2.Tag{Key: aws.String(k), Value: 
aws.String(v)}) + } + result := c.tagging.hasClusterTag(ec2Tags) + if result != g.Expected { + t.Errorf("Unexpected result for tags %v: %t", g.Tags, result) + } + } +} diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index 4999c921f65..ca864212f97 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -46,13 +46,18 @@ go_library( "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network:go_default_library", diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 5263773e59e..a73d3e13144 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -28,7 +28,11 @@ import ( 
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth" @@ -183,6 +187,10 @@ type Cloud struct { // client for vm sizes list VirtualMachineSizesClient VirtualMachineSizesClient + kubeClient clientset.Interface + eventBroadcaster record.EventBroadcaster + eventRecorder record.EventRecorder + vmCache *timedCache lbCache *timedCache nsgCache *timedCache @@ -383,7 +391,12 @@ func parseConfig(configReader io.Reader) (*Config, error) { } // Initialize passes a Kubernetes clientBuilder interface to the cloud provider -func (az *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {} +func (az *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) { + az.kubeClient = clientBuilder.ClientOrDie("azure-cloud-provider") + az.eventBroadcaster = record.NewBroadcaster() + az.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: az.kubeClient.CoreV1().Events("")}) + az.eventRecorder = az.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "azure-cloud-provider"}) +} // LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise. 
func (az *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) { diff --git a/pkg/cloudprovider/providers/azure/azure_backoff.go b/pkg/cloudprovider/providers/azure/azure_backoff.go index 44998098033..6e37916497f 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff.go @@ -18,13 +18,15 @@ package azure import ( "context" + "fmt" "net/http" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" - "github.com/Azure/go-autorest/autorest" "github.com/golang/glog" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/cloudprovider" @@ -45,6 +47,13 @@ func (az *Cloud) requestBackoff() (resourceRequestBackoff wait.Backoff) { return resourceRequestBackoff } +// Event creates a event for the specified object. +func (az *Cloud) Event(obj runtime.Object, eventtype, reason, message string) { + if obj != nil && reason != "" { + az.eventRecorder.Event(obj, eventtype, reason, message) + } +} + // GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName) (compute.VirtualMachine, error) { var machine compute.VirtualMachine @@ -109,14 +118,14 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string, } // CreateOrUpdateSGWithRetry invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error { +func (az *Cloud) CreateOrUpdateSGWithRetry(service *v1.Service, sg network.SecurityGroup) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.ResourceGroup, *sg.Name, sg) 
glog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name) - done, err := processHTTPRetryResponse(resp, err) + done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateSecurityGroup", resp, err) if done && err == nil { // Invalidate the cache right after updating az.nsgCache.Delete(*sg.Name) @@ -126,14 +135,14 @@ func (az *Cloud) CreateOrUpdateSGWithRetry(sg network.SecurityGroup) error { } // CreateOrUpdateLBWithRetry invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error { +func (az *Cloud) CreateOrUpdateLBWithRetry(service *v1.Service, lb network.LoadBalancer) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.LoadBalancerClient.CreateOrUpdate(ctx, az.ResourceGroup, *lb.Name, lb) glog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name) - done, err := processHTTPRetryResponse(resp, err) + done, err := az.processHTTPRetryResponse(service, "CreateOrUpdateLoadBalancer", resp, err) if done && err == nil { // Invalidate the cache right after updating az.lbCache.Delete(*lb.Name) @@ -143,7 +152,7 @@ func (az *Cloud) CreateOrUpdateLBWithRetry(lb network.LoadBalancer) error { } // ListLBWithRetry invokes az.LoadBalancerClient.List with exponential backoff retry -func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) { +func (az *Cloud) ListLBWithRetry(service *v1.Service) ([]network.LoadBalancer, error) { var allLBs []network.LoadBalancer err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { @@ -153,6 +162,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) { allLBs, retryErr = az.LoadBalancerClient.List(ctx, az.ResourceGroup) if retryErr != nil { + az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", retryErr.Error()) glog.Errorf("LoadBalancerClient.List(%v) - backoff: failure, 
will retry,err=%v", az.ResourceGroup, retryErr) @@ -169,7 +179,7 @@ func (az *Cloud) ListLBWithRetry() ([]network.LoadBalancer, error) { } // ListPIPWithRetry list the PIP resources in the given resource group -func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAddress, error) { +func (az *Cloud) ListPIPWithRetry(service *v1.Service, pipResourceGroup string) ([]network.PublicIPAddress, error) { var allPIPs []network.PublicIPAddress err := wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { @@ -179,6 +189,7 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd allPIPs, retryErr = az.PublicIPAddressesClient.List(ctx, pipResourceGroup) if retryErr != nil { + az.Event(service, v1.EventTypeWarning, "ListPublicIPs", retryErr.Error()) glog.Errorf("PublicIPAddressesClient.List(%v) - backoff: failure, will retry,err=%v", pipResourceGroup, retryErr) @@ -195,48 +206,48 @@ func (az *Cloud) ListPIPWithRetry(pipResourceGroup string) ([]network.PublicIPAd } // CreateOrUpdatePIPWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) CreateOrUpdatePIPWithRetry(pipResourceGroup string, pip network.PublicIPAddress) error { +func (az *Cloud) CreateOrUpdatePIPWithRetry(service *v1.Service, pipResourceGroup string, pip network.PublicIPAddress) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, *pip.Name, pip) glog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, *pip.Name) - return processHTTPRetryResponse(resp, err) + return az.processHTTPRetryResponse(service, "CreateOrUpdatePublicIPAddress", resp, err) }) } // CreateOrUpdateInterfaceWithRetry invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry -func (az *Cloud) 
CreateOrUpdateInterfaceWithRetry(nic network.Interface) error { +func (az *Cloud) CreateOrUpdateInterfaceWithRetry(service *v1.Service, nic network.Interface) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic) glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name) - return processHTTPRetryResponse(resp, err) + return az.processHTTPRetryResponse(service, "CreateOrUpdateInterface", resp, err) }) } // DeletePublicIPWithRetry invokes az.PublicIPAddressesClient.Delete with exponential backoff retry -func (az *Cloud) DeletePublicIPWithRetry(pipResourceGroup string, pipName string) error { +func (az *Cloud) DeletePublicIPWithRetry(service *v1.Service, pipResourceGroup string, pipName string) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName) - return processHTTPRetryResponse(resp, err) + return az.processHTTPRetryResponse(service, "DeletePublicIPAddress", resp, err) }) } // DeleteLBWithRetry invokes az.LoadBalancerClient.Delete with exponential backoff retry -func (az *Cloud) DeleteLBWithRetry(lbName string) error { +func (az *Cloud) DeleteLBWithRetry(service *v1.Service, lbName string) error { return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := az.LoadBalancerClient.Delete(ctx, az.ResourceGroup, lbName) - done, err := processHTTPRetryResponse(resp, err) + done, err := az.processHTTPRetryResponse(service, "DeleteLoadBalancer", resp, err) if done && err == nil { // Invalidate the cache right after deleting az.lbCache.Delete(lbName) @@ -252,7 +263,7 @@ func (az *Cloud) CreateOrUpdateRouteTableWithRetry(routeTable network.RouteTable defer 
cancel() resp, err := az.RouteTablesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, routeTable) - return processHTTPRetryResponse(resp, err) + return az.processHTTPRetryResponse(nil, "", resp, err) }) } @@ -264,7 +275,7 @@ func (az *Cloud) CreateOrUpdateRouteWithRetry(route network.Route) error { resp, err := az.RoutesClient.CreateOrUpdate(ctx, az.ResourceGroup, az.RouteTableName, *route.Name, route) glog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name) - return processHTTPRetryResponse(resp, err) + return az.processHTTPRetryResponse(nil, "", resp, err) }) } @@ -276,7 +287,7 @@ func (az *Cloud) DeleteRouteWithRetry(routeName string) error { resp, err := az.RoutesClient.Delete(ctx, az.ResourceGroup, az.RouteTableName, routeName) glog.V(10).Infof("RoutesClient.Delete(%s): end", az.RouteTableName) - return processHTTPRetryResponse(resp, err) + return az.processHTTPRetryResponse(nil, "", resp, err) }) } @@ -288,7 +299,7 @@ func (az *Cloud) CreateOrUpdateVMWithRetry(resourceGroup, vmName string, newVM c resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, resourceGroup, vmName, newVM) glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName) - return processHTTPRetryResponse(resp, err) + return az.processHTTPRetryResponse(nil, "", resp, err) }) } @@ -297,39 +308,12 @@ func (az *Cloud) UpdateVmssVMWithRetry(ctx context.Context, resourceGroupName st return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) { resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters) glog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s,%s): end", VMScaleSetName, instanceID) - return processHTTPRetryResponse(resp, err) + return az.processHTTPRetryResponse(nil, "", resp, err) }) } -// A wait.ConditionFunc function to deal with common HTTP backoff response conditions -func processRetryResponse(resp autorest.Response, err error) (bool, error) { - if 
isSuccessHTTPResponse(resp) { - glog.V(2).Infof("processRetryResponse: backoff success, HTTP response=%d", resp.StatusCode) - return true, nil - } - if shouldRetryAPIRequest(resp, err) { - glog.Errorf("processRetryResponse: backoff failure, will retry, HTTP response=%d, err=%v", resp.StatusCode, err) - // suppress the error object so that backoff process continues - return false, nil - } - // Fall-through: stop periodic backoff - return true, nil -} - -// shouldRetryAPIRequest determines if the response from an HTTP request suggests periodic retry behavior -func shouldRetryAPIRequest(resp autorest.Response, err error) bool { - if err != nil { - return true - } - // HTTP 4xx or 5xx suggests we should retry - if 399 < resp.StatusCode && resp.StatusCode < 600 { - return true - } - return false -} - // isSuccessHTTPResponse determines if the response from an HTTP request suggests success -func isSuccessHTTPResponse(resp autorest.Response) bool { +func isSuccessHTTPResponse(resp http.Response) bool { // HTTP 2xx suggests a successful response if 199 < resp.StatusCode && resp.StatusCode < 300 { return true @@ -352,18 +336,18 @@ func shouldRetryHTTPRequest(resp *http.Response, err error) bool { return false } -func processHTTPRetryResponse(resp *http.Response, err error) (bool, error) { - if resp != nil { +func (az *Cloud) processHTTPRetryResponse(service *v1.Service, reason string, resp *http.Response, err error) (bool, error) { + if resp != nil && isSuccessHTTPResponse(*resp) { // HTTP 2xx suggests a successful response - if 199 < resp.StatusCode && resp.StatusCode < 300 { - return true, nil - } + return true, nil } if shouldRetryHTTPRequest(resp, err) { if err != nil { + az.Event(service, v1.EventTypeWarning, reason, err.Error()) glog.Errorf("processHTTPRetryResponse: backoff failure, will retry, err=%v", err) } else { + az.Event(service, v1.EventTypeWarning, reason, fmt.Sprintf("Azure HTTP response %d", resp.StatusCode)) glog.Errorf("processHTTPRetryResponse: backoff 
failure, will retry, HTTP response=%d", resp.StatusCode) } diff --git a/pkg/cloudprovider/providers/azure/azure_backoff_test.go b/pkg/cloudprovider/providers/azure/azure_backoff_test.go index 3e60e2460d9..767c7cc30e3 100644 --- a/pkg/cloudprovider/providers/azure/azure_backoff_test.go +++ b/pkg/cloudprovider/providers/azure/azure_backoff_test.go @@ -20,11 +20,9 @@ import ( "fmt" "net/http" "testing" - - "github.com/Azure/go-autorest/autorest" ) -func TestShouldRetry(t *testing.T) { +func TestShouldRetryHTTPRequest(t *testing.T) { tests := []struct { code int err error @@ -54,12 +52,10 @@ func TestShouldRetry(t *testing.T) { } for _, test := range tests { - resp := autorest.Response{ - Response: &http.Response{ - StatusCode: test.code, - }, + resp := &http.Response{ + StatusCode: test.code, } - res := shouldRetryAPIRequest(resp, test.err) + res := shouldRetryHTTPRequest(resp, test.err) if res != test.expected { t.Errorf("expected: %v, saw: %v", test.expected, res) } @@ -86,10 +82,8 @@ func TestIsSuccessResponse(t *testing.T) { } for _, test := range tests { - resp := autorest.Response{ - Response: &http.Response{ - StatusCode: test.code, - }, + resp := http.Response{ + StatusCode: test.code, } res := isSuccessHTTPResponse(resp) if res != test.expected { @@ -99,6 +93,7 @@ func TestIsSuccessResponse(t *testing.T) { } func TestProcessRetryResponse(t *testing.T) { + az := &Cloud{} tests := []struct { code int err error @@ -132,12 +127,10 @@ func TestProcessRetryResponse(t *testing.T) { } for _, test := range tests { - resp := autorest.Response{ - Response: &http.Response{ - StatusCode: test.code, - }, + resp := &http.Response{ + StatusCode: test.code, } - res, err := processRetryResponse(resp, test.err) + res, err := az.processHTTPRetryResponse(nil, "", resp, test.err) if res != test.stop { t.Errorf("expected: %v, saw: %v", test.stop, res) } diff --git a/pkg/cloudprovider/providers/azure/azure_fakes.go b/pkg/cloudprovider/providers/azure/azure_fakes.go index 
67d0b2be278..6d51fc56ea9 100644 --- a/pkg/cloudprovider/providers/azure/azure_fakes.go +++ b/pkg/cloudprovider/providers/azure/azure_fakes.go @@ -207,6 +207,13 @@ func (fAPC *fakeAzurePIPClient) List(ctx context.Context, resourceGroupName stri return value, nil } +func (fAPC *fakeAzurePIPClient) setFakeStore(store map[string]map[string]network.PublicIPAddress) { + fAPC.mutex.Lock() + defer fAPC.mutex.Unlock() + + fAPC.FakeStore = store +} + type fakeAzureInterfacesClient struct { mutex *sync.Mutex FakeStore map[string]map[string]network.Interface @@ -247,7 +254,24 @@ func (fIC *fakeAzureInterfacesClient) Get(ctx context.Context, resourceGroupName } func (fIC *fakeAzureInterfacesClient) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result network.Interface, err error) { - return result, nil + fIC.mutex.Lock() + defer fIC.mutex.Unlock() + if _, ok := fIC.FakeStore[resourceGroupName]; ok { + if entity, ok := fIC.FakeStore[resourceGroupName][networkInterfaceName]; ok { + return entity, nil + } + } + return result, autorest.DetailedError{ + StatusCode: http.StatusNotFound, + Message: "Not such Interface", + } +} + +func (fIC *fakeAzureInterfacesClient) setFakeStore(store map[string]map[string]network.Interface) { + fIC.mutex.Lock() + defer fIC.mutex.Unlock() + + fIC.FakeStore = store } type fakeAzureVirtualMachinesClient struct { @@ -874,11 +898,11 @@ func (f *fakeVMSet) GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availa return nil, fmt.Errorf("unimplemented") } -func (f *fakeVMSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error { +func (f *fakeVMSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error { return fmt.Errorf("unimplemented") } -func (f *fakeVMSet) 
EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { +func (f *fakeVMSet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { return fmt.Errorf("unimplemented") } @@ -894,6 +918,6 @@ func (f *fakeVMSet) GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, e return nil, fmt.Errorf("unimplemented") } -func (f *fakeVMSet) GetProvisioningStateByNodeName(name string) (string, error) { +func (f *fakeVMSet) GetPowerStatusByNodeName(name string) (string, error) { return "", fmt.Errorf("unimplemented") } diff --git a/pkg/cloudprovider/providers/azure/azure_instances.go b/pkg/cloudprovider/providers/azure/azure_instances.go index 4f609fa72f7..88f049956f8 100644 --- a/pkg/cloudprovider/providers/azure/azure_instances.go +++ b/pkg/cloudprovider/providers/azure/azure_instances.go @@ -28,6 +28,12 @@ import ( "k8s.io/apimachinery/pkg/types" ) +const ( + vmPowerStatePrefix = "PowerState/" + vmPowerStateStopped = "stopped" + vmPowerStateDeallocated = "deallocated" +) + // NodeAddresses returns the addresses of the specified instance. func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) { // Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them. @@ -81,6 +87,15 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N if err != nil { return nil, err } + + // Fall back to ARM API if the address is empty string. + // TODO: this is a workaround because IMDS is not stable enough. + // It should be removed after IMDS fixing the issue. + if strings.TrimSpace(ipAddress.PrivateIP) == "" { + return addressGetter(name) + } + + // Use ip address got from instance metadata. 
addresses := []v1.NodeAddress{ {Type: v1.NodeInternalIP, Address: ipAddress.PrivateIP}, {Type: v1.NodeHostName, Address: string(name)}, @@ -148,12 +163,13 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st return false, err } - provisioningState, err := az.vmSet.GetProvisioningStateByNodeName(string(nodeName)) + powerStatus, err := az.vmSet.GetPowerStatusByNodeName(string(nodeName)) if err != nil { return false, err } + glog.V(5).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName) - return strings.ToLower(provisioningState) == "stopped" || strings.ToLower(provisioningState) == "deallocated", nil + return strings.ToLower(powerStatus) == vmPowerStateStopped || strings.ToLower(powerStatus) == vmPowerStateDeallocated, nil } // getComputeMetadata gets compute information from instance metadata. diff --git a/pkg/cloudprovider/providers/azure/azure_instances_test.go b/pkg/cloudprovider/providers/azure/azure_instances_test.go index 106cb0f2767..20680b5150c 100644 --- a/pkg/cloudprovider/providers/azure/azure_instances_test.go +++ b/pkg/cloudprovider/providers/azure/azure_instances_test.go @@ -24,23 +24,41 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/go-autorest/autorest/to" "k8s.io/apimachinery/pkg/types" ) -func setTestVirtualMachines(c *Cloud, vmList []string) { +// setTestVirtualMachines sets test virtual machine with powerstate. 
+func setTestVirtualMachines(c *Cloud, vmList map[string]string) { virtualMachineClient := c.VirtualMachinesClient.(*fakeAzureVirtualMachinesClient) store := map[string]map[string]compute.VirtualMachine{ "rg": make(map[string]compute.VirtualMachine), } - for i := range vmList { - nodeName := vmList[i] - instanceID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", nodeName) - store["rg"][nodeName] = compute.VirtualMachine{ + for nodeName, powerState := range vmList { + instanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", nodeName) + vm := compute.VirtualMachine{ Name: &nodeName, ID: &instanceID, Location: &c.Location, } + if powerState != "" { + status := []compute.InstanceViewStatus{ + { + Code: to.StringPtr(powerState), + }, + { + Code: to.StringPtr("ProvisioningState/succeeded"), + }, + } + vm.VirtualMachineProperties = &compute.VirtualMachineProperties{ + InstanceView: &compute.VirtualMachineInstanceView{ + Statuses: &status, + }, + } + } + + store["rg"][nodeName] = vm } virtualMachineClient.setFakeStore(store) @@ -63,14 +81,14 @@ func TestInstanceID(t *testing.T) { vmList: []string{"vm1"}, nodeName: "vm1", metadataName: "vm1", - expected: "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1", + expected: "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1", }, { name: "InstanceID should get instanceID from Azure API if node is not local instance", vmList: []string{"vm2"}, nodeName: "vm2", metadataName: "vm1", - expected: "/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm2", + expected: "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm2", }, { name: "InstanceID should report error if VM doesn't exist", @@ -96,7 +114,11 @@ func TestInstanceID(t *testing.T) { defer listener.Close() 
cloud.metadata.baseURL = "http://" + listener.Addr().String() + "/" - setTestVirtualMachines(cloud, test.vmList) + vmListWithPowerState := make(map[string]string) + for _, vm := range test.vmList { + vmListWithPowerState[vm] = "" + } + setTestVirtualMachines(cloud, vmListWithPowerState) instanceID, err := cloud.InstanceID(context.Background(), types.NodeName(test.nodeName)) if test.expectError { if err == nil { @@ -113,3 +135,82 @@ func TestInstanceID(t *testing.T) { } } } + +func TestInstanceShutdownByProviderID(t *testing.T) { + testcases := []struct { + name string + vmList map[string]string + nodeName string + expected bool + expectError bool + }{ + { + name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Running status", + vmList: map[string]string{"vm1": "PowerState/Running"}, + nodeName: "vm1", + expected: false, + }, + { + name: "InstanceShutdownByProviderID should return true if the vm is in PowerState/Deallocated status", + vmList: map[string]string{"vm2": "PowerState/Deallocated"}, + nodeName: "vm2", + expected: true, + }, + { + name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Deallocating status", + vmList: map[string]string{"vm3": "PowerState/Deallocating"}, + nodeName: "vm3", + expected: false, + }, + { + name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Starting status", + vmList: map[string]string{"vm4": "PowerState/Starting"}, + nodeName: "vm4", + expected: false, + }, + { + name: "InstanceShutdownByProviderID should return true if the vm is in PowerState/Stopped status", + vmList: map[string]string{"vm5": "PowerState/Stopped"}, + nodeName: "vm5", + expected: true, + }, + { + name: "InstanceShutdownByProviderID should return false if the vm is in PowerState/Stopping status", + vmList: map[string]string{"vm6": "PowerState/Stopping"}, + nodeName: "vm6", + expected: false, + }, + { + name: "InstanceShutdownByProviderID should return false if the vm is in 
PowerState/Unknown status", + vmList: map[string]string{"vm7": "PowerState/Unknown"}, + nodeName: "vm7", + expected: false, + }, + { + name: "InstanceShutdownByProviderID should report error if VM doesn't exist", + vmList: map[string]string{"vm1": "PowerState/running"}, + nodeName: "vm8", + expectError: true, + }, + } + + for _, test := range testcases { + cloud := getTestCloud() + setTestVirtualMachines(cloud, test.vmList) + providerID := "azure://" + cloud.getStandardMachineID("rg", test.nodeName) + hasShutdown, err := cloud.InstanceShutdownByProviderID(context.Background(), providerID) + if test.expectError { + if err == nil { + t.Errorf("Test [%s] unexpected nil err", test.name) + } + } else { + if err != nil { + t.Errorf("Test [%s] unexpected error: %v", test.name, err) + } + } + + if hasShutdown != test.expected { + t.Errorf("Test [%s] unexpected hasShutdown: %v, expected %v", test.name, hasShutdown, test.expected) + } + } +} diff --git a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go index 70ec1e0531e..7d5d176ef92 100644 --- a/pkg/cloudprovider/providers/azure/azure_loadbalancer.go +++ b/pkg/cloudprovider/providers/azure/azure_loadbalancer.go @@ -210,7 +210,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, primaryVMSetName := az.vmSet.GetPrimaryVMSetName() defaultLBName := az.getAzureLoadBalancerName(clusterName, primaryVMSetName, isInternal) - existingLBs, err := az.ListLBWithRetry() + existingLBs, err := az.ListLBWithRetry(service) if err != nil { return nil, nil, false, err } @@ -387,7 +387,7 @@ func (az *Cloud) determinePublicIPName(clusterName string, service *v1.Service) pipResourceGroup := az.getPublicIPAddressResourceGroup(service) - pips, err := az.ListPIPWithRetry(pipResourceGroup) + pips, err := az.ListPIPWithRetry(service, pipResourceGroup) if err != nil { return "", err } @@ -475,7 +475,7 @@ func (az *Cloud) ensurePublicIPExists(service 
*v1.Service, pipName string, domai glog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name) glog.V(10).Infof("CreateOrUpdatePIPWithRetry(%s, %q): start", pipResourceGroup, *pip.Name) - err = az.CreateOrUpdatePIPWithRetry(pipResourceGroup, pip) + err = az.CreateOrUpdatePIPWithRetry(service, pipResourceGroup, pip) if err != nil { glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - creating", serviceName, *pip.Name) return nil, err @@ -488,7 +488,6 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai if err != nil { return nil, err } - return &pip, nil } @@ -811,7 +810,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, // Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB. vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) glog.V(10).Infof("EnsureBackendPoolDeleted(%s, %s): start", lbBackendPoolID, vmSetName) - err := az.vmSet.EnsureBackendPoolDeleted(lbBackendPoolID, vmSetName, lb.BackendAddressPools) + err := az.vmSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools) if err != nil { glog.Errorf("EnsureBackendPoolDeleted(%s, %s) failed: %v", lbBackendPoolID, vmSetName, err) return nil, err @@ -820,7 +819,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, // Remove the LB. 
glog.V(10).Infof("reconcileLoadBalancer: az.DeleteLBWithRetry(%q): start", lbName) - err = az.DeleteLBWithRetry(lbName) + err = az.DeleteLBWithRetry(service, lbName) if err != nil { glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - deleting; no remaining frontendIPConfigurations", serviceName, lbName) return nil, err @@ -828,7 +827,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, glog.V(10).Infof("az.DeleteLBWithRetry(%q): end", lbName) } else { glog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName) - err := az.CreateOrUpdateLBWithRetry(*lb) + err := az.CreateOrUpdateLBWithRetry(service, *lb) if err != nil { glog.V(2).Infof("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating", serviceName, lbName) return nil, err @@ -852,7 +851,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, if wantLb && nodes != nil { // Add the machines to the backend pool if they're not already vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) - err := az.vmSet.EnsureHostsInPool(serviceName, nodes, lbBackendPoolID, vmSetName, isInternal) + err := az.vmSet.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, isInternal) if err != nil { return nil, err } @@ -1145,7 +1144,7 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, sg.SecurityRules = &updatedRules glog.V(2).Infof("reconcileSecurityGroup for service(%s): sg(%s) - updating", serviceName, *sg.Name) glog.V(10).Infof("CreateOrUpdateSGWithRetry(%q): start", *sg.Name) - err := az.CreateOrUpdateSGWithRetry(sg) + err := az.CreateOrUpdateSGWithRetry(service, sg) if err != nil { glog.V(2).Infof("ensure(%s) abort backoff: sg(%s) - updating", serviceName, *sg.Name) // TODO (Nov 2017): remove when augmented security rules are out of preview @@ -1316,7 +1315,7 @@ func (az *Cloud) reconcilePublicIP(clusterName 
string, service *v1.Service, want pipResourceGroup := az.getPublicIPAddressResourceGroup(service) - pips, err := az.ListPIPWithRetry(pipResourceGroup) + pips, err := az.ListPIPWithRetry(service, pipResourceGroup) if err != nil { return nil, err } @@ -1333,7 +1332,7 @@ func (az *Cloud) reconcilePublicIP(clusterName string, service *v1.Service, want } else { glog.V(2).Infof("reconcilePublicIP for service(%s): pip(%s) - deleting", serviceName, pipName) glog.V(10).Infof("DeletePublicIPWithRetry(%s, %q): start", pipResourceGroup, pipName) - err = az.DeletePublicIPWithRetry(pipResourceGroup, pipName) + err = az.DeletePublicIPWithRetry(service, pipResourceGroup, pipName) if err != nil { glog.V(2).Infof("ensure(%s) abort backoff: pip(%s) - deleting", serviceName, pipName) // We let err to pass through diff --git a/pkg/cloudprovider/providers/azure/azure_standard.go b/pkg/cloudprovider/providers/azure/azure_standard.go index d75c7fdc8cd..54b9e3cb11b 100644 --- a/pkg/cloudprovider/providers/azure/azure_standard.go +++ b/pkg/cloudprovider/providers/azure/azure_standard.go @@ -346,13 +346,24 @@ func (as *availabilitySet) GetInstanceIDByNodeName(name string) (string, error) return *machine.ID, nil } -func (as *availabilitySet) GetProvisioningStateByNodeName(name string) (provisioningState string, err error) { +// GetPowerStatusByNodeName returns the power state of the specified node. 
+func (as *availabilitySet) GetPowerStatusByNodeName(name string) (powerState string, err error) { vm, err := as.getVirtualMachine(types.NodeName(name)) if err != nil { - return provisioningState, err + return powerState, err } - return *vm.ProvisioningState, nil + if vm.InstanceView != nil && vm.InstanceView.Statuses != nil { + statuses := *vm.InstanceView.Statuses + for _, status := range statuses { + state := to.String(status.Code) + if strings.HasPrefix(state, vmPowerStatePrefix) { + return strings.TrimPrefix(state, vmPowerStatePrefix), nil + } + } + } + + return "", fmt.Errorf("failed to get power status for node %q", name) } // GetNodeNameByProviderID gets the node name by provider ID. @@ -620,8 +631,9 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(nodeName, vmSetName stri // ensureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is // participating in the specified LoadBalancer Backend Pool. -func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error { +func (as *availabilitySet) ensureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) error { vmName := mapNodeNameToVMName(nodeName) + serviceName := getServiceName(service) nic, err := as.getPrimaryInterfaceWithVMSet(vmName, vmSetName) if err != nil { if err == errNotInVMSet { @@ -689,7 +701,7 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N glog.V(10).Infof("InterfacesClient.CreateOrUpdate(%q): end", *nic.Name) if as.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { glog.V(2).Infof("nicupdate(%s) backing off: nic(%s) - updating, err=%v", serviceName, nicName, err) - retryErr := as.CreateOrUpdateInterfaceWithRetry(nic) + retryErr := as.CreateOrUpdateInterfaceWithRetry(service, nic) if retryErr != nil { err = retryErr glog.V(2).Infof("nicupdate(%s) abort backoff: nic(%s) - 
updating", serviceName, nicName) @@ -704,7 +716,7 @@ func (as *availabilitySet) ensureHostInPool(serviceName string, nodeName types.N // EnsureHostsInPool ensures the given Node's primary IP configurations are // participating in the specified LoadBalancer Backend Pool. -func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error { +func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error { hostUpdates := make([]func() error, 0, len(nodes)) for _, node := range nodes { localNodeName := node.Name @@ -719,9 +731,9 @@ func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Nod } f := func() error { - err := as.ensureHostInPool(serviceName, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal) + err := as.ensureHostInPool(service, types.NodeName(localNodeName), backendPoolID, vmSetName, isInternal) if err != nil { - return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", serviceName, backendPoolID, err) + return fmt.Errorf("ensure(%s): backendPoolID(%s) - failed to ensure host in pool: %q", getServiceName(service), backendPoolID, err) } return nil } @@ -737,7 +749,7 @@ func (as *availabilitySet) EnsureHostsInPool(serviceName string, nodes []*v1.Nod } // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet. -func (as *availabilitySet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { +func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { // Do nothing for availability set. 
return nil } diff --git a/pkg/cloudprovider/providers/azure/azure_vmsets.go b/pkg/cloudprovider/providers/azure/azure_vmsets.go index 4752d3321f8..c772813aa21 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmsets.go +++ b/pkg/cloudprovider/providers/azure/azure_vmsets.go @@ -54,9 +54,9 @@ type VMSet interface { GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error) // EnsureHostsInPool ensures the given Node's primary IP configurations are // participating in the specified LoadBalancer Backend Pool. - EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error + EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet. - EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error + EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error // AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun. AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error @@ -65,6 +65,6 @@ type VMSet interface { // GetDataDisks gets a list of data disks attached to the node. GetDataDisks(nodeName types.NodeName) ([]compute.DataDisk, error) - // GetProvisioningStateByNodeName gets the provisioning state by node name. - GetProvisioningStateByNodeName(name string) (string, error) + // GetPowerStatusByNodeName returns the power state of the specified node. 
+ GetPowerStatusByNodeName(name string) (string, error) } diff --git a/pkg/cloudprovider/providers/azure/azure_vmss.go b/pkg/cloudprovider/providers/azure/azure_vmss.go index d2d1a3b82c2..2c5ac451eda 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss.go @@ -128,13 +128,24 @@ func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm co return ssName, instanceID, *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil } -func (ss *scaleSet) GetProvisioningStateByNodeName(name string) (provisioningState string, err error) { +// GetPowerStatusByNodeName returns the power state of the specified node. +func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) { _, _, vm, err := ss.getVmssVM(name) if err != nil { - return provisioningState, err + return powerState, err } - return *vm.ProvisioningState, nil + if vm.InstanceView != nil && vm.InstanceView.Statuses != nil { + statuses := *vm.InstanceView.Statuses + for _, status := range statuses { + state := to.String(status.Code) + if strings.HasPrefix(state, vmPowerStatePrefix) { + return strings.TrimPrefix(state, vmPowerStatePrefix), nil + } + } + } + + return "", fmt.Errorf("failed to get power status for node %q", name) } // getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache. @@ -280,8 +291,6 @@ func (ss *scaleSet) GetPrimaryVMSetName() string { } // GetIPByNodeName gets machine private IP and public IP by node name. -// TODO(feiskyer): Azure vmss doesn't support associating a public IP to single virtual machine yet, -// fix this after it is supported. 
func (ss *scaleSet) GetIPByNodeName(nodeName string) (string, string, error) { nic, err := ss.GetPrimaryInterface(nodeName) if err != nil { @@ -295,8 +304,30 @@ func (ss *scaleSet) GetIPByNodeName(nodeName string) (string, string, error) { return "", "", err } - targetIP := *ipConfig.PrivateIPAddress - return targetIP, "", nil + internalIP := *ipConfig.PrivateIPAddress + publicIP := "" + if ipConfig.PublicIPAddress != nil && ipConfig.PublicIPAddress.ID != nil { + pipID := *ipConfig.PublicIPAddress.ID + pipName, err := getLastSegment(pipID) + if err != nil { + return "", "", fmt.Errorf("failed to get publicIP name for node %q with pipID %q", nodeName, pipID) + } + + resourceGroup, err := ss.GetNodeResourceGroup(nodeName) + if err != nil { + return "", "", err + } + + pip, existsPip, err := ss.getPublicIPAddress(resourceGroup, pipName) + if err != nil { + return "", "", err + } + if existsPip { + publicIP = *pip.IPAddress + } + } + + return internalIP, publicIP, nil } // This returns the full identifier of the primary NIC for the given VM. 
@@ -533,13 +564,14 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err } // getScaleSetWithRetry gets scale set with exponential backoff retry -func (ss *scaleSet) getScaleSetWithRetry(name string) (compute.VirtualMachineScaleSet, bool, error) { +func (ss *scaleSet) getScaleSetWithRetry(service *v1.Service, name string) (compute.VirtualMachineScaleSet, bool, error) { var result compute.VirtualMachineScaleSet var exists bool err := wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { cached, retryErr := ss.vmssCache.Get(name) if retryErr != nil { + ss.Event(service, v1.EventTypeWarning, "GetVirtualMachineScaleSet", retryErr.Error()) glog.Errorf("backoff: failure for scale set %q, will retry,err=%v", name, retryErr) return false, nil } @@ -590,24 +622,24 @@ func (ss *scaleSet) getPrimaryIPConfigForScaleSet(config *compute.VirtualMachine } // createOrUpdateVMSSWithRetry invokes ss.VirtualMachineScaleSetsClient.CreateOrUpdate with exponential backoff retry. -func (ss *scaleSet) createOrUpdateVMSSWithRetry(virtualMachineScaleSet compute.VirtualMachineScaleSet) error { +func (ss *scaleSet) createOrUpdateVMSSWithRetry(service *v1.Service, virtualMachineScaleSet compute.VirtualMachineScaleSet) error { return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := ss.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, ss.ResourceGroup, *virtualMachineScaleSet.Name, virtualMachineScaleSet) glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", *virtualMachineScaleSet.Name) - return processHTTPRetryResponse(resp, err) + return ss.processHTTPRetryResponse(service, "CreateOrUpdateVMSS", resp, err) }) } // updateVMSSInstancesWithRetry invokes ss.VirtualMachineScaleSetsClient.UpdateInstances with exponential backoff retry. 
-func (ss *scaleSet) updateVMSSInstancesWithRetry(scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error { +func (ss *scaleSet) updateVMSSInstancesWithRetry(service *v1.Service, scaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) error { return wait.ExponentialBackoff(ss.requestBackoff(), func() (bool, error) { ctx, cancel := getContextWithCancel() defer cancel() resp, err := ss.VirtualMachineScaleSetsClient.UpdateInstances(ctx, ss.ResourceGroup, scaleSetName, vmInstanceIDs) glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%s): end", scaleSetName) - return processHTTPRetryResponse(resp, err) + return ss.processHTTPRetryResponse(service, "CreateOrUpdateVMSSInstance", resp, err) }) } @@ -652,9 +684,10 @@ func (ss *scaleSet) getNodesScaleSets(nodes []*v1.Node) (map[string]sets.String, // ensureHostsInVMSetPool ensures the given Node's primary IP configurations are // participating in the vmSet's LoadBalancer Backend Pool. 
-func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error { +func (ss *scaleSet) ensureHostsInVMSetPool(service *v1.Service, backendPoolID string, vmSetName string, instanceIDs []string, isInternal bool) error { glog.V(3).Infof("ensuring hosts %q of scaleset %q in LB backendpool %q", instanceIDs, vmSetName, backendPoolID) - virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(vmSetName) + serviceName := getServiceName(service) + virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(service, vmSetName) if err != nil { glog.Errorf("ss.getScaleSetWithRetry(%s) for service %q failed: %v", vmSetName, serviceName, err) return err @@ -722,7 +755,7 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err) - retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet) + retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet) if retryErr != nil { err = retryErr glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName) @@ -743,7 +776,7 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", vmSetName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) { glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s): scale set (%s) - updating, err=%v", serviceName, vmSetName, err) - retryErr := ss.updateVMSSInstancesWithRetry(vmSetName, vmInstanceIDs) + retryErr := 
ss.updateVMSSInstancesWithRetry(service, vmSetName, vmInstanceIDs) if retryErr != nil { err = retryErr glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances for service (%s) abort backoff: scale set (%s) - updating", serviceName, vmSetName) @@ -758,7 +791,8 @@ func (ss *scaleSet) ensureHostsInVMSetPool(serviceName string, backendPoolID str // EnsureHostsInPool ensures the given Node's primary IP configurations are // participating in the specified LoadBalancer Backend Pool. -func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error { +func (ss *scaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error { + serviceName := getServiceName(service) scalesets, standardNodes, err := ss.getNodesScaleSets(nodes) if err != nil { glog.Errorf("getNodesScaleSets() for service %q failed: %v", serviceName, err) @@ -778,7 +812,7 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back instanceIDs.Insert("*") } - err := ss.ensureHostsInVMSetPool(serviceName, backendPoolID, ssName, instanceIDs.List(), isInternal) + err := ss.ensureHostsInVMSetPool(service, backendPoolID, ssName, instanceIDs.List(), isInternal) if err != nil { glog.Errorf("ensureHostsInVMSetPool() with scaleSet %q for service %q failed: %v", ssName, serviceName, err) return err @@ -786,7 +820,7 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName string, nodes []*v1.Node, back } if ss.useStandardLoadBalancer() && len(standardNodes) > 0 { - err := ss.availabilitySet.EnsureHostsInPool(serviceName, standardNodes, backendPoolID, "", isInternal) + err := ss.availabilitySet.EnsureHostsInPool(service, standardNodes, backendPoolID, "", isInternal) if err != nil { glog.Errorf("availabilitySet.EnsureHostsInPool() for service %q failed: %v", serviceName, err) return err @@ -797,9 +831,9 @@ func (ss *scaleSet) EnsureHostsInPool(serviceName 
string, nodes []*v1.Node, back } // ensureScaleSetBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified scaleset. -func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) error { +func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(service *v1.Service, poolID, ssName string) error { glog.V(3).Infof("ensuring backend pool %q deleted from scaleset %q", poolID, ssName) - virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(ssName) + virtualMachineScaleSet, exists, err := ss.getScaleSetWithRetry(service, ssName) if err != nil { glog.Errorf("ss.ensureScaleSetBackendPoolDeleted(%s, %s) getScaleSetWithRetry(%s) failed: %v", poolID, ssName, ssName, err) return err @@ -851,7 +885,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err) - retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet) + retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet) if retryErr != nil { err = retryErr glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName) @@ -872,7 +906,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro glog.V(10).Infof("VirtualMachineScaleSetsClient.UpdateInstances(%q): end", ssName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(instanceResp, err) { glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances scale set (%s) - updating, err=%v", ssName, err) - retryErr := ss.updateVMSSInstancesWithRetry(ssName, vmInstanceIDs) + retryErr := ss.updateVMSSInstancesWithRetry(service, ssName, vmInstanceIDs) if retryErr != nil { err = retryErr 
glog.V(2).Infof("VirtualMachineScaleSetsClient.UpdateInstances abort backoff: scale set (%s) - updating", ssName) @@ -892,7 +926,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro glog.V(10).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate(%q): end", ssName) if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) { glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate: scale set (%s) - updating, err=%v", ssName, err) - retryErr := ss.createOrUpdateVMSSWithRetry(virtualMachineScaleSet) + retryErr := ss.createOrUpdateVMSSWithRetry(service, virtualMachineScaleSet) if retryErr != nil { glog.V(2).Infof("VirtualMachineScaleSetsClient.CreateOrUpdate abort backoff: scale set (%s) - updating", ssName) } @@ -903,7 +937,7 @@ func (ss *scaleSet) ensureScaleSetBackendPoolDeleted(poolID, ssName string) erro } // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified vmSet. -func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { +func (ss *scaleSet) EnsureBackendPoolDeleted(service *v1.Service, poolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error { if backendAddressPools == nil { return nil } @@ -934,7 +968,7 @@ func (ss *scaleSet) EnsureBackendPoolDeleted(poolID, vmSetName string, backendAd continue } - err := ss.ensureScaleSetBackendPoolDeleted(poolID, ssName) + err := ss.ensureScaleSetBackendPoolDeleted(service, poolID, ssName) if err != nil { glog.Errorf("ensureScaleSetBackendPoolDeleted() with scaleSet %q failed: %v", ssName, err) return err diff --git a/pkg/cloudprovider/providers/azure/azure_vmss_cache.go b/pkg/cloudprovider/providers/azure/azure_vmss_cache.go index c09f71f588f..73bc59ea4e7 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss_cache.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss_cache.go @@ -199,6 +199,24 @@ func (ss *scaleSet) newVmssVMCache() 
(*timedCache, error) { return nil, nil } + // Get instanceView for vmssVM. + if result.InstanceView == nil { + viewCtx, viewCancel := getContextWithCancel() + defer viewCancel() + view, err := ss.VirtualMachineScaleSetVMsClient.GetInstanceView(viewCtx, resourceGroup, ssName, instanceID) + // It is possible that the vmssVM gets removed just before this call. So check whether the VM exist again. + exists, message, realErr = checkResourceExistsFromError(err) + if realErr != nil { + return nil, realErr + } + if !exists { + glog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message) + return nil, nil + } + + result.InstanceView = &view + } + return &result, nil } diff --git a/pkg/cloudprovider/providers/azure/azure_vmss_test.go b/pkg/cloudprovider/providers/azure/azure_vmss_test.go index cdee4aacaca..f7d431992d6 100644 --- a/pkg/cloudprovider/providers/azure/azure_vmss_test.go +++ b/pkg/cloudprovider/providers/azure/azure_vmss_test.go @@ -21,9 +21,16 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" + "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network" + "github.com/Azure/go-autorest/autorest/to" "github.com/stretchr/testify/assert" ) +const ( + fakePrivateIP = "10.240.0.10" + fakePublicIP = "10.10.10.10" +) + func newTestScaleSet(scaleSetName, zone string, faultDomain int32, vmList []string) (*scaleSet, error) { cloud := getTestCloud() setTestVirtualMachineCloud(cloud, scaleSetName, zone, faultDomain, vmList) @@ -37,6 +44,11 @@ func newTestScaleSet(scaleSetName, zone string, faultDomain int32, vmList []stri func setTestVirtualMachineCloud(ss *Cloud, scaleSetName, zone string, faultDomain int32, vmList []string) { virtualMachineScaleSetsClient := newFakeVirtualMachineScaleSetsClient() + virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient() + publicIPAddressesClient := newFakeAzurePIPClient("rg") + interfaceClient := 
newFakeAzureInterfacesClient() + + // set test scale sets. scaleSets := make(map[string]map[string]compute.VirtualMachineScaleSet) scaleSets["rg"] = map[string]compute.VirtualMachineScaleSet{ scaleSetName: { @@ -45,17 +57,27 @@ func setTestVirtualMachineCloud(ss *Cloud, scaleSetName, zone string, faultDomai } virtualMachineScaleSetsClient.setFakeStore(scaleSets) - virtualMachineScaleSetVMsClient := newFakeVirtualMachineScaleSetVMsClient() - ssVMs := make(map[string]map[string]compute.VirtualMachineScaleSetVM) - ssVMs["rg"] = make(map[string]compute.VirtualMachineScaleSetVM) + testInterfaces := map[string]map[string]network.Interface{ + "rg": make(map[string]network.Interface), + } + testPIPs := map[string]map[string]network.PublicIPAddress{ + "rg": make(map[string]network.PublicIPAddress), + } + ssVMs := map[string]map[string]compute.VirtualMachineScaleSetVM{ + "rg": make(map[string]compute.VirtualMachineScaleSetVM), + } for i := range vmList { - ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i) nodeName := vmList[i] + ID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d", scaleSetName, i) + interfaceID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d/networkInterfaces/%s", scaleSetName, i, nodeName) + publicAddressID := fmt.Sprintf("/subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%d/networkInterfaces/%s/ipConfigurations/ipconfig1/publicIPAddresses/%s", scaleSetName, i, nodeName, nodeName) instanceID := fmt.Sprintf("%d", i) vmName := fmt.Sprintf("%s_%s", scaleSetName, instanceID) + + // set vmss virtual machine. 
networkInterfaces := []compute.NetworkInterfaceReference{ { - ID: &nodeName, + ID: &interfaceID, }, } vmssVM := compute.VirtualMachineScaleSetVM{ @@ -75,17 +97,46 @@ func setTestVirtualMachineCloud(ss *Cloud, scaleSetName, zone string, faultDomai Name: &vmName, Location: &ss.Location, } - if zone != "" { zones := []string{zone} vmssVM.Zones = &zones } ssVMs["rg"][vmName] = vmssVM + + // set interfaces. + testInterfaces["rg"][nodeName] = network.Interface{ + ID: &interfaceID, + InterfacePropertiesFormat: &network.InterfacePropertiesFormat{ + IPConfigurations: &[]network.InterfaceIPConfiguration{ + { + InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{ + Primary: to.BoolPtr(true), + PrivateIPAddress: to.StringPtr(fakePrivateIP), + PublicIPAddress: &network.PublicIPAddress{ + ID: to.StringPtr(publicAddressID), + }, + }, + }, + }, + }, + } + + // set public IPs. + testPIPs["rg"][nodeName] = network.PublicIPAddress{ + ID: to.StringPtr(publicAddressID), + PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{ + IPAddress: to.StringPtr(fakePublicIP), + }, + } } virtualMachineScaleSetVMsClient.setFakeStore(ssVMs) + interfaceClient.setFakeStore(testInterfaces) + publicIPAddressesClient.setFakeStore(testPIPs) ss.VirtualMachineScaleSetsClient = virtualMachineScaleSetsClient ss.VirtualMachineScaleSetVMsClient = virtualMachineScaleSetVMsClient + ss.InterfacesClient = interfaceClient + ss.PublicIPAddressesClient = publicIPAddressesClient } func TestGetScaleSetVMInstanceID(t *testing.T) { @@ -216,3 +267,43 @@ func TestGetZoneByNodeName(t *testing.T) { assert.Equal(t, test.expected, real.FailureDomain, test.description) } } + +func TestGetIPByNodeName(t *testing.T) { + testCases := []struct { + description string + scaleSet string + vmList []string + nodeName string + expected []string + expectError bool + }{ + { + description: "GetIPByNodeName should get node's privateIP and publicIP", + scaleSet: "ss", + vmList: 
[]string{"vmssee6c2000000", "vmssee6c2000001"}, + nodeName: "vmssee6c2000000", + expected: []string{fakePrivateIP, fakePublicIP}, + }, + { + description: "GetIPByNodeName should return error for non-exist nodes", + scaleSet: "ss", + vmList: []string{"vmssee6c2000000", "vmssee6c2000001"}, + nodeName: "agente6c2000005", + expectError: true, + }, + } + + for _, test := range testCases { + ss, err := newTestScaleSet(test.scaleSet, "", 0, test.vmList) + assert.NoError(t, err, test.description) + + privateIP, publicIP, err := ss.GetIPByNodeName(test.nodeName) + if test.expectError { + assert.Error(t, err, test.description) + continue + } + + assert.NoError(t, err, test.description) + assert.Equal(t, test.expected, []string{privateIP, publicIP}, test.description) + } +} diff --git a/pkg/cloudprovider/providers/gce/BUILD b/pkg/cloudprovider/providers/gce/BUILD index 94f70388b2e..9565231835a 100644 --- a/pkg/cloudprovider/providers/gce/BUILD +++ b/pkg/cloudprovider/providers/gce/BUILD @@ -20,6 +20,7 @@ go_library( "gce_clusterid.go", "gce_clusters.go", "gce_disks.go", + "gce_fake.go", "gce_firewall.go", "gce_forwardingrule.go", "gce_healthchecks.go", @@ -50,12 +51,12 @@ go_library( "//pkg/cloudprovider/providers/gce/cloud:go_default_library", "//pkg/cloudprovider/providers/gce/cloud/filter:go_default_library", "//pkg/cloudprovider/providers/gce/cloud/meta:go_default_library", + "//pkg/cloudprovider/providers/gce/cloud/mock:go_default_library", "//pkg/controller:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/master/ports:go_default_library", "//pkg/util/net/sets:go_default_library", - "//pkg/util/version:go_default_library", "//pkg/version:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", @@ -67,6 +68,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", @@ -120,7 +122,6 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/github.com/stretchr/testify/require:go_default_library", diff --git a/pkg/cloudprovider/providers/gce/cloud/mock/mock.go b/pkg/cloudprovider/providers/gce/cloud/mock/mock.go index 6a28c9f8e4b..0489bf8d4a3 100644 --- a/pkg/cloudprovider/providers/gce/cloud/mock/mock.go +++ b/pkg/cloudprovider/providers/gce/cloud/mock/mock.go @@ -44,6 +44,8 @@ var ( InUseError = &googleapi.Error{Code: http.StatusBadRequest, Message: "It's being used by god."} // InternalServerError is shared variable with error code StatusInternalServerError for error verification. InternalServerError = &googleapi.Error{Code: http.StatusInternalServerError} + // UnauthorizedErr wraps a Google API error with code StatusForbidden. + UnauthorizedErr = &googleapi.Error{Code: http.StatusForbidden} ) // gceObject is an abstraction of all GCE API object in go client @@ -436,6 +438,82 @@ func UpdateRegionBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga. return nil } +// UpdateBackendServiceHook defines the hook for updating a BackendService. +// It replaces the object with the same key in the mock with the updated object. 
+func UpdateBackendServiceHook(ctx context.Context, key *meta.Key, obj *ga.BackendService, m *cloud.MockBackendServices) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "backendServices") + obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "backendServices", key) + + m.Objects[*key] = &cloud.MockBackendServicesObj{Obj: obj} + return nil +} + +// UpdateAlphaBackendServiceHook defines the hook for updating an alpha BackendService. +// It replaces the object with the same key in the mock with the updated object. +func UpdateAlphaBackendServiceHook(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *cloud.MockAlphaBackendServices) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "alpha", "backendServices") + obj.SelfLink = cloud.SelfLink(meta.VersionAlpha, projectID, "backendServices", key) + + m.Objects[*key] = &cloud.MockBackendServicesObj{Obj: obj} + return nil +} + +// UpdateBetaBackendServiceHook defines the hook for updating an beta BackendService. +// It replaces the object with the same key in the mock with the updated object. 
+func UpdateBetaBackendServiceHook(ctx context.Context, key *meta.Key, obj *beta.BackendService, m *cloud.MockBetaBackendServices) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in BackendServices", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "beta", "backendServices") + obj.SelfLink = cloud.SelfLink(meta.VersionBeta, projectID, "backendServices", key) + + m.Objects[*key] = &cloud.MockBackendServicesObj{Obj: obj} + return nil +} + +// UpdateURLMapHook defines the hook for updating a UrlMap. +// It replaces the object with the same key in the mock with the updated object. +func UpdateURLMapHook(ctx context.Context, key *meta.Key, obj *ga.UrlMap, m *cloud.MockUrlMaps) error { + _, err := m.Get(ctx, key) + if err != nil { + return &googleapi.Error{ + Code: http.StatusNotFound, + Message: fmt.Sprintf("Key: %s was not found in UrlMaps", key.String()), + } + } + + obj.Name = key.Name + projectID := m.ProjectRouter.ProjectID(ctx, "ga", "urlMaps") + obj.SelfLink = cloud.SelfLink(meta.VersionGA, projectID, "urlMaps", key) + + m.Objects[*key] = &cloud.MockUrlMapsObj{Obj: obj} + return nil +} + // InsertFirewallsUnauthorizedErrHook mocks firewall insertion. A forbidden error will be thrown as return. func InsertFirewallsUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *ga.Firewall, m *cloud.MockFirewalls) (bool, error) { return true, &googleapi.Error{Code: http.StatusForbidden} @@ -496,6 +574,16 @@ func DeleteAddressesInternalErrHook(ctx context.Context, key *meta.Key, m *cloud return true, InternalServerError } +// InsertAlphaBackendServiceUnauthorizedErrHook mocks inserting an alpha BackendService and returns a forbidden error. 
+func InsertAlphaBackendServiceUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *cloud.MockAlphaBackendServices) (bool, error) { + return true, UnauthorizedErr +} + +// UpdateAlphaBackendServiceUnauthorizedErrHook mocks updating an alpha BackendService and returns a forbidden error. +func UpdateAlphaBackendServiceUnauthorizedErrHook(ctx context.Context, key *meta.Key, obj *alpha.BackendService, m *cloud.MockAlphaBackendServices) error { + return UnauthorizedErr +} + // GetRegionBackendServicesErrHook mocks getting region backend service and returns an internal server error. func GetRegionBackendServicesErrHook(ctx context.Context, key *meta.Key, m *cloud.MockRegionBackendServices) (bool, *ga.BackendService, error) { return true, nil, InternalServerError diff --git a/pkg/cloudprovider/providers/gce/gce_clusters.go b/pkg/cloudprovider/providers/gce/gce_clusters.go index ecd935716a7..cdc685c58ea 100644 --- a/pkg/cloudprovider/providers/gce/gce_clusters.go +++ b/pkg/cloudprovider/providers/gce/gce_clusters.go @@ -18,6 +18,10 @@ package gce import ( "context" + "errors" + "fmt" + + "github.com/golang/glog" container "google.golang.org/api/container/v1" ) @@ -41,16 +45,22 @@ func (gce *GCECloud) ListClusters(ctx context.Context) ([]string, error) { } func (gce *GCECloud) GetManagedClusters(ctx context.Context) ([]*container.Cluster, error) { - managedClusters := []*container.Cluster{} - for _, zone := range gce.managedZones { - clusters, err := gce.getClustersInZone(zone) - if err != nil { - return nil, err - } - managedClusters = append(managedClusters, clusters...) + var location string + if len(gce.managedZones) > 1 { + // Multiple managed zones means this is a regional cluster + // so use the regional location and not the zone. 
+ location = gce.region + } else if len(gce.managedZones) == 1 { + location = gce.managedZones[0] + } else { + return nil, errors.New(fmt.Sprintf("no zones associated with this cluster(%s)", gce.ProjectID())) + } + clusters, err := gce.getClustersInLocation(location) + if err != nil { + return nil, err } - return managedClusters, nil + return clusters, nil } func (gce *GCECloud) Master(ctx context.Context, clusterName string) (string, error) { @@ -58,7 +68,7 @@ func (gce *GCECloud) Master(ctx context.Context, clusterName string) (string, er } func (gce *GCECloud) listClustersInZone(zone string) ([]string, error) { - clusters, err := gce.getClustersInZone(zone) + clusters, err := gce.getClustersInLocation(zone) if err != nil { return nil, err } @@ -70,13 +80,18 @@ func (gce *GCECloud) listClustersInZone(zone string) ([]string, error) { return result, nil } -func (gce *GCECloud) getClustersInZone(zone string) ([]*container.Cluster, error) { - mc := newClustersMetricContext("list_zone", zone) +func (gce *GCECloud) getClustersInLocation(zoneOrRegion string) ([]*container.Cluster, error) { + // TODO: Issue/68913 migrate metric to list_location instead of list_zone. 
+ mc := newClustersMetricContext("list_zone", zoneOrRegion) // TODO: use PageToken to list all not just the first 500 - list, err := gce.containerService.Projects.Zones.Clusters.List(gce.projectID, zone).Do() + location := getLocationName(gce.projectID, zoneOrRegion) + list, err := gce.containerService.Projects.Locations.Clusters.List(location).Do() if err != nil { return nil, mc.Observe(err) } + if list.Header.Get("nextPageToken") != "" { + glog.Errorf("Failed to get all clusters for request, received next page token %s", list.Header.Get("nextPageToken")) + } return list.Clusters, mc.Observe(nil) } diff --git a/pkg/cloudprovider/providers/gce/gce_fake.go b/pkg/cloudprovider/providers/gce/gce_fake.go new file mode 100644 index 00000000000..f51bc20719f --- /dev/null +++ b/pkg/cloudprovider/providers/gce/gce_fake.go @@ -0,0 +1,83 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gce + +import ( + "fmt" + "net/http" + + compute "google.golang.org/api/compute/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" +) + +type TestClusterValues struct { + ProjectID string + Region string + ZoneName string + SecondaryZoneName string + ClusterID string + ClusterName string +} + +func DefaultTestClusterValues() TestClusterValues { + return TestClusterValues{ + ProjectID: "test-project", + Region: "us-central1", + ZoneName: "us-central1-b", + SecondaryZoneName: "us-central1-c", + ClusterID: "test-cluster-id", + ClusterName: "Test Cluster Name", + } +} + +func FakeGCECloud(vals TestClusterValues) *GCECloud { + return simpleFakeGCECloud(vals) +} + +type fakeRoundTripper struct{} + +func (*fakeRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { + return nil, fmt.Errorf("err: test used fake http client") +} + +// Stubs ClusterID so that ClusterID.getOrInitialize() does not require calling +// gce.Initialize() +func fakeClusterID(clusterID string) ClusterID { + return ClusterID{ + clusterID: &clusterID, + store: cache.NewStore(func(obj interface{}) (string, error) { + return "", nil + }), + } +} + +func simpleFakeGCECloud(vals TestClusterValues) *GCECloud { + client := &http.Client{Transport: &fakeRoundTripper{}} + service, _ := compute.New(client) + gce := &GCECloud{ + region: vals.Region, + service: service, + managedZones: []string{vals.ZoneName}, + projectID: vals.ProjectID, + networkProjectID: vals.ProjectID, + ClusterID: fakeClusterID(vals.ClusterID), + } + c := cloud.NewMockGCE(&gceProjectRouter{gce}) + gce.c = c + return gce +} diff --git a/pkg/cloudprovider/providers/gce/gce_healthchecks.go b/pkg/cloudprovider/providers/gce/gce_healthchecks.go index 8dc913a6089..f8283851687 100644 --- a/pkg/cloudprovider/providers/gce/gce_healthchecks.go +++ b/pkg/cloudprovider/providers/gce/gce_healthchecks.go @@ -24,11 +24,11 @@ import ( compute "google.golang.org/api/compute/v1" 
"k8s.io/api/core/v1" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" "k8s.io/kubernetes/pkg/master/ports" - utilversion "k8s.io/kubernetes/pkg/util/version" ) const ( diff --git a/pkg/cloudprovider/providers/gce/gce_loadbalancer.go b/pkg/cloudprovider/providers/gce/gce_loadbalancer.go index 86eea9f87d4..a457ec4e744 100644 --- a/pkg/cloudprovider/providers/gce/gce_loadbalancer.go +++ b/pkg/cloudprovider/providers/gce/gce_loadbalancer.go @@ -41,10 +41,6 @@ var ( lbSrcRngsFlag cidrs ) -func newLoadBalancerMetricContext(request, region string) *metricContext { - return newGenericMetricContext("loadbalancer", request, region, unusedMetricLabel, computeV1Version) -} - func init() { var err error // LB L7 proxies and all L3/4/7 health checkers have client addresses within these known CIDRs. diff --git a/pkg/cloudprovider/providers/gce/gce_loadbalancer_utils_test.go b/pkg/cloudprovider/providers/gce/gce_loadbalancer_utils_test.go index c21cb2e79f2..bdea0d5eb03 100644 --- a/pkg/cloudprovider/providers/gce/gce_loadbalancer_utils_test.go +++ b/pkg/cloudprovider/providers/gce/gce_loadbalancer_utils_test.go @@ -23,9 +23,7 @@ package gce import ( "context" "fmt" - "net/http" "strings" - "sync" "testing" "time" @@ -35,12 +33,9 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" v1_service "k8s.io/kubernetes/pkg/api/v1/service" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" - "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" - "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" ) @@ -54,26 +49,6 @@ const ( errStrUnsupportedTier = "unsupported network tier: \"" + wrongTier + "\"" ) -type TestClusterValues struct { - 
ProjectID string - Region string - ZoneName string - SecondaryZoneName string - ClusterID string - ClusterName string -} - -func DefaultTestClusterValues() TestClusterValues { - return TestClusterValues{ - ProjectID: "test-project", - Region: "us-central1", - ZoneName: "us-central1-b", - SecondaryZoneName: "us-central1-c", - ClusterID: "test-cluster-id", - ClusterName: "Test Cluster Name", - } -} - func fakeLoadbalancerService(lbType string) *v1.Service { return &v1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -92,73 +67,6 @@ var ( FilewallChangeMsg = fmt.Sprintf("%s %s %s", v1.EventTypeNormal, eventReasonManualChange, eventMsgFirewallChange) ) -type fakeRoundTripper struct{} - -func (*fakeRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { - return nil, fmt.Errorf("err: test used fake http client") -} - -func fakeGCECloud(vals TestClusterValues) (*GCECloud, error) { - client := &http.Client{Transport: &fakeRoundTripper{}} - - service, err := compute.New(client) - if err != nil { - return nil, err - } - - // Used in disk unit tests - fakeManager := newFakeManager(vals.ProjectID, vals.Region) - zonesWithNodes := createNodeZones([]string{vals.ZoneName}) - - alphaFeatureGate := NewAlphaFeatureGate([]string{}) - if err != nil { - return nil, err - } - - gce := &GCECloud{ - region: vals.Region, - service: service, - manager: fakeManager, - managedZones: []string{vals.ZoneName}, - projectID: vals.ProjectID, - networkProjectID: vals.ProjectID, - AlphaFeatureGate: alphaFeatureGate, - nodeZones: zonesWithNodes, - nodeInformerSynced: func() bool { return true }, - ClusterID: fakeClusterID(vals.ClusterID), - } - - c := cloud.NewMockGCE(&gceProjectRouter{gce}) - c.MockTargetPools.AddInstanceHook = mock.AddInstanceHook - c.MockTargetPools.RemoveInstanceHook = mock.RemoveInstanceHook - c.MockForwardingRules.InsertHook = mock.InsertFwdRuleHook - c.MockAddresses.InsertHook = mock.InsertAddressHook - c.MockAlphaAddresses.InsertHook = mock.InsertAlphaAddressHook - 
c.MockAlphaAddresses.X = mock.AddressAttributes{} - c.MockAddresses.X = mock.AddressAttributes{} - - c.MockInstanceGroups.X = mock.InstanceGroupAttributes{ - InstanceMap: make(map[meta.Key]map[string]*compute.InstanceWithNamedPorts), - Lock: &sync.Mutex{}, - } - c.MockInstanceGroups.AddInstancesHook = mock.AddInstancesHook - c.MockInstanceGroups.RemoveInstancesHook = mock.RemoveInstancesHook - c.MockInstanceGroups.ListInstancesHook = mock.ListInstancesHook - - c.MockRegionBackendServices.UpdateHook = mock.UpdateRegionBackendServiceHook - c.MockHealthChecks.UpdateHook = mock.UpdateHealthCheckHook - c.MockFirewalls.UpdateHook = mock.UpdateFirewallHook - - keyGA := meta.GlobalKey("key-ga") - c.MockZones.Objects[*keyGA] = &cloud.MockZonesObj{ - Obj: &compute.Zone{Name: vals.ZoneName, Region: gce.getRegionLink(vals.Region)}, - } - - gce.c = c - - return gce, nil -} - func createAndInsertNodes(gce *GCECloud, nodeNames []string, zoneName string) ([]*v1.Node, error) { nodes := []*v1.Node{} @@ -208,17 +116,6 @@ func createAndInsertNodes(gce *GCECloud, nodeNames []string, zoneName string) ([ return nodes, nil } -// Stubs ClusterID so that ClusterID.getOrInitialize() does not require calling -// gce.Initialize() -func fakeClusterID(clusterID string) ClusterID { - return ClusterID{ - clusterID: &clusterID, - store: cache.NewStore(func(obj interface{}) (string, error) { - return "", nil - }), - } -} - func assertExternalLbResources(t *testing.T, gce *GCECloud, apiService *v1.Service, vals TestClusterValues, nodeNames []string) { lbName := gce.GetLoadBalancerName(context.TODO(), "", apiService) hcName := MakeNodesHealthCheckName(vals.ClusterID) diff --git a/pkg/cloudprovider/providers/gce/gce_util.go b/pkg/cloudprovider/providers/gce/gce_util.go index 363b5173abe..ae61550ca19 100644 --- a/pkg/cloudprovider/providers/gce/gce_util.go +++ b/pkg/cloudprovider/providers/gce/gce_util.go @@ -24,17 +24,55 @@ import ( "regexp" "sort" "strings" + "sync" "k8s.io/api/core/v1" 
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta" + "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/mock" "cloud.google.com/go/compute/metadata" compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" ) +func fakeGCECloud(vals TestClusterValues) (*GCECloud, error) { + gce := simpleFakeGCECloud(vals) + + gce.AlphaFeatureGate = NewAlphaFeatureGate([]string{}) + gce.nodeInformerSynced = func() bool { return true } + + mockGCE := gce.c.(*cloud.MockGCE) + mockGCE.MockTargetPools.AddInstanceHook = mock.AddInstanceHook + mockGCE.MockTargetPools.RemoveInstanceHook = mock.RemoveInstanceHook + mockGCE.MockForwardingRules.InsertHook = mock.InsertFwdRuleHook + mockGCE.MockAddresses.InsertHook = mock.InsertAddressHook + mockGCE.MockAlphaAddresses.InsertHook = mock.InsertAlphaAddressHook + mockGCE.MockAlphaAddresses.X = mock.AddressAttributes{} + mockGCE.MockAddresses.X = mock.AddressAttributes{} + + mockGCE.MockInstanceGroups.X = mock.InstanceGroupAttributes{ + InstanceMap: make(map[meta.Key]map[string]*compute.InstanceWithNamedPorts), + Lock: &sync.Mutex{}, + } + mockGCE.MockInstanceGroups.AddInstancesHook = mock.AddInstancesHook + mockGCE.MockInstanceGroups.RemoveInstancesHook = mock.RemoveInstancesHook + mockGCE.MockInstanceGroups.ListInstancesHook = mock.ListInstancesHook + + mockGCE.MockRegionBackendServices.UpdateHook = mock.UpdateRegionBackendServiceHook + mockGCE.MockHealthChecks.UpdateHook = mock.UpdateHealthCheckHook + mockGCE.MockFirewalls.UpdateHook = mock.UpdateFirewallHook + + keyGA := meta.GlobalKey("key-ga") + mockGCE.MockZones.Objects[*keyGA] = &cloud.MockZonesObj{ + Obj: &compute.Zone{Name: vals.ZoneName, Region: gce.getRegionLink(vals.Region)}, + } + + return gce, nil +} + type gceInstance struct { Zone string Name string @@ -281,3 +319,7 @@ func typeOfNetwork(network *compute.Network) 
netType { return netTypeCustom } + +func getLocationName(project, zoneOrRegion string) string { + return fmt.Sprintf("projects/%s/locations/%s", project, zoneOrRegion) +} diff --git a/pkg/cloudprovider/providers/vsphere/BUILD b/pkg/cloudprovider/providers/vsphere/BUILD index 4f1e053f0ab..c1c3103416a 100644 --- a/pkg/cloudprovider/providers/vsphere/BUILD +++ b/pkg/cloudprovider/providers/vsphere/BUILD @@ -21,10 +21,10 @@ go_library( "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", "//pkg/cloudprovider/providers/vsphere/vclib/diskmanagers:go_default_library", "//pkg/controller:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", diff --git a/pkg/cloudprovider/providers/vsphere/vsphere_util.go b/pkg/cloudprovider/providers/vsphere/vsphere_util.go index 2601032457a..2b5d816cb92 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere_util.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere_util.go @@ -19,24 +19,23 @@ package vsphere import ( "context" "errors" + "fmt" + "io/ioutil" "os" + "path/filepath" "regexp" "strings" "time" "github.com/golang/glog" "github.com/vmware/govmomi/vim25" - - "fmt" - "github.com/vmware/govmomi/vim25/mo" - "io/ioutil" + "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers" - "k8s.io/kubernetes/pkg/util/version" - "path/filepath" ) const ( @@ -298,11 +297,15 @@ func (vs 
*VSphere) cleanUpDummyVMs(dummyVMPrefix string) { continue } // A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests. - defer cleanUpDummyVMLock.Lock() - err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder, dc) - if err != nil { - glog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err) + cleanUpDummyVMs := func() { + cleanUpDummyVMLock.Lock() + defer cleanUpDummyVMLock.Unlock() + err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder, dc) + if err != nil { + glog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err) + } } + cleanUpDummyVMs() } } diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go index d9840830271..4faac5d2fe1 100644 --- a/pkg/controller/daemon/daemon_controller.go +++ b/pkg/controller/daemon/daemon_controller.go @@ -1461,7 +1461,7 @@ func checkNodeFitness(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *s func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var predicateFails []algorithm.PredicateFailureReason - // If ScheduleDaemonSetPods is enabled, only check nodeSelector and nodeAffinity. + // If ScheduleDaemonSetPods is enabled, only check nodeSelector, nodeAffinity and toleration/taint match. 
if utilfeature.DefaultFeatureGate.Enabled(features.ScheduleDaemonSetPods) { fit, reasons, err := checkNodeFitness(pod, nil, nodeInfo) if err != nil { diff --git a/pkg/controller/deployment/util/deployment_util_test.go b/pkg/controller/deployment/util/deployment_util_test.go index fd1b83abc58..7a8738ac003 100644 --- a/pkg/controller/deployment/util/deployment_util_test.go +++ b/pkg/controller/deployment/util/deployment_util_test.go @@ -151,6 +151,7 @@ func randomUID() types.UID { func generateDeployment(image string) apps.Deployment { podLabels := map[string]string{"name": image} terminationSec := int64(30) + enableServiceLinks := v1.DefaultEnableServiceLinks return apps.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: image, @@ -176,6 +177,7 @@ func generateDeployment(image string) apps.Deployment { TerminationGracePeriodSeconds: &terminationSec, RestartPolicy: v1.RestartPolicyAlways, SecurityContext: &v1.PodSecurityContext{}, + EnableServiceLinks: &enableServiceLinks, }, }, }, diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller.go b/pkg/controller/nodelifecycle/node_lifecycle_controller.go index b7fa74e961c..3ad98404cda 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller.go @@ -924,6 +924,7 @@ func (nc *Controller) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.Node v1.NodeOutOfDisk, v1.NodeMemoryPressure, v1.NodeDiskPressure, + v1.NodePIDPressure, // We don't change 'NodeNetworkUnavailable' condition, as it's managed on a control plane level. 
// v1.NodeNetworkUnavailable, } diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go index cd581bc0c08..23db5623a02 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go @@ -1605,6 +1605,14 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) { LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), LastTransitionTime: fakeNow, }, + { + Type: v1.NodePIDPressure, + Status: v1.ConditionUnknown, + Reason: "NodeStatusNeverUpdated", + Message: "Kubelet never posted node status.", + LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), + LastTransitionTime: fakeNow, + }, }, }, }, @@ -1727,6 +1735,14 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) { LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), // should default to node creation time if condition was never updated LastTransitionTime: metav1.Time{Time: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)}, }, + { + Type: v1.NodePIDPressure, + Status: v1.ConditionUnknown, + Reason: "NodeStatusNeverUpdated", + Message: "Kubelet never posted node status.", + LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), // should default to node creation time if condition was never updated + LastTransitionTime: metav1.Time{Time: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)}, + }, }, Capacity: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"), diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go index b899f267863..2e1c763084a 100644 --- a/pkg/controller/podautoscaler/horizontal.go +++ b/pkg/controller/podautoscaler/horizontal.go @@ -464,6 +464,12 @@ func (a *HorizontalController) computeStatusForExternalMetric(currentReplicas in return 0, time.Time{}, "", fmt.Errorf(errMsg) } +func (a 
*HorizontalController) recordInitialRecommendation(currentReplicas int32, key string) { + if a.recommendations[key] == nil { + a.recommendations[key] = []timestampedRecommendation{{currentReplicas, time.Now()}} + } +} + func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.HorizontalPodAutoscaler, key string) error { // make a copy so that we never mutate the shared informer cache (conversion can mutate the object) hpav1 := hpav1Shared.DeepCopy() @@ -508,6 +514,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho } setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededGetScale", "the HPA controller was able to get the target's current scale") currentReplicas := scale.Status.Replicas + a.recordInitialRecommendation(currentReplicas, key) var metricStatuses []autoscalingv2.MetricStatus metricDesiredReplicas := int32(0) diff --git a/pkg/controller/podautoscaler/horizontal_test.go b/pkg/controller/podautoscaler/horizontal_test.go index 939fe8b7b58..0cc8922835f 100644 --- a/pkg/controller/podautoscaler/horizontal_test.go +++ b/pkg/controller/podautoscaler/horizontal_test.go @@ -1112,6 +1112,32 @@ func TestScaleDown(t *testing.T) { reportedLevels: []uint64{100, 300, 500, 250, 250}, reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, useMetricsAPI: true, + recommendations: []timestampedRecommendation{}, + } + tc.runTest(t) +} + +func TestScaleDownStabilizeInitialSize(t *testing.T) { + tc := testCase{ + minReplicas: 2, + maxReplicas: 6, + initialReplicas: 5, + expectedDesiredReplicas: 5, + CPUTarget: 50, + verifyCPUCurrent: true, + reportedLevels: []uint64{100, 300, 500, 250, 250}, + reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + useMetricsAPI: 
true, + recommendations: nil, + expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{ + Type: autoscalingv2.AbleToScale, + Status: v1.ConditionTrue, + Reason: "ReadyForNewScale", + }, autoscalingv2.HorizontalPodAutoscalerCondition{ + Type: autoscalingv2.AbleToScale, + Status: v1.ConditionTrue, + Reason: "ScaleDownStabilized", + }), } tc.runTest(t) } @@ -1139,6 +1165,7 @@ func TestScaleDownCM(t *testing.T) { }, reportedLevels: []uint64{12000, 12000, 12000, 12000, 12000}, reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + recommendations: []timestampedRecommendation{}, } tc.runTest(t) } @@ -1171,6 +1198,7 @@ func TestScaleDownCMObject(t *testing.T) { }, reportedLevels: []uint64{12000}, reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + recommendations: []timestampedRecommendation{}, } tc.runTest(t) } @@ -1195,7 +1223,8 @@ func TestScaleDownCMExternal(t *testing.T) { }, }, }, - reportedLevels: []uint64{8600}, + reportedLevels: []uint64{8600}, + recommendations: []timestampedRecommendation{}, } tc.runTest(t) } @@ -1220,7 +1249,8 @@ func TestScaleDownPerPodCMExternal(t *testing.T) { }, }, }, - reportedLevels: []uint64{8600}, + reportedLevels: []uint64{8600}, + recommendations: []timestampedRecommendation{}, } tc.runTest(t) } @@ -1238,6 +1268,7 @@ func TestScaleDownIncludeUnreadyPods(t *testing.T) { reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, useMetricsAPI: true, reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse}, + recommendations: []timestampedRecommendation{}, } 
tc.runTest(t) } @@ -1255,6 +1286,7 @@ func TestScaleDownIgnoreHotCpuPods(t *testing.T) { reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, useMetricsAPI: true, reportedPodStartTime: []metav1.Time{coolCpuCreationTime(), coolCpuCreationTime(), coolCpuCreationTime(), hotCpuCreationTime(), hotCpuCreationTime()}, + recommendations: []timestampedRecommendation{}, } tc.runTest(t) } @@ -1273,6 +1305,7 @@ func TestScaleDownIgnoresFailedPods(t *testing.T) { useMetricsAPI: true, reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse}, reportedPodPhase: []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed}, + recommendations: []timestampedRecommendation{}, } tc.runTest(t) } @@ -1292,6 +1325,7 @@ func TestScaleDownIgnoresDeletionPods(t *testing.T) { reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse}, reportedPodPhase: []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning}, reportedPodDeletionTimestamp: []bool{false, false, false, false, false, true, true}, + recommendations: []timestampedRecommendation{}, } tc.runTest(t) } @@ -1457,6 +1491,7 @@ func TestMinReplicas(t *testing.T) { Status: v1.ConditionTrue, Reason: "TooFewReplicas", }), + recommendations: []timestampedRecommendation{}, } tc.runTest(t) } @@ -1476,6 +1511,7 @@ func TestMinReplicasDesiredZero(t *testing.T) { Status: v1.ConditionTrue, Reason: "TooFewReplicas", }), + recommendations: []timestampedRecommendation{}, } tc.runTest(t) } @@ -1580,6 +1616,7 @@ func TestMissingMetrics(t *testing.T) { reportedLevels: []uint64{400, 95}, 
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, useMetricsAPI: true, + recommendations: []timestampedRecommendation{}, } tc.runTest(t) } @@ -1665,6 +1702,7 @@ func TestMissingReports(t *testing.T) { reportedLevels: []uint64{200}, reportedCPURequests: []resource.Quantity{resource.MustParse("0.2")}, useMetricsAPI: true, + recommendations: []timestampedRecommendation{}, } tc.runTest(t) } @@ -2168,7 +2206,8 @@ func TestComputedToleranceAlgImplementation(t *testing.T) { resource.MustParse(fmt.Sprint(perPodRequested) + "m"), resource.MustParse(fmt.Sprint(perPodRequested) + "m"), }, - useMetricsAPI: true, + useMetricsAPI: true, + recommendations: []timestampedRecommendation{}, } tc.runTest(t) @@ -2241,6 +2280,7 @@ func TestAvoidUncessaryUpdates(t *testing.T) { reportedPodStartTime: []metav1.Time{coolCpuCreationTime(), hotCpuCreationTime(), hotCpuCreationTime()}, useMetricsAPI: true, lastScaleTime: &now, + recommendations: []timestampedRecommendation{}, } testClient, _, _, _, _ := tc.prepareTestClient(t) tc.testClient = testClient diff --git a/pkg/controller/podautoscaler/legacy_horizontal_test.go b/pkg/controller/podautoscaler/legacy_horizontal_test.go index f29e19012ca..52919a7362a 100644 --- a/pkg/controller/podautoscaler/legacy_horizontal_test.go +++ b/pkg/controller/podautoscaler/legacy_horizontal_test.go @@ -98,7 +98,8 @@ type legacyTestCase struct { resource *fakeResource // Last scale time - lastScaleTime *metav1.Time + lastScaleTime *metav1.Time + recommendations []timestampedRecommendation } // Needs to be called under a lock. 
@@ -504,6 +505,10 @@ func (tc *legacyTestCase) runTest(t *testing.T) { ) hpaController.hpaListerSynced = alwaysReady + if tc.recommendations != nil { + hpaController.recommendations["test-namespace/test-hpa"] = tc.recommendations + } + stop := make(chan struct{}) defer close(stop) informerFactory.Start(stop) @@ -689,6 +694,7 @@ func TestLegacyScaleDown(t *testing.T) { reportedLevels: []uint64{100, 300, 500, 250, 250}, reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, useMetricsAPI: true, + recommendations: []timestampedRecommendation{}, } tc.runTest(t) } @@ -711,6 +717,7 @@ func TestLegacyScaleDownCM(t *testing.T) { }, reportedLevels: []uint64{12, 12, 12, 12, 12}, reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, + recommendations: []timestampedRecommendation{}, } tc.runTest(t) } @@ -728,6 +735,7 @@ func TestLegacyScaleDownIgnoresUnreadyPods(t *testing.T) { reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, useMetricsAPI: true, reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse}, + recommendations: []timestampedRecommendation{}, } tc.runTest(t) } diff --git a/pkg/controller/replicaset/BUILD b/pkg/controller/replicaset/BUILD index be1df4d191c..5d3a179e5a4 100644 --- a/pkg/controller/replicaset/BUILD +++ b/pkg/controller/replicaset/BUILD @@ -83,9 +83,6 @@ filegroup( filegroup( name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/controller/replicaset/options:all-srcs", - ], + srcs = [":package-srcs"], tags = ["automanaged"], ) diff --git a/pkg/controller/replicaset/options/BUILD 
b/pkg/controller/replicaset/options/BUILD deleted file mode 100644 index c668f46a8bc..00000000000 --- a/pkg/controller/replicaset/options/BUILD +++ /dev/null @@ -1,26 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) - -go_library( - name = "go_default_library", - srcs = ["options.go"], - importpath = "k8s.io/kubernetes/pkg/controller/replicaset/options", - deps = ["//vendor/github.com/spf13/pflag:go_default_library"], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) diff --git a/pkg/controller/replicaset/options/options.go b/pkg/controller/replicaset/options/options.go deleted file mode 100644 index 2cfc88a1cec..00000000000 --- a/pkg/controller/replicaset/options/options.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package options - -import ( - "github.com/spf13/pflag" -) - -type ReplicasetControllerOptions struct { - ConcurrentRSSyncs int -} - -func NewReplicasetControllerOptions() ReplicasetControllerOptions { - return ReplicasetControllerOptions{ - ConcurrentRSSyncs: 5, - } -} - -func (o *ReplicasetControllerOptions) AddFlags(fs *pflag.FlagSet) { - fs.IntVar(&o.ConcurrentRSSyncs, "concurrent-replicaset-syncs", o.ConcurrentRSSyncs, "The number of replicasets that are allowed to sync concurrently. Larger number = more reponsive replica management, but more CPU (and network) load") -} diff --git a/pkg/controller/resourcequota/BUILD b/pkg/controller/resourcequota/BUILD index 5c697684479..d09d8f4a76b 100644 --- a/pkg/controller/resourcequota/BUILD +++ b/pkg/controller/resourcequota/BUILD @@ -15,12 +15,10 @@ go_library( ], importpath = "k8s.io/kubernetes/pkg/controller/resourcequota", deps = [ - "//pkg/apis/core:go_default_library", - "//pkg/apis/core/v1:go_default_library", "//pkg/controller:go_default_library", - "//pkg/quota:go_default_library", - "//pkg/quota/evaluator/core:go_default_library", - "//pkg/quota/generic:go_default_library", + "//pkg/quota/v1:go_default_library", + "//pkg/quota/v1/evaluator/core:go_default_library", + "//pkg/quota/v1/generic:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -49,9 +47,9 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/controller:go_default_library", - "//pkg/quota:go_default_library", - "//pkg/quota/generic:go_default_library", - "//pkg/quota/install:go_default_library", + "//pkg/quota/v1:go_default_library", + "//pkg/quota/v1/generic:go_default_library", + "//pkg/quota/v1/install:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/controller/resourcequota/resource_quota_controller.go b/pkg/controller/resourcequota/resource_quota_controller.go index 2b46d45ed38..2212bfd4b16 100644 --- a/pkg/controller/resourcequota/resource_quota_controller.go +++ b/pkg/controller/resourcequota/resource_quota_controller.go @@ -39,10 +39,8 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" - api "k8s.io/kubernetes/pkg/apis/core" - k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/quota" + quota "k8s.io/kubernetes/pkg/quota/v1" ) // NamespacedResourcesFunc knows how to discover namespaced resources. @@ -226,7 +224,7 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) { // if we declared a constraint that has no usage (which this controller can calculate, prioritize it) for constraint := range resourceQuota.Status.Hard { if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound { - matchedResources := []api.ResourceName{api.ResourceName(constraint)} + matchedResources := []v1.ResourceName{v1.ResourceName(constraint)} for _, evaluator := range rq.registry.List() { if intersection := evaluator.MatchingResources(matchedResources); len(intersection) > 0 { rq.missingUsageQueue.Add(key) @@ -320,25 +318,20 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err } // syncResourceQuota runs a complete sync of resource quota status across all known kinds -func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.ResourceQuota) (err error) { +func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota *v1.ResourceQuota) (err error) { // quota is dirty if any part of spec hard limits differs from the status hard limits - dirty := !apiequality.Semantic.DeepEqual(v1ResourceQuota.Spec.Hard, v1ResourceQuota.Status.Hard) - - resourceQuota 
:= api.ResourceQuota{} - if err := k8s_api_v1.Convert_v1_ResourceQuota_To_core_ResourceQuota(v1ResourceQuota, &resourceQuota, nil); err != nil { - return err - } + dirty := !apiequality.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard) // dirty tracks if the usage status differs from the previous sync, // if so, we send a new usage with latest status // if this is our first sync, it will be dirty by default, since we need track usage - dirty = dirty || (resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil) + dirty = dirty || resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil - used := api.ResourceList{} + used := v1.ResourceList{} if resourceQuota.Status.Used != nil { - used = quota.Add(api.ResourceList{}, resourceQuota.Status.Used) + used = quota.Add(v1.ResourceList{}, resourceQuota.Status.Used) } - hardLimits := quota.Add(api.ResourceList{}, resourceQuota.Spec.Hard) + hardLimits := quota.Add(v1.ResourceList{}, resourceQuota.Spec.Hard) newUsage, err := quota.CalculateUsage(resourceQuota.Namespace, resourceQuota.Spec.Scopes, hardLimits, rq.registry, resourceQuota.Spec.ScopeSelector) if err != nil { @@ -354,14 +347,14 @@ func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.Resourc // Create a usage object that is based on the quota resource version that will handle updates // by default, we preserve the past usage observation, and set hard to the current spec - usage := api.ResourceQuota{ + usage := v1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{ Name: resourceQuota.Name, Namespace: resourceQuota.Namespace, ResourceVersion: resourceQuota.ResourceVersion, Labels: resourceQuota.Labels, Annotations: resourceQuota.Annotations}, - Status: api.ResourceQuotaStatus{ + Status: v1.ResourceQuotaStatus{ Hard: hardLimits, Used: used, }, @@ -371,11 +364,7 @@ func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota *v1.Resourc // there was a change observed by this controller that requires we 
update quota if dirty { - v1Usage := &v1.ResourceQuota{} - if err := k8s_api_v1.Convert_core_ResourceQuota_To_v1_ResourceQuota(&usage, v1Usage, nil); err != nil { - return err - } - _, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(v1Usage) + _, err = rq.rqClient.ResourceQuotas(usage.Namespace).UpdateStatus(&usage) return err } return nil @@ -406,12 +395,7 @@ func (rq *ResourceQuotaController) replenishQuota(groupResource schema.GroupReso // only queue those quotas that are tracking a resource associated with this kind. for i := range resourceQuotas { resourceQuota := resourceQuotas[i] - internalResourceQuota := &api.ResourceQuota{} - if err := k8s_api_v1.Convert_v1_ResourceQuota_To_core_ResourceQuota(resourceQuota, internalResourceQuota, nil); err != nil { - glog.Error(err) - continue - } - resourceQuotaResources := quota.ResourceNames(internalResourceQuota.Status.Hard) + resourceQuotaResources := quota.ResourceNames(resourceQuota.Status.Hard) if intersection := evaluator.MatchingResources(resourceQuotaResources); len(intersection) > 0 { // TODO: make this support targeted replenishment to a specific kind, right now it does a full recalc on that quota. 
rq.enqueueResourceQuota(resourceQuota) diff --git a/pkg/controller/resourcequota/resource_quota_controller_test.go b/pkg/controller/resourcequota/resource_quota_controller_test.go index a953e1ca91e..769b23022a3 100644 --- a/pkg/controller/resourcequota/resource_quota_controller_test.go +++ b/pkg/controller/resourcequota/resource_quota_controller_test.go @@ -33,9 +33,9 @@ import ( core "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" - "k8s.io/kubernetes/pkg/quota/install" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" + "k8s.io/kubernetes/pkg/quota/v1/install" ) func getResourceList(cpu, memory string) v1.ResourceList { diff --git a/pkg/controller/resourcequota/resource_quota_monitor.go b/pkg/controller/resourcequota/resource_quota_monitor.go index be87777e0f0..aa77fca731f 100644 --- a/pkg/controller/resourcequota/resource_quota_monitor.go +++ b/pkg/controller/resourcequota/resource_quota_monitor.go @@ -33,9 +33,9 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/evaluator/core" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/evaluator/core" + "k8s.io/kubernetes/pkg/quota/v1/generic" ) type eventType int diff --git a/pkg/controller/volume/attachdetach/BUILD b/pkg/controller/volume/attachdetach/BUILD index 13da43f203d..b3e919746bf 100644 --- a/pkg/controller/volume/attachdetach/BUILD +++ b/pkg/controller/volume/attachdetach/BUILD @@ -19,7 +19,6 @@ go_library( "//pkg/controller/volume/attachdetach/reconciler:go_default_library", "//pkg/controller/volume/attachdetach/statusupdater:go_default_library", "//pkg/controller/volume/attachdetach/util:go_default_library", - "//pkg/features:go_default_library", 
"//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", @@ -27,15 +26,11 @@ go_library( "//pkg/volume/util/volumepathhandler:go_default_library", "//staging/src/k8s.io/api/authentication/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", - "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library", @@ -44,7 +39,6 @@ go_library( "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", - "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", "//vendor/github.com/golang/glog:go_default_library", ], @@ -60,7 +54,6 @@ go_test( "//pkg/controller/volume/attachdetach/testing:go_default_library", "//pkg/volume:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller.go b/pkg/controller/volume/attachdetach/attach_detach_controller.go index 8895a211c68..b7f07b16c38 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller.go @@ -21,21 +21,16 @@ package attachdetach import ( "fmt" "net" - "reflect" "time" "github.com/golang/glog" authenticationv1 "k8s.io/api/authentication/v1" "k8s.io/api/core/v1" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" coreinformers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" @@ -44,7 +39,6 @@ import ( kcache "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" - csiapiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1" csiclient "k8s.io/csi-api/pkg/client/clientset/versioned" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller" @@ -54,7 +48,6 @@ import ( "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/util" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -105,7 +98,6 @@ type 
AttachDetachController interface { func NewAttachDetachController( kubeClient clientset.Interface, csiClient csiclient.Interface, - crdClient apiextensionsclient.Interface, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, pvcInformer coreinformers.PersistentVolumeClaimInformer, @@ -133,7 +125,6 @@ func NewAttachDetachController( adc := &attachDetachController{ kubeClient: kubeClient, csiClient: csiClient, - crdClient: crdClient, pvcLister: pvcInformer.Lister(), pvcsSynced: pvcInformer.Informer().HasSynced, pvLister: pvInformer.Lister(), @@ -147,14 +138,6 @@ func NewAttachDetachController( pvcQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcs"), } - // Install required CSI CRDs on API server - if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) { - adc.installCSIDriverCRD() - } - if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) { - adc.installCSINodeInfoCRD() - } - if err := adc.volumePluginMgr.InitPlugins(plugins, prober, adc); err != nil { return nil, fmt.Errorf("Could not initialize volume plugins for Attach/Detach Controller: %+v", err) } @@ -257,14 +240,10 @@ type attachDetachController struct { // the API server. kubeClient clientset.Interface - // csiClient is the client used to read/write csi.storage.k8s.io API objects - // from the API server. + // csiClient is the csi.storage.k8s.io API client used by volumehost to communicate with + // the API server. csiClient csiclient.Interface - // crdClient is the client used to read/write apiextensions.k8s.io objects - // from the API server. - crdClient apiextensionsclient.Interface - // pvcLister is the shared PVC lister used to fetch and store PVC // objects from the API server. It is shared with other controllers and // therefore the PVC objects in its store should be treated as immutable. 
@@ -670,71 +649,6 @@ func (adc *attachDetachController) processVolumesInUse( } } -func (adc *attachDetachController) installCSIDriverCRD() error { - crd := &apiextensionsv1beta1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: csiapiv1alpha1.CsiDriverResourcePlural + "." + csiapiv1alpha1.GroupName, - }, - Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ - Group: csiapiv1alpha1.GroupName, - Version: csiapiv1alpha1.SchemeGroupVersion.Version, - Scope: apiextensionsv1beta1.ClusterScoped, - Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ - Plural: csiapiv1alpha1.CsiDriverResourcePlural, - Kind: reflect.TypeOf(csiapiv1alpha1.CSIDriver{}).Name(), - }, - }, - } - res, err := adc.crdClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) - - if err == nil { - glog.Infof("CSIDrivers CRD created successfully: %#v", - res) - } else if apierrors.IsAlreadyExists(err) { - glog.Warningf("CSIDrivers CRD already exists: %#v, err: %#v", - res, err) - } else { - glog.Errorf("failed to create CSIDrivers CRD: %#v, err: %#v", - res, err) - return err - } - - return nil -} - -// installCRDs creates the specified CustomResourceDefinition for the CSIDrivers object. -func (adc *attachDetachController) installCSINodeInfoCRD() error { - crd := &apiextensionsv1beta1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: csiapiv1alpha1.CsiNodeInfoResourcePlural + "." 
+ csiapiv1alpha1.GroupName, - }, - Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ - Group: csiapiv1alpha1.GroupName, - Version: csiapiv1alpha1.SchemeGroupVersion.Version, - Scope: apiextensionsv1beta1.ClusterScoped, - Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ - Plural: csiapiv1alpha1.CsiNodeInfoResourcePlural, - Kind: reflect.TypeOf(csiapiv1alpha1.CSINodeInfo{}).Name(), - }, - }, - } - res, err := adc.crdClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) - - if err == nil { - glog.Infof("CSINodeInfo CRD created successfully: %#v", - res) - } else if apierrors.IsAlreadyExists(err) { - glog.Warningf("CSINodeInfo CRD already exists: %#v, err: %#v", - res, err) - } else { - glog.Errorf("failed to create CSINodeInfo CRD: %#v, err: %#v", - res, err) - return err - } - - return nil -} - // VolumeHost implementation // This is an unfortunate requirement of the current factoring of volume plugin // initializing code. It requires kubelet specific methods used by the mounting diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go index 1ded525fd8f..f8503025217 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go @@ -22,7 +22,6 @@ import ( "time" "k8s.io/api/core/v1" - fakeapiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" @@ -37,13 +36,11 @@ func Test_NewAttachDetachController_Positive(t *testing.T) { // Arrange fakeKubeClient := controllervolumetesting.CreateTestClient() informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc()) - fakeApiExtensionsClient := fakeapiextensionsclient.NewSimpleClientset() // Act _, err := NewAttachDetachController( 
fakeKubeClient, nil, /* csiClient */ - fakeApiExtensionsClient, /* crdClient */ informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Nodes(), informerFactory.Core().V1().PersistentVolumeClaims(), @@ -149,7 +146,6 @@ func Test_AttachDetachControllerRecovery(t *testing.T) { func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 []*v1.Pod) { fakeKubeClient := controllervolumetesting.CreateTestClient() - fakeApiExtensionsClient := fakeapiextensionsclient.NewSimpleClientset() informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, time.Second*1) //informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, time.Second*1) plugins := controllervolumetesting.CreateTestPlugin() @@ -220,7 +216,6 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 adcObj, err := NewAttachDetachController( fakeKubeClient, nil, /* csiClient */ - fakeApiExtensionsClient, /* crdClient */ informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Nodes(), informerFactory.Core().V1().PersistentVolumeClaims(), diff --git a/pkg/controller/volume/persistentvolume/binder_test.go b/pkg/controller/volume/persistentvolume/binder_test.go index d91156281ab..c278c8c01d1 100644 --- a/pkg/controller/volume/persistentvolume/binder_test.go +++ b/pkg/controller/volume/persistentvolume/binder_test.go @@ -45,7 +45,7 @@ func TestSync(t *testing.T) { { // syncClaim binds to a matching unbound volume. 
"1-1 - successful bind", - newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController), newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil), newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted), @@ -54,8 +54,8 @@ func TestSync(t *testing.T) { { // syncClaim does not do anything when there is no matching volume. "1-2 - noop", - newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newClaimArray("claim1-2", "uid1-2", "10Gi", "", v1.ClaimPending, nil), newClaimArray("claim1-2", "uid1-2", "10Gi", "", v1.ClaimPending, nil), []string{"Normal FailedBinding"}, @@ -65,8 +65,8 @@ func TestSync(t *testing.T) { // syncClaim resets claim.Status to Pending when there is no // matching volume. 
"1-3 - reset to Pending", - newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newClaimArray("claim1-3", "uid1-3", "10Gi", "", v1.ClaimBound, nil), newClaimArray("claim1-3", "uid1-3", "10Gi", "", v1.ClaimPending, nil), []string{"Normal FailedBinding"}, @@ -76,11 +76,11 @@ func TestSync(t *testing.T) { // syncClaim binds claims to the smallest matching volume "1-4 - smallest volume", []*v1.PersistentVolume{ - newVolume("volume1-4_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolume("volume1-4_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-4_1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-4_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), }, []*v1.PersistentVolume{ - newVolume("volume1-4_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-4_1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newVolume("volume1-4_2", "1Gi", "uid1-4", "claim1-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController), }, newClaimArray("claim1-4", "uid1-4", "1Gi", "", v1.ClaimPending, nil), @@ -92,12 +92,12 @@ func TestSync(t *testing.T) { // name), even though a smaller one is available. 
"1-5 - prebound volume by name - success", []*v1.PersistentVolume{ - newVolume("volume1-5_1", "10Gi", "", "claim1-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolume("volume1-5_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-5_1", "10Gi", "", "claim1-5", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-5_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), }, []*v1.PersistentVolume{ newVolume("volume1-5_1", "10Gi", "uid1-5", "claim1-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolume("volume1-5_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-5_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), }, newClaimArray("claim1-5", "uid1-5", "1Gi", "", v1.ClaimPending, nil), withExpectedCapacity("10Gi", newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)), @@ -108,12 +108,12 @@ func TestSync(t *testing.T) { // UID), even though a smaller one is available. 
"1-6 - prebound volume by UID - success", []*v1.PersistentVolume{ - newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolume("volume1-6_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-6_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), }, []*v1.PersistentVolume{ newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolume("volume1-6_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-6_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), }, newClaimArray("claim1-6", "uid1-6", "1Gi", "", v1.ClaimPending, nil), withExpectedCapacity("10Gi", newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)), @@ -123,8 +123,8 @@ func TestSync(t *testing.T) { // syncClaim does not bind claim to a volume prebound to a claim with // same name and different UID "1-7 - prebound volume to different claim", - newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newClaimArray("claim1-7", "uid1-7", "1Gi", "", v1.ClaimPending, nil), newClaimArray("claim1-7", "uid1-7", "1Gi", "", v1.ClaimPending, nil), []string{"Normal FailedBinding"}, @@ 
-134,7 +134,7 @@ func TestSync(t *testing.T) { // syncClaim completes binding - simulates controller crash after // PV.ClaimRef is saved "1-8 - complete bind after crash - PV bound", - newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController), + newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController), newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController), newClaimArray("claim1-8", "uid1-8", "1Gi", "", v1.ClaimPending, nil), newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", v1.ClaimBound, nil, annBoundByController, annBindCompleted), @@ -163,7 +163,7 @@ func TestSync(t *testing.T) { { // syncClaim binds a claim only when the label selector matches the volume "1-11 - bind when selector matches", - withLabels(labels, newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withLabels(labels, newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withLabels(labels, newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil)), withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)), @@ -172,8 +172,8 @@ func TestSync(t *testing.T) { { // syncClaim does not bind a claim when the label selector doesn't match "1-12 - do not bind when selector does not match", - newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, 
v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil)), withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, nil)), []string{"Normal FailedBinding"}, @@ -182,8 +182,8 @@ func TestSync(t *testing.T) { { // syncClaim does not do anything when binding is delayed "1-13 - delayed binding", - newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classWait), - newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classWait), + newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classWait), + newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classWait), newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, &classWait), newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, &classWait), []string{"Normal WaitForFirstConsumer"}, @@ -192,7 +192,7 @@ func TestSync(t *testing.T) { { // syncClaim binds when binding is delayed but PV is prebound to PVC "1-14 - successful prebound PV", - newVolumeArray("volume1-1", "1Gi", "", "claim1-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classWait), + newVolumeArray("volume1-1", "1Gi", "", "claim1-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classWait), newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classWait), newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending, &classWait), newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, &classWait, annBoundByController, annBindCompleted), @@ -203,12 +203,12 
@@ func TestSync(t *testing.T) { // even if there is smaller volume available "1-15 - successful prebound PVC", []*v1.PersistentVolume{ - newVolume("volume1-15_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolume("volume1-15_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-15_1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-15_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), }, []*v1.PersistentVolume{ newVolume("volume1-15_1", "10Gi", "uid1-15", "claim1-15", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController), - newVolume("volume1-15_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-15_2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), }, newClaimArray("claim1-15", "uid1-15", "1Gi", "volume1-15_1", v1.ClaimPending, nil), withExpectedCapacity("10Gi", newClaimArray("claim1-15", "uid1-15", "1Gi", "volume1-15_1", v1.ClaimBound, nil, annBindCompleted)), @@ -218,12 +218,35 @@ func TestSync(t *testing.T) { // syncClaim does not bind pre-bound PVC to PV with different AccessMode "1-16 - successful prebound PVC", // PV has ReadWriteOnce - newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume1-16", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), claimWithAccessMode([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, newClaimArray("claim1-16", "uid1-16", "1Gi", "volume1-16", v1.ClaimPending, nil)), 
claimWithAccessMode([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}, newClaimArray("claim1-16", "uid1-16", "1Gi", "volume1-16", v1.ClaimPending, nil)), noevents, noerrors, testSyncClaim, }, + { + // syncClaim does not bind PVC to non-available PV if it's not pre-bind + "1-17 - skip non-available PV if it's not pre-bind", + []*v1.PersistentVolume{ + newVolume("volume1-17-pending", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-17-failed", "1Gi", "", "", v1.VolumeFailed, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-17-released", "1Gi", "", "", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-17-empty", "1Gi", "", "", "", v1.PersistentVolumeReclaimRetain, classEmpty), + }, + []*v1.PersistentVolume{ + newVolume("volume1-17-pending", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-17-failed", "1Gi", "", "", v1.VolumeFailed, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-17-released", "1Gi", "", "", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume1-17-empty", "1Gi", "", "", "", v1.PersistentVolumeReclaimRetain, classEmpty), + }, + []*v1.PersistentVolumeClaim{ + newClaim("claim1-17", "uid1-17", "1Gi", "", v1.ClaimPending, nil), + }, + []*v1.PersistentVolumeClaim{ + newClaim("claim1-17", "uid1-17", "1Gi", "", v1.ClaimPending, nil), + }, + noevents, noerrors, testSyncClaim, + }, // [Unit test set 2] User asked for a specific PV. // Test the binding when pv.ClaimRef is already set by controller or @@ -251,7 +274,7 @@ func TestSync(t *testing.T) { // syncClaim with claim pre-bound to a PV that exists and is // unbound. Check it gets bound and no annBoundByController is set. 
"2-3 - claim prebound to unbound volume", - newVolumeArray("volume2-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume2-3", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController), newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimPending, nil), newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimBound, nil, annBindCompleted), @@ -261,7 +284,7 @@ func TestSync(t *testing.T) { // claim with claim pre-bound to a PV that is pre-bound to the claim // by name. Check it gets bound and no annBoundByController is set. "2-4 - claim prebound to prebound volume by name", - newVolumeArray("volume2-4", "1Gi", "", "claim2-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume2-4", "1Gi", "", "claim2-4", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty), newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", v1.ClaimPending, nil), newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", v1.ClaimBound, nil, annBindCompleted), @@ -272,7 +295,7 @@ func TestSync(t *testing.T) { // claim by UID. Check it gets bound and no annBoundByController is // set. 
"2-5 - claim prebound to prebound volume by UID", - newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty), newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", v1.ClaimPending, nil), newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", v1.ClaimBound, nil, annBindCompleted), @@ -303,7 +326,7 @@ func TestSync(t *testing.T) { // unbound, but does not match the selector. Check it gets bound // and no annBoundByController is set. "2-8 - claim prebound to unbound volume that does not match the selector", - newVolumeArray("volume2-8", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume2-8", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newVolumeArray("volume2-8", "1Gi", "uid2-8", "claim2-8", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController), withLabelSelector(labels, newClaimArray("claim2-8", "uid2-8", "1Gi", "volume2-8", v1.ClaimPending, nil)), withLabelSelector(labels, newClaimArray("claim2-8", "uid2-8", "1Gi", "volume2-8", v1.ClaimBound, nil, annBindCompleted)), @@ -314,8 +337,8 @@ func TestSync(t *testing.T) { // unbound, but its size is smaller than requested. 
//Check that the claim status is reset to Pending "2-9 - claim prebound to unbound volume that size is smaller than requested", - newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newClaimArray("claim2-9", "uid2-9", "2Gi", "volume2-9", v1.ClaimBound, nil), newClaimArray("claim2-9", "uid2-9", "2Gi", "volume2-9", v1.ClaimPending, nil), []string{"Warning VolumeMismatch"}, noerrors, testSyncClaim, @@ -324,8 +347,8 @@ func TestSync(t *testing.T) { // syncClaim with claim pre-bound to a PV that exists and is // unbound, but its class does not match. Check that the claim status is reset to Pending "2-10 - claim prebound to unbound volume that class is different", - newVolumeArray("volume2-10", "1Gi", "1", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold), - newVolumeArray("volume2-10", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold), + newVolumeArray("volume2-10", "1Gi", "1", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold), + newVolumeArray("volume2-10", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold), newClaimArray("claim2-10", "uid2-10", "1Gi", "volume2-10", v1.ClaimBound, nil), newClaimArray("claim2-10", "uid2-10", "1Gi", "volume2-10", v1.ClaimPending, nil), []string{"Warning VolumeMismatch"}, noerrors, testSyncClaim, @@ -356,7 +379,7 @@ func TestSync(t *testing.T) { // syncClaim with claim bound to unbound volume. Check it's bound. 
// Also check that Pending phase is set to Bound "3-3 - bound claim with unbound volume", - newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController), newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimPending, nil, annBoundByController, annBindCompleted), newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted), @@ -366,8 +389,8 @@ func TestSync(t *testing.T) { // syncClaim with claim bound to volume with missing (or different) // volume.Spec.ClaimRef.UID. Check that the claim is marked as lost. "3-4 - bound claim with prebound volume", - newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", v1.ClaimPending, nil, annBoundByController, annBindCompleted), newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", v1.ClaimLost, nil, annBoundByController, annBindCompleted), []string{"Warning ClaimMisbound"}, noerrors, testSyncClaim, @@ -377,7 +400,7 @@ func TestSync(t *testing.T) { // controller does not do anything. 
Also check that Pending phase is // set to Bound "3-5 - bound claim with bound volume", - newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty), newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", v1.ClaimPending, nil, annBindCompleted), newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", v1.ClaimBound, nil, annBindCompleted), @@ -388,8 +411,8 @@ func TestSync(t *testing.T) { // claim. Check that the claim is marked as lost. // TODO: test that an event is emitted "3-6 - bound claim with bound volume", - newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", v1.ClaimPending, nil, annBindCompleted), newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", v1.ClaimLost, nil, annBindCompleted), []string{"Warning ClaimMisbound"}, noerrors, testSyncClaim, @@ -399,7 +422,7 @@ func TestSync(t *testing.T) { // even if the claim's selector doesn't match the volume. 
Also // check that Pending phase is set to Bound "3-7 - bound claim with unbound volume where selector doesn't match", - newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController), withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimPending, nil, annBoundByController, annBindCompleted)), withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted)), @@ -409,7 +432,7 @@ func TestSync(t *testing.T) { { // syncVolume with pending volume. Check it's marked as Available. "4-1 - pending volume", - newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), noclaims, noclaims, @@ -419,7 +442,7 @@ func TestSync(t *testing.T) { // syncVolume with prebound pending volume. Check it's marked as // Available. 
"4-2 - pending prebound volume", - newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), noclaims, noclaims, @@ -503,11 +526,11 @@ func TestSync(t *testing.T) { // smaller PV available "13-1 - binding to class", []*v1.PersistentVolume{ - newVolume("volume13-1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), - newVolume("volume13-1-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold), + newVolume("volume13-1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume13-1-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold), }, []*v1.PersistentVolume{ - newVolume("volume13-1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume13-1-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newVolume("volume13-1-2", "10Gi", "uid13-1", "claim13-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classGold, annBoundByController), }, newClaimArray("claim13-1", "uid13-1", "1Gi", "", v1.ClaimPending, &classGold), @@ -519,11 +542,11 @@ func TestSync(t *testing.T) { // smaller PV with a class available "13-2 - binding without a class", []*v1.PersistentVolume{ - newVolume("volume13-2-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold), - newVolume("volume13-2-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolume("volume13-2-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold), + newVolume("volume13-2-2", "10Gi", "", "", v1.VolumeAvailable, 
v1.PersistentVolumeReclaimRetain, classEmpty), }, []*v1.PersistentVolume{ - newVolume("volume13-2-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold), + newVolume("volume13-2-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold), newVolume("volume13-2-2", "10Gi", "uid13-2", "claim13-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController), }, newClaimArray("claim13-2", "uid13-2", "1Gi", "", v1.ClaimPending, nil), @@ -535,11 +558,11 @@ func TestSync(t *testing.T) { // smaller PV with different class available "13-3 - binding to specific a class", []*v1.PersistentVolume{ - newVolume("volume13-3-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classSilver), - newVolume("volume13-3-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold), + newVolume("volume13-3-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classSilver), + newVolume("volume13-3-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold), }, []*v1.PersistentVolume{ - newVolume("volume13-3-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classSilver), + newVolume("volume13-3-1", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classSilver), newVolume("volume13-3-2", "10Gi", "uid13-3", "claim13-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classGold, annBoundByController), }, newClaimArray("claim13-3", "uid13-3", "1Gi", "", v1.ClaimPending, &classGold), @@ -550,7 +573,7 @@ func TestSync(t *testing.T) { // syncVolume binds claim requesting class "" to claim to PV with // class="" "13-4 - empty class", - newVolumeArray("volume13-4", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume13-4", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newVolumeArray("volume13-4", "1Gi", "uid13-4", 
"claim13-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController), newClaimArray("claim13-4", "uid13-4", "1Gi", "", v1.ClaimPending, &classEmpty), newClaimArray("claim13-4", "uid13-4", "1Gi", "volume13-4", v1.ClaimBound, &classEmpty, annBoundByController, annBindCompleted), @@ -560,7 +583,7 @@ func TestSync(t *testing.T) { // syncVolume binds claim requesting class nil to claim to PV with // class = "" "13-5 - nil class", - newVolumeArray("volume13-5", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty), + newVolumeArray("volume13-5", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty), newVolumeArray("volume13-5", "1Gi", "uid13-5", "claim13-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController), newClaimArray("claim13-5", "uid13-5", "1Gi", "", v1.ClaimPending, nil), newClaimArray("claim13-5", "uid13-5", "1Gi", "volume13-5", v1.ClaimBound, nil, annBoundByController, annBindCompleted), @@ -572,7 +595,7 @@ func TestSync(t *testing.T) { { // syncVolume binds a requested block claim to a block volume "14-1 - binding to volumeMode block", - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "uid14-1", "claim14-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "volume14-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)), @@ -581,7 +604,7 @@ func TestSync(t *testing.T) { { // syncVolume binds a requested filesystem 
claim to a filesystem volume "14-2 - binding to volumeMode filesystem", - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "uid14-2", "claim14-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "volume14-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted)), @@ -590,7 +613,7 @@ func TestSync(t *testing.T) { { // syncVolume binds an unspecified volumemode for claim to a specified filesystem volume "14-3 - binding to volumeMode filesystem using default for claim", - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "uid14-3", "claim14-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), withClaimVolumeMode(nil, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(nil, newClaimArray("claim14-3", "uid14-3", "10Gi", "volume14-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted)), @@ -599,7 +622,7 @@ func TestSync(t *testing.T) { { // syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume "14-4 - binding to unspecified volumeMode using requested filesystem for claim", - withVolumeVolumeMode(nil, 
newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(nil, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withVolumeVolumeMode(nil, newVolumeArray("volume14-4", "10Gi", "uid14-4", "claim14-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "volume14-4", v1.ClaimBound, nil, annBoundByController, annBindCompleted)), @@ -608,7 +631,7 @@ func TestSync(t *testing.T) { { // syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume "14-5 - binding different volumeModes should be ignored", - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "uid14-5", "claim14-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "volume14-5", v1.ClaimBound, nil, annBoundByController, annBindCompleted)), @@ -637,7 +660,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // syncVolume binds a requested block claim to a block volume "14-1 - binding to volumeMode block", - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", 
"", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "uid14-1", "claim14-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "volume14-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)), @@ -646,7 +669,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // syncVolume binds a requested filesystem claim to a filesystem volume "14-2 - binding to volumeMode filesystem", - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "uid14-2", "claim14-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "volume14-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted)), @@ -655,8 +678,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // failed syncVolume do not bind to an unspecified volumemode for claim to a specified filesystem volume "14-3 - do not bind pv volumeMode filesystem and pvc volumeMode block", - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeFile, 
newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)), []string{"Normal FailedBinding"}, @@ -665,8 +688,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // failed syncVolume do not bind a requested filesystem claim to an unspecified volumeMode for volume "14-4 - do not bind pv volumeMode block and pvc volumeMode filesystem", - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)), []string{"Normal FailedBinding"}, @@ -675,8 +698,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // failed syncVolume do not bind when matching class but not matching volumeModes "14-5 - do not bind when matching class but not volumeMode", - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)), - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", 
"10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, &classGold)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, &classGold)), []string{"Warning ProvisioningFailed"}, @@ -685,8 +708,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // failed syncVolume do not bind when matching volumeModes but class does not match "14-5-1 - do not bind when matching volumeModes but class does not match", - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)), - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-1", "uid14-5-1", "10Gi", "", v1.ClaimPending, &classSilver)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-1", "uid14-5-1", "10Gi", "", v1.ClaimPending, &classSilver)), []string{"Warning ProvisioningFailed"}, @@ -695,8 +718,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // failed syncVolume do not bind when pvc is prebound to pv with matching volumeModes but class does not match "14-5-2 - do not bind when pvc is prebound to pv with matching volumeModes but class does 
not match", - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)), - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-2", "uid14-5-2", "10Gi", "volume14-5-2", v1.ClaimPending, &classSilver)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-2", "uid14-5-2", "10Gi", "volume14-5-2", v1.ClaimPending, &classSilver)), []string{"Warning VolumeMismatch"}, @@ -705,7 +728,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // syncVolume bind when pv is prebound and volumeModes match "14-7 - bind when pv volume is prebound and volumeModes match", - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-7", "10Gi", "", "claim14-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-7", "10Gi", "", "claim14-7", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-7", "10Gi", "uid14-7", "claim14-7", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-7", "uid14-7", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-7", "uid14-7", "10Gi", "volume14-7", v1.ClaimBound, nil, annBoundByController, annBindCompleted)), @@ -714,8 +737,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // failed syncVolume do not bind when pvc is prebound to pv with mismatching volumeModes "14-8 - do not bind 
when pvc is prebound to pv with mismatching volumeModes", - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-8", "uid14-8", "10Gi", "volume14-8", v1.ClaimPending, nil)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-8", "uid14-8", "10Gi", "volume14-8", v1.ClaimPending, nil)), []string{"Warning VolumeMismatch"}, @@ -724,8 +747,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // failed syncVolume do not bind when pvc is prebound to pv with mismatching volumeModes "14-8-1 - do not bind when pvc is prebound to pv with mismatching volumeModes", - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-8-1", "uid14-8-1", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-8-1", "uid14-8-1", "10Gi", "", v1.ClaimPending, 
nil)), []string{"Normal FailedBinding"}, @@ -734,7 +757,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // syncVolume binds when pvc is prebound to pv with matching volumeModes block "14-9 - bind when pvc is prebound to pv with matching volumeModes block", - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-9", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-9", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-9", "10Gi", "uid14-9", "claim14-9", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-9", "uid14-9", "10Gi", "volume14-9", v1.ClaimPending, nil)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-9", "uid14-9", "10Gi", "volume14-9", v1.ClaimBound, nil, annBindCompleted)), @@ -743,7 +766,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // syncVolume binds when pv is prebound to pvc with matching volumeModes block "14-10 - bind when pv is prebound to pvc with matching volumeModes block", - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-10", "10Gi", "", "claim14-10", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-10", "10Gi", "", "claim14-10", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-10", "10Gi", "uid14-10", "claim14-10", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-10", "uid14-10", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-10", "uid14-10", "10Gi", "volume14-10", v1.ClaimBound, nil, annBoundByController, annBindCompleted)), @@ -752,7 +775,7 @@ func TestSyncAlphaBlockVolume(t 
*testing.T) { { // syncVolume binds when pvc is prebound to pv with matching volumeModes filesystem "14-11 - bind when pvc is prebound to pv with matching volumeModes filesystem", - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-11", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-11", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-11", "10Gi", "uid14-11", "claim14-11", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-11", "uid14-11", "10Gi", "volume14-11", v1.ClaimPending, nil)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-11", "uid14-11", "10Gi", "volume14-11", v1.ClaimBound, nil, annBindCompleted)), @@ -761,7 +784,7 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // syncVolume binds when pv is prebound to pvc with matching volumeModes filesystem "14-12 - bind when pv is prebound to pvc with matching volumeModes filesystem", - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-12", "10Gi", "", "claim14-12", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-12", "10Gi", "", "claim14-12", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty)), withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-12", "10Gi", "uid14-12", "claim14-12", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-12", "uid14-12", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-12", "uid14-12", "10Gi", "volume14-12", v1.ClaimBound, nil, annBoundByController, annBindCompleted)), @@ -770,8 +793,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // syncVolume output warning when pv is prebound to 
pvc with mismatching volumeMode "14-13 - output warning when pv is prebound to pvc with different volumeModes", - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-13", "10Gi", "uid14-13", "claim14-13", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-13", "uid14-13", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-13", "uid14-13", "10Gi", "", v1.ClaimPending, nil)), []string{"Warning VolumeMismatch"}, @@ -780,8 +803,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // syncVolume output warning when pv is prebound to pvc with mismatching volumeMode "14-13-1 - output warning when pv is prebound to pvc with different volumeModes", - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", "10Gi", "uid14-13-1", "claim14-13-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-13-1", 
"10Gi", "uid14-13-1", "claim14-13-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-13-1", "uid14-13-1", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-13-1", "uid14-13-1", "10Gi", "", v1.ClaimPending, nil)), []string{"Warning VolumeMismatch"}, @@ -790,8 +813,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // syncVolume waits for synClaim without warning when pv is prebound to pvc with matching volumeMode block "14-14 - wait for synClaim without warning when pv is prebound to pvc with matching volumeModes block", - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), - withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), + withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-14", "10Gi", "uid14-14", "claim14-14", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-14", "uid14-14", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeBlock, newClaimArray("claim14-14", "uid14-14", "10Gi", "", v1.ClaimPending, nil)), noevents, noerrors, testSyncVolume, @@ -799,8 +822,8 @@ func TestSyncAlphaBlockVolume(t *testing.T) { { // syncVolume waits for synClaim without warning when pv is prebound to pvc with matching volumeMode file "14-14-1 - wait for synClaim without warning when pv is prebound to pvc with matching volumeModes file", - withVolumeVolumeMode(&modeFile, 
newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), - withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), + withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-14-1", "10Gi", "uid14-14-1", "claim14-14-1", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-14-1", "uid14-14-1", "10Gi", "", v1.ClaimPending, nil)), withClaimVolumeMode(&modeFile, newClaimArray("claim14-14-1", "uid14-14-1", "10Gi", "", v1.ClaimPending, nil)), noevents, noerrors, testSyncVolume, diff --git a/pkg/controller/volume/persistentvolume/framework_test.go b/pkg/controller/volume/persistentvolume/framework_test.go index 13e36c25df9..4626e661e2a 100644 --- a/pkg/controller/volume/persistentvolume/framework_test.go +++ b/pkg/controller/volume/persistentvolume/framework_test.go @@ -1222,6 +1222,9 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}, }, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, } } diff --git a/pkg/controller/volume/persistentvolume/index.go b/pkg/controller/volume/persistentvolume/index.go index a23cd6faa0b..4c84abe82e0 100644 --- a/pkg/controller/volume/persistentvolume/index.go +++ b/pkg/controller/volume/persistentvolume/index.go @@ -211,11 +211,18 @@ func findMatchingVolume( } // filter out: + // - volumes in non-available phase // - volumes bound to another claim // - volumes whose labels don't match the claim's selector, if specified // 
- volumes in Class that is not requested // - volumes whose NodeAffinity does not match the node - if volume.Spec.ClaimRef != nil { + if volume.Status.Phase != v1.VolumeAvailable { + // We ignore volumes in non-available phase, because volumes that + // satisfies matching criteria will be updated to available, binding + // them now has high chance of encountering unnecessary failures + // due to API conflicts. + continue + } else if volume.Spec.ClaimRef != nil { continue } else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) { continue diff --git a/pkg/controller/volume/persistentvolume/index_test.go b/pkg/controller/volume/persistentvolume/index_test.go index caace043be6..e2053fe830c 100644 --- a/pkg/controller/volume/persistentvolume/index_test.go +++ b/pkg/controller/volume/persistentvolume/index_test.go @@ -213,6 +213,9 @@ func TestMatchingWithBoundVolumes(t *testing.T) { // this one we're pretending is already bound ClaimRef: &v1.ObjectReference{UID: "abc123"}, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeBound, + }, } pv2 := &v1.PersistentVolume{ @@ -229,6 +232,9 @@ func TestMatchingWithBoundVolumes(t *testing.T) { }, AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany}, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, } volumeIndex.store.Add(pv1) @@ -330,6 +336,9 @@ func TestFindingVolumeWithDifferentAccessModes(t *testing.T) { v1.ReadOnlyMany, }, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, } ebs := &v1.PersistentVolume{ @@ -341,6 +350,9 @@ func TestFindingVolumeWithDifferentAccessModes(t *testing.T) { v1.ReadWriteOnce, }, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, } nfs := &v1.PersistentVolume{ @@ -354,6 +366,9 @@ func TestFindingVolumeWithDifferentAccessModes(t *testing.T) { v1.ReadWriteMany, }, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, } claim := &v1.PersistentVolumeClaim{ @@ 
-442,6 +457,9 @@ func createTestVolumes() []*v1.PersistentVolume { v1.ReadOnlyMany, }, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -462,6 +480,9 @@ func createTestVolumes() []*v1.PersistentVolume { // this one we're pretending is already bound ClaimRef: &v1.ObjectReference{UID: "def456"}, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeBound, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -481,6 +502,9 @@ func createTestVolumes() []*v1.PersistentVolume { v1.ReadWriteMany, }, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -501,6 +525,9 @@ func createTestVolumes() []*v1.PersistentVolume { // this one we're pretending is already bound ClaimRef: &v1.ObjectReference{UID: "abc123"}, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeBound, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -520,6 +547,9 @@ func createTestVolumes() []*v1.PersistentVolume { v1.ReadWriteMany, }, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -538,6 +568,9 @@ func createTestVolumes() []*v1.PersistentVolume { v1.ReadOnlyMany, }, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -557,6 +590,9 @@ func createTestVolumes() []*v1.PersistentVolume { v1.ReadWriteMany, }, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -577,6 +613,9 @@ func createTestVolumes() []*v1.PersistentVolume { v1.ReadWriteOnce, }, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -598,6 +637,9 @@ func createTestVolumes() []*v1.PersistentVolume { }, StorageClassName: classSilver, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -616,6 +658,9 @@ func createTestVolumes() 
[]*v1.PersistentVolume { }, StorageClassName: classSilver, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -634,6 +679,9 @@ func createTestVolumes() []*v1.PersistentVolume { }, StorageClassName: classGold, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -654,6 +702,9 @@ func createTestVolumes() []*v1.PersistentVolume { }, StorageClassName: classLarge, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -674,6 +725,9 @@ func createTestVolumes() []*v1.PersistentVolume { }, StorageClassName: classLarge, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -694,6 +748,9 @@ func createTestVolumes() []*v1.PersistentVolume { StorageClassName: classWait, NodeAffinity: getVolumeNodeAffinity("key1", "value1"), }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -714,6 +771,9 @@ func createTestVolumes() []*v1.PersistentVolume { StorageClassName: classWait, NodeAffinity: getVolumeNodeAffinity("key1", "value1"), }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -735,6 +795,9 @@ func createTestVolumes() []*v1.PersistentVolume { ClaimRef: &v1.ObjectReference{Name: "claim02", Namespace: "myns"}, NodeAffinity: getVolumeNodeAffinity("key1", "value1"), }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, }, { ObjectMeta: metav1.ObjectMeta{ @@ -755,6 +818,98 @@ func createTestVolumes() []*v1.PersistentVolume { StorageClassName: classWait, NodeAffinity: getVolumeNodeAffinity("key1", "value3"), }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + UID: "affinity-pv4-pending", + Name: "affinity004-pending", + }, + Spec: 
v1.PersistentVolumeSpec{ + Capacity: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): resource.MustParse("200G"), + }, + PersistentVolumeSource: v1.PersistentVolumeSource{ + Local: &v1.LocalVolumeSource{}, + }, + AccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + v1.ReadOnlyMany, + }, + StorageClassName: classWait, + NodeAffinity: getVolumeNodeAffinity("key1", "value4"), + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumePending, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + UID: "affinity-pv4-failed", + Name: "affinity004-failed", + }, + Spec: v1.PersistentVolumeSpec{ + Capacity: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): resource.MustParse("200G"), + }, + PersistentVolumeSource: v1.PersistentVolumeSource{ + Local: &v1.LocalVolumeSource{}, + }, + AccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + v1.ReadOnlyMany, + }, + StorageClassName: classWait, + NodeAffinity: getVolumeNodeAffinity("key1", "value4"), + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeFailed, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + UID: "affinity-pv4-released", + Name: "affinity004-released", + }, + Spec: v1.PersistentVolumeSpec{ + Capacity: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): resource.MustParse("200G"), + }, + PersistentVolumeSource: v1.PersistentVolumeSource{ + Local: &v1.LocalVolumeSource{}, + }, + AccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + v1.ReadOnlyMany, + }, + StorageClassName: classWait, + NodeAffinity: getVolumeNodeAffinity("key1", "value4"), + }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeReleased, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + UID: "affinity-pv4-empty", + Name: "affinity004-empty", + }, + Spec: v1.PersistentVolumeSpec{ + Capacity: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): resource.MustParse("200G"), + }, + PersistentVolumeSource: v1.PersistentVolumeSource{ + Local: &v1.LocalVolumeSource{}, + }, + AccessModes: 
[]v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + v1.ReadOnlyMany, + }, + StorageClassName: classWait, + NodeAffinity: getVolumeNodeAffinity("key1", "value4"), + }, }, } } @@ -770,6 +925,9 @@ func testVolume(name, size string) *v1.PersistentVolume { PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{}}, AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, } } @@ -811,6 +969,9 @@ func createVolumeModeBlockTestVolume() *v1.PersistentVolume { }, VolumeMode: &blockMode, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, } } @@ -834,6 +995,9 @@ func createVolumeModeFilesystemTestVolume() *v1.PersistentVolume { }, VolumeMode: &filesystemMode, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, } } @@ -1054,6 +1218,9 @@ func TestAlphaStorageObjectInUseProtectionFiltering(t *testing.T) { PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{}}, AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, } pvToDelete := pv.DeepCopy() @@ -1274,6 +1441,11 @@ func TestFindMatchVolumeWithNode(t *testing.T) { Labels: map[string]string{"key1": "value3"}, }, } + node4 := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"key1": "value4"}, + }, + } scenarios := map[string]struct { expectedMatch string @@ -1341,6 +1513,15 @@ func TestFindMatchVolumeWithNode(t *testing.T) { }), node: node3, }, + "fail-nonavaiable": { + expectedMatch: "", + claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) { + pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce} + pvc.Spec.StorageClassName = &classWait + pvc.Name = "claim04" + }), + node: node4, + }, "success-bad-and-good-node-affinity": { expectedMatch: "affinity-pv3", claim: makePVC("100G", func(pvc *v1.PersistentVolumeClaim) { 
diff --git a/pkg/controller/volume/persistentvolume/provision_test.go b/pkg/controller/volume/persistentvolume/provision_test.go index 93368a58d0a..0337996e464 100644 --- a/pkg/controller/volume/persistentvolume/provision_test.go +++ b/pkg/controller/volume/persistentvolume/provision_test.go @@ -166,7 +166,7 @@ func TestProvisionSync(t *testing.T) { { // No provisioning if there is a matching volume available "11-6 - provisioning when there is a volume available", - newVolumeArray("volume11-6", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold), + newVolumeArray("volume11-6", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classGold), newVolumeArray("volume11-6", "1Gi", "uid11-6", "claim11-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classGold, annBoundByController), newClaimArray("claim11-6", "uid11-6", "1Gi", "", v1.ClaimPending, &classGold), newClaimArray("claim11-6", "uid11-6", "1Gi", "volume11-6", v1.ClaimBound, &classGold, annBoundByController, annBindCompleted), diff --git a/pkg/controller/volume/persistentvolume/scheduler_binder_test.go b/pkg/controller/volume/persistentvolume/scheduler_binder_test.go index 0bdfba3fd20..8b669caa5b1 100644 --- a/pkg/controller/volume/persistentvolume/scheduler_binder_test.go +++ b/pkg/controller/volume/persistentvolume/scheduler_binder_test.go @@ -494,6 +494,9 @@ func makeTestPV(name, node, capacity, version string, boundToPVC *v1.PersistentV }, StorageClassName: className, }, + Status: v1.PersistentVolumeStatus{ + Phase: v1.VolumeAvailable, + }, } if node != "" { pv.Spec.NodeAffinity = getVolumeNodeAffinity(nodeLabelKey, node) diff --git a/pkg/credentialprovider/BUILD b/pkg/credentialprovider/BUILD index 351420ae926..f83f75be26b 100644 --- a/pkg/credentialprovider/BUILD +++ b/pkg/credentialprovider/BUILD @@ -18,7 +18,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/credentialprovider", deps = [ "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", 
- "//vendor/github.com/docker/docker/api/types:go_default_library", "//vendor/github.com/golang/glog:go_default_library", ], ) @@ -31,7 +30,6 @@ go_test( "provider_test.go", ], embed = [":go_default_library"], - deps = ["//vendor/github.com/docker/docker/api/types:go_default_library"], ) filegroup( diff --git a/pkg/credentialprovider/keyring.go b/pkg/credentialprovider/keyring.go index b269f474600..8a6f563d085 100644 --- a/pkg/credentialprovider/keyring.go +++ b/pkg/credentialprovider/keyring.go @@ -25,7 +25,6 @@ import ( "github.com/golang/glog" - dockertypes "github.com/docker/docker/api/types" "k8s.io/apimachinery/pkg/util/sets" ) @@ -52,17 +51,39 @@ type lazyDockerKeyring struct { Providers []DockerConfigProvider } +// AuthConfig contains authorization information for connecting to a Registry +// This type mirrors "github.com/docker/docker/api/types.AuthConfig" +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} + // LazyAuthConfiguration wraps dockertypes.AuthConfig, potentially deferring its // binding. If Provider is non-nil, it will be used to obtain new credentials // by calling LazyProvide() on it. 
type LazyAuthConfiguration struct { - dockertypes.AuthConfig + AuthConfig Provider DockerConfigProvider } func DockerConfigEntryToLazyAuthConfiguration(ident DockerConfigEntry) LazyAuthConfiguration { return LazyAuthConfiguration{ - AuthConfig: dockertypes.AuthConfig{ + AuthConfig: AuthConfig{ Username: ident.Username, Password: ident.Password, Email: ident.Email, diff --git a/pkg/credentialprovider/keyring_test.go b/pkg/credentialprovider/keyring_test.go index 7aef17d7e5e..2b36bde889a 100644 --- a/pkg/credentialprovider/keyring_test.go +++ b/pkg/credentialprovider/keyring_test.go @@ -21,8 +21,6 @@ import ( "fmt" "reflect" "testing" - - dockertypes "github.com/docker/docker/api/types" ) func TestUrlsMatch(t *testing.T) { @@ -505,7 +503,7 @@ func TestLazyKeyring(t *testing.T) { func TestDockerKeyringLookup(t *testing.T) { ada := LazyAuthConfiguration{ - AuthConfig: dockertypes.AuthConfig{ + AuthConfig: AuthConfig{ Username: "ada", Password: "smash", Email: "ada@example.com", @@ -513,7 +511,7 @@ func TestDockerKeyringLookup(t *testing.T) { } grace := LazyAuthConfiguration{ - AuthConfig: dockertypes.AuthConfig{ + AuthConfig: AuthConfig{ Username: "grace", Password: "squash", Email: "grace@example.com", @@ -576,7 +574,7 @@ func TestDockerKeyringLookup(t *testing.T) { // NOTE: the above covers the case of a more specific match trumping just hostname. 
func TestIssue3797(t *testing.T) { rex := LazyAuthConfiguration{ - AuthConfig: dockertypes.AuthConfig{ + AuthConfig: AuthConfig{ Username: "rex", Password: "tiny arms", Email: "rex@example.com", diff --git a/pkg/credentialprovider/provider.go b/pkg/credentialprovider/provider.go index 419dc43e5df..422696e9b0b 100644 --- a/pkg/credentialprovider/provider.go +++ b/pkg/credentialprovider/provider.go @@ -22,7 +22,6 @@ import ( "sync" "time" - dockertypes "github.com/docker/docker/api/types" "github.com/golang/glog" ) @@ -40,14 +39,12 @@ type DockerConfigProvider interface { LazyProvide() *DockerConfigEntry } -func LazyProvide(creds LazyAuthConfiguration) dockertypes.AuthConfig { +func LazyProvide(creds LazyAuthConfiguration) AuthConfig { if creds.Provider != nil { entry := *creds.Provider.LazyProvide() return DockerConfigEntryToLazyAuthConfiguration(entry).AuthConfig - } else { - return creds.AuthConfig } - + return creds.AuthConfig } // A DockerConfigProvider that simply reads the .dockercfg file diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 614dcac8962..bb5cfd587ed 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -179,13 +179,6 @@ const ( // Enable nodes to exclude themselves from service load balancers ServiceNodeExclusion utilfeature.Feature = "ServiceNodeExclusion" - // owner @brendandburns - // deprecated: v1.10 - // - // Enable the service proxy to contact external IP addresses. Note this feature is present - // only for backward compatibility, it will be removed in the 1.11 release. - ServiceProxyAllowExternalIPs utilfeature.Feature = "ServiceProxyAllowExternalIPs" - // owner: @jsafrane // alpha: v1.9 // @@ -257,13 +250,6 @@ const ( // Enable Hyper-V containers on Windows HyperVContainer utilfeature.Feature = "HyperVContainer" - // owner: @joelsmith - // deprecated: v1.10 - // - // Mount secret, configMap, downwardAPI and projected volumes ReadOnly. 
Note: this feature - // gate is present only for backward compatibility, it will be removed in the 1.11 release. - ReadOnlyAPIDataVolumes utilfeature.Feature = "ReadOnlyAPIDataVolumes" - // owner: @k82cn // beta: v1.12 // @@ -473,6 +459,5 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS apiextensionsfeatures.CustomResourceSubresources: {Default: true, PreRelease: utilfeature.Beta}, // features that enable backwards compatibility but are scheduled to be removed - ServiceProxyAllowExternalIPs: {Default: false, PreRelease: utilfeature.Deprecated}, - ReadOnlyAPIDataVolumes: {Default: true, PreRelease: utilfeature.Deprecated}, + // ... } diff --git a/pkg/kubeapiserver/admission/BUILD b/pkg/kubeapiserver/admission/BUILD index bc4dda702ae..bfbd57058cc 100644 --- a/pkg/kubeapiserver/admission/BUILD +++ b/pkg/kubeapiserver/admission/BUILD @@ -20,7 +20,7 @@ go_library( deps = [ "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/informers/informers_generated/internalversion:go_default_library", - "//pkg/quota:go_default_library", + "//pkg/quota/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library", diff --git a/pkg/kubeapiserver/admission/initializer.go b/pkg/kubeapiserver/admission/initializer.go index 20f80a58b97..d5751c4b618 100644 --- a/pkg/kubeapiserver/admission/initializer.go +++ b/pkg/kubeapiserver/admission/initializer.go @@ -23,7 +23,7 @@ import ( "k8s.io/apiserver/pkg/util/webhook" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" - "k8s.io/kubernetes/pkg/quota" + quota "k8s.io/kubernetes/pkg/quota/v1" ) // TODO add a `WantsToRun` which takes a stopCh. Might make it generic. 
diff --git a/pkg/kubeapiserver/server/insecure_handler.go b/pkg/kubeapiserver/server/insecure_handler.go index 754aff49b2e..bde19e53929 100644 --- a/pkg/kubeapiserver/server/insecure_handler.go +++ b/pkg/kubeapiserver/server/insecure_handler.go @@ -28,6 +28,7 @@ import ( // You shouldn't be using this. It makes sig-auth sad. // DeprecatedInsecureServingInfo *ServingInfo +// BuildInsecureHandlerChain sets up the server to listen to http. Should be removed. func BuildInsecureHandlerChain(apiHandler http.Handler, c *server.Config) http.Handler { handler := apiHandler handler = genericapifilters.WithAudit(handler, c.AuditBackend, c.AuditPolicyChecker, c.LongRunningFunc) diff --git a/pkg/kubectl/BUILD b/pkg/kubectl/BUILD index d9cb4e0774b..49c7a319ed2 100644 --- a/pkg/kubectl/BUILD +++ b/pkg/kubectl/BUILD @@ -113,9 +113,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubectl", deps = [ "//pkg/api/legacyscheme:go_default_library", - "//pkg/api/pod:go_default_library", "//pkg/api/v1/pod:go_default_library", - "//pkg/apis/apps:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/core/v1:go_default_library", "//pkg/apis/extensions:go_default_library", diff --git a/pkg/kubectl/cmd/BUILD b/pkg/kubectl/cmd/BUILD index 885485458dc..b8ffc63d335 100644 --- a/pkg/kubectl/cmd/BUILD +++ b/pkg/kubectl/cmd/BUILD @@ -40,6 +40,7 @@ go_library( "patch.go", "plugin.go", "portforward.go", + "profiling.go", "proxy.go", "replace.go", "rollingupdate.go", @@ -59,8 +60,6 @@ go_library( "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/kubectl:go_default_library", - "//pkg/kubectl/apply/parse:go_default_library", - "//pkg/kubectl/apply/strategy:go_default_library", "//pkg/kubectl/cmd/auth:go_default_library", "//pkg/kubectl/cmd/config:go_default_library", "//pkg/kubectl/cmd/create:go_default_library", @@ -192,7 +191,6 @@ go_test( "//pkg/api/legacyscheme:go_default_library", "//pkg/api/testapi:go_default_library", 
"//pkg/api/testing:go_default_library", - "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/kubectl/cmd/create:go_default_library", @@ -204,7 +202,9 @@ go_test( "//pkg/kubectl/util/i18n:go_default_library", "//pkg/kubectl/util/term:go_default_library", "//pkg/printers:go_default_library", + "//staging/src/k8s.io/api/batch/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/pkg/kubectl/cmd/alpha.go b/pkg/kubectl/cmd/alpha.go index 835dd51c77a..cb78568d950 100644 --- a/pkg/kubectl/cmd/alpha.go +++ b/pkg/kubectl/cmd/alpha.go @@ -36,7 +36,6 @@ func NewCmdAlpha(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra. // Alpha commands should be added here. As features graduate from alpha they should move // from here to the CommandGroups defined by NewKubeletCommand() in cmd.go. //cmd.AddCommand(NewCmdDebug(f, in, out, err)) - cmd.AddCommand(NewCmdDiff(f, streams)) // NewKubeletCommand() will hide the alpha command if it has no subcommands. Overriding // the help function ensures a reasonable message if someone types the hidden command anyway. 
diff --git a/pkg/kubectl/cmd/apply.go b/pkg/kubectl/cmd/apply.go index b4d588c0897..ee8b8e3f0c9 100644 --- a/pkg/kubectl/cmd/apply.go +++ b/pkg/kubectl/cmd/apply.go @@ -26,7 +26,7 @@ import ( "github.com/jonboulle/clockwork" "github.com/spf13/cobra" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -44,7 +44,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/client-go/dynamic" oapi "k8s.io/kube-openapi/pkg/util/proto" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -399,7 +398,7 @@ func (o *ApplyOptions) Run() error { } annotationMap := metadata.GetAnnotations() - if _, ok := annotationMap[api.LastAppliedConfigAnnotation]; !ok { + if _, ok := annotationMap[corev1.LastAppliedConfigAnnotation]; !ok { fmt.Fprintf(o.ErrOut, warningNoLastAppliedConfigAnnotation, o.cmdBaseName) } @@ -467,7 +466,7 @@ func (o *ApplyOptions) Run() error { objToPrint := objs[0] if len(objs) > 1 { - list := &v1.List{ + list := &corev1.List{ TypeMeta: metav1.TypeMeta{ Kind: "List", APIVersion: "v1", @@ -619,7 +618,7 @@ func (p *pruner) prune(namespace string, mapping *meta.RESTMapping, includeUnini return err } annots := metadata.GetAnnotations() - if _, ok := annots[api.LastAppliedConfigAnnotation]; !ok { + if _, ok := annots[corev1.LastAppliedConfigAnnotation]; !ok { // don't prune resources not created with apply continue } diff --git a/pkg/kubectl/cmd/attach.go b/pkg/kubectl/cmd/attach.go index 9ecaf4726dc..54d7dcfcc48 100644 --- a/pkg/kubectl/cmd/attach.go +++ b/pkg/kubectl/cmd/attach.go @@ -31,7 +31,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/resource" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" - "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" 
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" @@ -137,7 +136,7 @@ func defaultAttachFunc(o *AttachOptions, containerToAttach *corev1.Container, ra Stdout: o.Out != nil, Stderr: !o.DisableStderr, TTY: raw, - }, legacyscheme.ParameterCodec) + }, scheme.ParameterCodec) return o.Attach.Attach("POST", req.URL(), o.Config, o.In, o.Out, o.ErrOut, raw, sizeQueue) } diff --git a/pkg/kubectl/cmd/autoscale.go b/pkg/kubectl/cmd/autoscale.go index da5a00d6ccb..abf87915cc0 100644 --- a/pkg/kubectl/cmd/autoscale.go +++ b/pkg/kubectl/cmd/autoscale.go @@ -28,7 +28,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" autoscalingv1client "k8s.io/client-go/kubernetes/typed/autoscaling/v1" - "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -195,7 +194,7 @@ func (o *AutoscaleOptions) Validate() error { func (o *AutoscaleOptions) Run() error { r := o.builder. - WithScheme(legacyscheme.Scheme). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). ContinueOnError(). NamespaceParam(o.namespace).DefaultNamespace(). FilenameParam(o.enforceNamespace, o.FilenameOptions). 
@@ -246,7 +245,7 @@ func (o *AutoscaleOptions) Run() error { return printer.PrintObj(hpa, o.Out) } - if err := kubectl.CreateOrUpdateAnnotation(o.createAnnotation, hpa, cmdutil.InternalVersionJSONEncoder()); err != nil { + if err := kubectl.CreateOrUpdateAnnotation(o.createAnnotation, hpa, scheme.DefaultJSONEncoder()); err != nil { return err } diff --git a/pkg/kubectl/cmd/clusterinfo.go b/pkg/kubectl/cmd/clusterinfo.go index 4ce0cce4083..8d27c8a3335 100644 --- a/pkg/kubectl/cmd/clusterinfo.go +++ b/pkg/kubectl/cmd/clusterinfo.go @@ -21,15 +21,15 @@ import ( "io" "strconv" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/resource" restclient "k8s.io/client-go/rest" - "k8s.io/kubernetes/pkg/api/legacyscheme" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" ct "github.com/daviddengcn/go-colortext" @@ -94,7 +94,7 @@ func (o *ClusterInfoOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) err func (o *ClusterInfoOptions) Run() error { // TODO use generalized labels once they are implemented (#341) b := o.Builder. - WithScheme(legacyscheme.Scheme). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). NamespaceParam(o.Namespace).DefaultNamespace(). LabelSelectorParam("kubernetes.io/cluster-service=true"). ResourceTypeOrNameArgs(false, []string{"services"}...). 
@@ -105,7 +105,7 @@ func (o *ClusterInfoOptions) Run() error { } printService(o.Out, "Kubernetes master", o.Client.Host) - services := r.Object.(*api.ServiceList).Items + services := r.Object.(*corev1.ServiceList).Items for _, service := range services { var link string if len(service.Status.LoadBalancer.Ingress) > 0 { diff --git a/pkg/kubectl/cmd/clusterinfo_dump.go b/pkg/kubectl/cmd/clusterinfo_dump.go index 789bd9f798f..741ab6ecc89 100644 --- a/pkg/kubectl/cmd/clusterinfo_dump.go +++ b/pkg/kubectl/cmd/clusterinfo_dump.go @@ -31,7 +31,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/printers" appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" @@ -252,7 +251,7 @@ func (o *ClusterInfoDumpOptions) Run() error { writer.Write([]byte(fmt.Sprintf("==== START logs for container %s of pod %s/%s ====\n", container.Name, pod.Namespace, pod.Name))) defer writer.Write([]byte(fmt.Sprintf("==== END logs for container %s of pod %s/%s ====\n", container.Name, pod.Namespace, pod.Name))) - requests, err := o.LogsForObject(o.RESTClientGetter, pod, &api.PodLogOptions{Container: container.Name}, timeout, false) + requests, err := o.LogsForObject(o.RESTClientGetter, pod, &corev1.PodLogOptions{Container: container.Name}, timeout, false) if err != nil { // Print error and return. 
writer.Write([]byte(fmt.Sprintf("Create log request error: %s\n", err.Error()))) diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index b15dcc53b1f..95d8ed84a05 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -370,6 +370,14 @@ func NewKubectlCommand(in io.Reader, out, err io.Writer) *cobra.Command { Find more information at: https://kubernetes.io/docs/reference/kubectl/overview/`), Run: runHelp, + // Hook before and after Run initialize and write profiles to disk, + // respectively. + PersistentPreRunE: func(*cobra.Command, []string) error { + return initProfiling() + }, + PersistentPostRunE: func(*cobra.Command, []string) error { + return flushProfiling() + }, BashCompletionFunction: bashCompletionFunc, } @@ -380,6 +388,8 @@ func NewKubectlCommand(in io.Reader, out, err io.Writer) *cobra.Command { // a.k.a. change all "_" to "-". e.g. glog package flags.SetNormalizeFunc(utilflag.WordSepNormalizeFunc) + addProfilingFlags(flags) + kubeConfigFlags := genericclioptions.NewConfigFlags() kubeConfigFlags.AddFlags(flags) matchVersionKubeConfigFlags := cmdutil.NewMatchVersionFlags(kubeConfigFlags) @@ -458,6 +468,7 @@ func NewKubectlCommand(in io.Reader, out, err io.Writer) *cobra.Command { { Message: "Advanced Commands:", Commands: []*cobra.Command{ + NewCmdDiff(f, ioStreams), NewCmdApply("kubectl", f, ioStreams), NewCmdPatch(f, ioStreams), NewCmdReplace(f, ioStreams), diff --git a/pkg/kubectl/cmd/create/BUILD b/pkg/kubectl/cmd/create/BUILD index 94862b9a9eb..ceedd7363c5 100644 --- a/pkg/kubectl/cmd/create/BUILD +++ b/pkg/kubectl/cmd/create/BUILD @@ -74,7 +74,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//pkg/api/testing:go_default_library", "//pkg/kubectl:go_default_library", "//pkg/kubectl/cmd/testing:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", diff --git a/pkg/kubectl/cmd/create/create.go b/pkg/kubectl/cmd/create/create.go index 6e5fe28e1ff..165ea1d4143 100644 --- a/pkg/kubectl/cmd/create/create.go 
+++ b/pkg/kubectl/cmd/create/create.go @@ -241,7 +241,7 @@ func (o *CreateOptions) RunCreate(f cmdutil.Factory, cmd *cobra.Command) error { if err != nil { return err } - if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), info.Object, cmdutil.InternalVersionJSONEncoder()); err != nil { + if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), info.Object, scheme.DefaultJSONEncoder()); err != nil { return cmdutil.AddSourceToErr("creating", info.Source, err) } @@ -428,7 +428,7 @@ func (o *CreateSubcommandOptions) Run() error { return err } - if err := kubectl.CreateOrUpdateAnnotation(o.CreateAnnotation, obj, cmdutil.InternalVersionJSONEncoder()); err != nil { + if err := kubectl.CreateOrUpdateAnnotation(o.CreateAnnotation, obj, scheme.DefaultJSONEncoder()); err != nil { return err } diff --git a/pkg/kubectl/cmd/create/create_test.go b/pkg/kubectl/cmd/create/create_test.go index ad19d4ff155..7b32c8621e4 100644 --- a/pkg/kubectl/cmd/create/create_test.go +++ b/pkg/kubectl/cmd/create/create_test.go @@ -26,7 +26,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/resource" "k8s.io/client-go/rest/fake" - apitesting "k8s.io/kubernetes/pkg/api/testing" cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" @@ -163,6 +162,7 @@ func initTestErrorHandler(t *testing.T) { } func testData() (*corev1.PodList, *corev1.ServiceList, *corev1.ReplicationControllerList) { + grace := int64(30) pods := &corev1.PodList{ ListMeta: metav1.ListMeta{ ResourceVersion: "15", @@ -170,11 +170,21 @@ func testData() (*corev1.PodList, *corev1.ServiceList, *corev1.ReplicationContro Items: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test", ResourceVersion: "10"}, - Spec: apitesting.V1DeepEqualSafePodSpec(), + Spec: corev1.PodSpec{ + RestartPolicy: 
corev1.RestartPolicyAlways, + DNSPolicy: corev1.DNSClusterFirst, + TerminationGracePeriodSeconds: &grace, + SecurityContext: &corev1.PodSecurityContext{}, + }, }, { ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "test", ResourceVersion: "11"}, - Spec: apitesting.V1DeepEqualSafePodSpec(), + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyAlways, + DNSPolicy: corev1.DNSClusterFirst, + TerminationGracePeriodSeconds: &grace, + SecurityContext: &corev1.PodSecurityContext{}, + }, }, }, } diff --git a/pkg/kubectl/cmd/diff.go b/pkg/kubectl/cmd/diff.go index 33a5dc64f81..60fba88666d 100644 --- a/pkg/kubectl/cmd/diff.go +++ b/pkg/kubectl/cmd/diff.go @@ -17,7 +17,6 @@ limitations under the License. package cmd import ( - "encoding/json" "fmt" "io" "io/ioutil" @@ -25,30 +24,28 @@ import ( "path/filepath" "github.com/ghodss/yaml" + "github.com/jonboulle/clockwork" "github.com/spf13/cobra" - "k8s.io/apimachinery/pkg/api/meta" + + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/client-go/dynamic" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/kubectl/apply/parse" - "k8s.io/kubernetes/pkg/kubectl/apply/strategy" + "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi" + "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" "k8s.io/utils/exec" ) var ( diffLong = templates.LongDesc(i18n.T(` - Diff configurations specified by filename or stdin between their local, - last-applied, live and/or "merged" versions. - - LOCAL and LIVE versions are diffed by default. Other available keywords - are MERGED and LAST. 
+ Diff configurations specified by filename or stdin between the current online + configuration, and the configuration as it would be if applied. Output is always YAML. @@ -56,52 +53,22 @@ var ( diff command. By default, the "diff" command available in your path will be run with "-u" (unicode) and "-N" (treat new files as empty) options.`)) diffExample = templates.Examples(i18n.T(` - # Diff resources included in pod.json. By default, it will diff LOCAL and LIVE versions - kubectl alpha diff -f pod.json + # Diff resources included in pod.json. + kubectl diff -f pod.json - # When one version is specified, diff that version against LIVE - cat service.yaml | kubectl alpha diff -f - MERGED - - # Or specify both versions - kubectl alpha diff -f pod.json -f service.yaml LAST LOCAL`)) + # Diff file read from stdin + cat service.yaml | kubectl diff -f -`)) ) type DiffOptions struct { FilenameOptions resource.FilenameOptions } -func isValidArgument(arg string) error { - switch arg { - case "LOCAL", "LIVE", "LAST", "MERGED": - return nil - default: - return fmt.Errorf(`Invalid parameter %q, must be either "LOCAL", "LIVE", "LAST" or "MERGED"`, arg) +func checkDiffArgs(cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return cmdutil.UsageErrorf(cmd, "Unexpected args: %v", args) } - -} - -func parseDiffArguments(args []string) (string, string, error) { - if len(args) > 2 { - return "", "", fmt.Errorf("Invalid number of arguments: expected at most 2.") - } - // Default values - from := "LOCAL" - to := "LIVE" - if len(args) > 0 { - from = args[0] - } - if len(args) > 1 { - to = args[1] - } - - if err := isValidArgument(to); err != nil { - return "", "", err - } - if err := isValidArgument(from); err != nil { - return "", "", err - } - - return from, to, nil + return nil } func NewCmdDiff(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { @@ -113,13 +80,12 @@ func NewCmdDiff(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.C cmd 
:= &cobra.Command{ Use: "diff -f FILENAME", DisableFlagsInUseLine: true, - Short: i18n.T("Diff different versions of configurations"), + Short: i18n.T("Diff live version against would-be applied version"), Long: diffLong, Example: diffExample, Run: func(cmd *cobra.Command, args []string) { - from, to, err := parseDiffArguments(args) - cmdutil.CheckErr(err) - cmdutil.CheckErr(RunDiff(f, &diff, &options, from, to)) + cmdutil.CheckErr(checkDiffArgs(cmd, args)) + cmdutil.CheckErr(RunDiff(f, &diff, &options)) }, } @@ -164,7 +130,7 @@ func (d *DiffProgram) Run(from, to string) error { type Printer struct{} // Print the object inside the writer w. -func (p *Printer) Print(obj map[string]interface{}, w io.Writer) error { +func (p *Printer) Print(obj runtime.Object, w io.Writer) error { if obj == nil { return nil } @@ -195,16 +161,12 @@ func NewDiffVersion(name string) (*DiffVersion, error) { }, nil } -func (v *DiffVersion) getObject(obj Object) (map[string]interface{}, error) { +func (v *DiffVersion) getObject(obj Object) (runtime.Object, error) { switch v.Name { case "LIVE": - return obj.Live() + return obj.Live(), nil case "MERGED": return obj.Merged() - case "LOCAL": - return obj.Local() - case "LAST": - return obj.Last() } return nil, fmt.Errorf("Unknown version: %v", v.Name) } @@ -254,10 +216,8 @@ func (d *Directory) Delete() error { // Object is an interface that let's you retrieve multiple version of // it. type Object interface { - Local() (map[string]interface{}, error) - Live() (map[string]interface{}, error) - Last() (map[string]interface{}, error) - Merged() (map[string]interface{}, error) + Live() runtime.Object + Merged() (runtime.Object, error) Name() string } @@ -265,80 +225,51 @@ type Object interface { // InfoObject is an implementation of the Object interface. It gets all // the information from the Info object. 
type InfoObject struct { - Remote *unstructured.Unstructured - Info *resource.Info - Encoder runtime.Encoder - Parser *parse.Factory + LocalObj runtime.Object + Info *resource.Info + Encoder runtime.Encoder + OpenAPI openapi.Resources } var _ Object = &InfoObject{} -func (obj InfoObject) toMap(data []byte) (map[string]interface{}, error) { - m := map[string]interface{}{} - if len(data) == 0 { - return m, nil - } - err := json.Unmarshal(data, &m) - return m, err +// Returns the live version of the object +func (obj InfoObject) Live() runtime.Object { + return obj.Info.Object } -func (obj InfoObject) Local() (map[string]interface{}, error) { - data, err := runtime.Encode(obj.Encoder, obj.Info.Object) - if err != nil { - return nil, err +// Returns the "merged" object, as it would look like if applied or +// created. +func (obj InfoObject) Merged() (runtime.Object, error) { + // Build the patcher, and then apply the patch with dry-run, unless the object doesn't exist, in which case we need to create it. + if obj.Live() == nil { + // Dry-run create if the object doesn't exist. + return resource.NewHelper(obj.Info.Client, obj.Info.Mapping).Create( + obj.Info.Namespace, + true, + obj.LocalObj, + &metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}}, + ) } - return obj.toMap(data) -} -func (obj InfoObject) Live() (map[string]interface{}, error) { - if obj.Remote == nil { - return nil, nil // Object doesn't exist on cluster. - } - return obj.Remote.UnstructuredContent(), nil -} - -func (obj InfoObject) Merged() (map[string]interface{}, error) { - local, err := obj.Local() + modified, err := kubectl.GetModifiedConfiguration(obj.LocalObj, false, unstructured.UnstructuredJSONScheme) if err != nil { return nil, err } - live, err := obj.Live() - if err != nil { - return nil, err + // This is using the patcher from apply, to keep the same behavior. + // We plan on replacing this with server-side apply when it becomes available. 
+ patcher := &patcher{ + mapping: obj.Info.Mapping, + helper: resource.NewHelper(obj.Info.Client, obj.Info.Mapping), + overwrite: true, + backOff: clockwork.NewRealClock(), + serverDryRun: true, + openapiSchema: obj.OpenAPI, } - last, err := obj.Last() - if err != nil { - return nil, err - } - - if live == nil || last == nil { - return local, nil // We probably don't have a live version, merged is local. - } - - elmt, err := obj.Parser.CreateElement(last, local, live) - if err != nil { - return nil, err - } - result, err := elmt.Merge(strategy.Create(strategy.Options{})) - return result.MergedResult.(map[string]interface{}), err -} - -func (obj InfoObject) Last() (map[string]interface{}, error) { - if obj.Remote == nil { - return nil, nil // No object is live, return empty - } - accessor, err := meta.Accessor(obj.Remote) - if err != nil { - return nil, err - } - annots := accessor.GetAnnotations() - if annots == nil { - return nil, nil // Not an error, just empty. - } - - return obj.toMap([]byte(annots[api.LastAppliedConfigAnnotation])) + _, result, err := patcher.patch(obj.Info.Object, modified, obj.Info.Source, obj.Info.Namespace, obj.Info.Name, nil) + return result, err } func (obj InfoObject) Name() string { @@ -389,61 +320,16 @@ func (d *Differ) TearDown() { d.To.Dir.Delete() // Ignore error } -type Downloader struct { - mapper meta.RESTMapper - dclient dynamic.Interface - ns string -} - -func NewDownloader(f cmdutil.Factory) (*Downloader, error) { - var err error - var d Downloader - - d.mapper, err = f.ToRESTMapper() - if err != nil { - return nil, err - } - d.dclient, err = f.DynamicClient() - if err != nil { - return nil, err - } - d.ns, _, _ = f.ToRawKubeConfigLoader().Namespace() - - return &d, nil -} - -func (d *Downloader) Download(info *resource.Info) (*unstructured.Unstructured, error) { - gvk := info.Object.GetObjectKind().GroupVersionKind() - mapping, err := d.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return nil, err - } 
- - var resource dynamic.ResourceInterface - switch mapping.Scope.Name() { - case meta.RESTScopeNameNamespace: - if info.Namespace == "" { - info.Namespace = d.ns - } - resource = d.dclient.Resource(mapping.Resource).Namespace(info.Namespace) - case meta.RESTScopeNameRoot: - resource = d.dclient.Resource(mapping.Resource) - } - - return resource.Get(info.Name, metav1.GetOptions{}) -} - // RunDiff uses the factory to parse file arguments, find the version to // diff, and find each Info object for each files, and runs against the // differ. -func RunDiff(f cmdutil.Factory, diff *DiffProgram, options *DiffOptions, from, to string) error { - openapi, err := f.OpenAPISchema() +func RunDiff(f cmdutil.Factory, diff *DiffProgram, options *DiffOptions) error { + schema, err := f.OpenAPISchema() if err != nil { return err } - parser := &parse.Factory{Resources: openapi} - differ, err := NewDiffer(from, to) + differ, err := NewDiffer("LIVE", "MERGED") if err != nil { return err } @@ -460,29 +346,30 @@ func RunDiff(f cmdutil.Factory, diff *DiffProgram, options *DiffOptions, from, t Unstructured(). NamespaceParam(cmdNamespace).DefaultNamespace(). FilenameParam(enforceNamespace, &options.FilenameOptions). - Local(). Flatten(). 
Do() if err := r.Err(); err != nil { return err } - dl, err := NewDownloader(f) - if err != nil { - return err - } - err = r.Visit(func(info *resource.Info, err error) error { if err != nil { return err } - remote, _ := dl.Download(info) + local := info.Object.DeepCopyObject() + if err := info.Get(); err != nil { + if !errors.IsNotFound(err) { + return err + } + info.Object = nil + } + obj := InfoObject{ - Remote: remote, - Info: info, - Parser: parser, - Encoder: cmdutil.InternalVersionJSONEncoder(), + LocalObj: local, + Info: info, + Encoder: scheme.DefaultJSONEncoder(), + OpenAPI: schema, } return differ.Diff(obj, printer) @@ -491,7 +378,8 @@ func RunDiff(f cmdutil.Factory, diff *DiffProgram, options *DiffOptions, from, t return err } - differ.Run(diff) + // Error ignore on purpose. diff(1) for example, returns an error if there is any diff. + _ = differ.Run(diff) return nil } diff --git a/pkg/kubectl/cmd/diff_test.go b/pkg/kubectl/cmd/diff_test.go index af4b56e0fba..c7e14d53db5 100644 --- a/pkg/kubectl/cmd/diff_test.go +++ b/pkg/kubectl/cmd/diff_test.go @@ -25,16 +25,16 @@ import ( "strings" "testing" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/utils/exec" ) type FakeObject struct { name string - local map[string]interface{} merged map[string]interface{} live map[string]interface{} - last map[string]interface{} } var _ Object = &FakeObject{} @@ -43,97 +43,12 @@ func (f *FakeObject) Name() string { return f.name } -func (f *FakeObject) Local() (map[string]interface{}, error) { - return f.local, nil +func (f *FakeObject) Merged() (runtime.Object, error) { + return &unstructured.Unstructured{Object: f.merged}, nil } -func (f *FakeObject) Merged() (map[string]interface{}, error) { - return f.merged, nil -} - -func (f *FakeObject) Live() (map[string]interface{}, error) { - return f.live, nil -} - -func (f *FakeObject) Last() (map[string]interface{}, error) { - return 
f.last, nil -} - -func TestArguments(t *testing.T) { - tests := []struct { - // Input - args []string - - // Outputs - from string - to string - err string - }{ - // Defaults - { - args: []string{}, - from: "LOCAL", - to: "LIVE", - err: "", - }, - // One valid argument - { - args: []string{"MERGED"}, - from: "MERGED", - to: "LIVE", - err: "", - }, - // One invalid argument - { - args: []string{"WRONG"}, - from: "", - to: "", - err: `Invalid parameter "WRONG", must be either "LOCAL", "LIVE", "LAST" or "MERGED"`, - }, - // Two valid arguments - { - args: []string{"MERGED", "LAST"}, - from: "MERGED", - to: "LAST", - err: "", - }, - // Two same arguments is fine - { - args: []string{"MERGED", "MERGED"}, - from: "MERGED", - to: "MERGED", - err: "", - }, - // Second argument is invalid - { - args: []string{"MERGED", "WRONG"}, - from: "", - to: "", - err: `Invalid parameter "WRONG", must be either "LOCAL", "LIVE", "LAST" or "MERGED"`, - }, - // Three arguments - { - args: []string{"MERGED", "LIVE", "LAST"}, - from: "", - to: "", - err: `Invalid number of arguments: expected at most 2.`, - }, - } - - for _, test := range tests { - from, to, e := parseDiffArguments(test.args) - err := "" - if e != nil { - err = e.Error() - } - if from != test.from || to != test.to || err != test.err { - t.Errorf("parseDiffArguments(%v) = (%v, %v, %v), expected (%v, %v, %v)", - test.args, - from, to, err, - test.from, test.to, test.err, - ) - } - } +func (f *FakeObject) Live() runtime.Object { + return &unstructured.Unstructured{Object: f.live} } func TestDiffProgram(t *testing.T) { @@ -155,11 +70,11 @@ func TestDiffProgram(t *testing.T) { func TestPrinter(t *testing.T) { printer := Printer{} - obj := map[string]interface{}{ + obj := &unstructured.Unstructured{Object: map[string]interface{}{ "string": "string", "list": []int{1, 2, 3}, "int": 12, - } + }} buf := bytes.Buffer{} printer.Print(obj, &buf) want := `int: 12 @@ -175,7 +90,7 @@ string: string } func TestDiffVersion(t *testing.T) { - 
diff, err := NewDiffVersion("LOCAL") + diff, err := NewDiffVersion("MERGED") if err != nil { t.Fatal(err) } @@ -183,8 +98,6 @@ func TestDiffVersion(t *testing.T) { obj := FakeObject{ name: "bla", - local: map[string]interface{}{"local": true}, - last: map[string]interface{}{"last": true}, live: map[string]interface{}{"live": true}, merged: map[string]interface{}{"merged": true}, } @@ -196,7 +109,7 @@ func TestDiffVersion(t *testing.T) { if err != nil { t.Fatal(err) } - econtent := "local: true\n" + econtent := "merged: true\n" if string(fcontent) != econtent { t.Fatalf("File has %q, expected %q", string(fcontent), econtent) } @@ -248,7 +161,7 @@ func TestDirectory(t *testing.T) { } func TestDiffer(t *testing.T) { - diff, err := NewDiffer("LOCAL", "LIVE") + diff, err := NewDiffer("LIVE", "MERGED") if err != nil { t.Fatal(err) } @@ -256,8 +169,6 @@ func TestDiffer(t *testing.T) { obj := FakeObject{ name: "bla", - local: map[string]interface{}{"local": true}, - last: map[string]interface{}{"last": true}, live: map[string]interface{}{"live": true}, merged: map[string]interface{}{"merged": true}, } @@ -269,7 +180,7 @@ func TestDiffer(t *testing.T) { if err != nil { t.Fatal(err) } - econtent := "local: true\n" + econtent := "live: true\n" if string(fcontent) != econtent { t.Fatalf("File has %q, expected %q", string(fcontent), econtent) } @@ -278,7 +189,7 @@ func TestDiffer(t *testing.T) { if err != nil { t.Fatal(err) } - econtent = "live: true\n" + econtent = "merged: true\n" if string(fcontent) != econtent { t.Fatalf("File has %q, expected %q", string(fcontent), econtent) } diff --git a/pkg/kubectl/cmd/drain.go b/pkg/kubectl/cmd/drain.go index 1c0ddc1a6ca..7496e205450 100644 --- a/pkg/kubectl/cmd/drain.go +++ b/pkg/kubectl/cmd/drain.go @@ -46,7 +46,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/api/legacyscheme" 
"k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" @@ -284,7 +283,7 @@ func (o *DrainOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st } builder := f.NewBuilder(). - WithScheme(legacyscheme.Scheme). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). NamespaceParam(o.Namespace).DefaultNamespace(). ResourceNames("nodes", args...). SingleResourceType(). @@ -403,8 +402,7 @@ func (o *DrainOptions) unreplicatedFilter(pod corev1.Pod) (bool, *warning, *fata func (o *DrainOptions) daemonsetFilter(pod corev1.Pod) (bool, *warning, *fatal) { // Note that we return false in cases where the pod is DaemonSet managed, - // regardless of flags. We never delete them, the only question is whether - // their presence constitutes an error. + // regardless of flags. // // The exception is for pods that are orphaned (the referencing // management resource - including DaemonSet - is not found). @@ -413,12 +411,17 @@ func (o *DrainOptions) daemonsetFilter(pod corev1.Pod) (bool, *warning, *fatal) if controllerRef == nil || controllerRef.Kind != "DaemonSet" { return true, nil, nil } + // Any finished pod can be removed. + if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed { + return true, nil, nil + } if _, err := o.client.ExtensionsV1beta1().DaemonSets(pod.Namespace).Get(controllerRef.Name, metav1.GetOptions{}); err != nil { // remove orphaned pods with a warning if --force is used if apierrors.IsNotFound(err) && o.Force { return true, &warning{err.Error()}, nil } + return false, nil, &fatal{err.Error()} } @@ -450,9 +453,14 @@ func (o *DrainOptions) localStorageFilter(pod corev1.Pod) (bool, *warning, *fata if !hasLocalStorage(pod) { return true, nil, nil } + // Any finished pod can be removed. 
+ if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed { + return true, nil, nil + } if !o.DeleteLocalData { return false, nil, &fatal{kLocalStorageFatal} } + return true, &warning{kLocalStorageWarning}, nil } @@ -723,7 +731,7 @@ func (o *DrainOptions) RunCordonOrUncordon(desired bool) error { for _, nodeInfo := range o.nodeInfos { if nodeInfo.Mapping.GroupVersionKind.Kind == "Node" { - obj, err := legacyscheme.Scheme.ConvertToVersion(nodeInfo.Object, nodeInfo.Mapping.GroupVersionKind.GroupVersion()) + obj, err := scheme.Scheme.ConvertToVersion(nodeInfo.Object, nodeInfo.Mapping.GroupVersionKind.GroupVersion()) if err != nil { fmt.Printf("error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err) continue diff --git a/pkg/kubectl/cmd/drain_test.go b/pkg/kubectl/cmd/drain_test.go index 0605d707ee4..2fa2e546b59 100644 --- a/pkg/kubectl/cmd/drain_test.go +++ b/pkg/kubectl/cmd/drain_test.go @@ -33,7 +33,9 @@ import ( "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -45,9 +47,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/client-go/rest/fake" "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/apis/batch" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" @@ -238,7 +237,7 @@ func TestDrain(t *testing.T) { labels := make(map[string]string) labels["my_key"] = "my_value" - rc := api.ReplicationController{ + rc := corev1.ReplicationController{ ObjectMeta: metav1.ObjectMeta{ Name: "rc", Namespace: "default", @@ -246,7 +245,7 @@ func TestDrain(t *testing.T) { 
Labels: labels, SelfLink: testapi.Default.SelfLink("replicationcontrollers", "rc"), }, - Spec: api.ReplicationControllerSpec{ + Spec: corev1.ReplicationControllerSpec{ Selector: labels, }, } @@ -274,14 +273,14 @@ func TestDrain(t *testing.T) { }, } - ds := extensions.DaemonSet{ + ds := extensionsv1beta1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: "ds", Namespace: "default", CreationTimestamp: metav1.Time{Time: time.Now()}, SelfLink: testapi.Default.SelfLink("daemonsets", "ds"), }, - Spec: extensions.DaemonSetSpec{ + Spec: extensionsv1beta1.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: labels}, }, } @@ -308,6 +307,31 @@ func TestDrain(t *testing.T) { }, } + ds_terminated_pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + Namespace: "default", + CreationTimestamp: metav1.Time{Time: time.Now()}, + Labels: labels, + SelfLink: testapi.Default.SelfLink("pods", "bar"), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "extensions/v1beta1", + Kind: "DaemonSet", + Name: "ds", + BlockOwnerDeletion: boolptr(true), + Controller: boolptr(true), + }, + }, + }, + Spec: corev1.PodSpec{ + NodeName: "node", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + }, + } + ds_pod_with_emptyDir := corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "bar", @@ -349,14 +373,14 @@ func TestDrain(t *testing.T) { }, } - job := batch.Job{ + job := batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "job", Namespace: "default", CreationTimestamp: metav1.Time{Time: time.Now()}, SelfLink: testapi.Default.SelfLink("jobs", "job"), }, - Spec: batch.JobSpec{ + Spec: batchv1.JobSpec{ Selector: &metav1.LabelSelector{MatchLabels: labels}, }, } @@ -378,9 +402,49 @@ func TestDrain(t *testing.T) { }, }, }, + Spec: corev1.PodSpec{ + NodeName: "node", + Volumes: []corev1.Volume{ + { + Name: "scratch", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: ""}}, + }, + }, + }, } - rs := extensions.ReplicaSet{ + 
terminated_job_pod_with_local_storage := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + Namespace: "default", + CreationTimestamp: metav1.Time{Time: time.Now()}, + Labels: labels, + SelfLink: testapi.Default.SelfLink("pods", "bar"), + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "v1", + Kind: "Job", + Name: "job", + BlockOwnerDeletion: boolptr(true), + Controller: boolptr(true), + }, + }, + }, + Spec: corev1.PodSpec{ + NodeName: "node", + Volumes: []corev1.Volume{ + { + Name: "scratch", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: ""}}, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + }, + } + + rs := extensionsv1beta1.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ Name: "rs", Namespace: "default", @@ -388,7 +452,7 @@ func TestDrain(t *testing.T) { Labels: labels, SelfLink: testapi.Default.SelfLink("replicasets", "rs"), }, - Spec: extensions.ReplicaSetSpec{ + Spec: extensionsv1beta1.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: labels}, }, } @@ -444,14 +508,34 @@ func TestDrain(t *testing.T) { }, }, } + emptydir_terminated_pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + Namespace: "default", + CreationTimestamp: metav1.Time{Time: time.Now()}, + Labels: labels, + }, + Spec: corev1.PodSpec{ + NodeName: "node", + Volumes: []corev1.Volume{ + { + Name: "scratch", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: ""}}, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodFailed, + }, + } tests := []struct { description string node *corev1.Node expected *corev1.Node pods []corev1.Pod - rcs []api.ReplicationController - replicaSets []extensions.ReplicaSet + rcs []corev1.ReplicationController + replicaSets []extensionsv1beta1.ReplicaSet args []string expectWarning string expectFatal bool @@ -462,7 +546,7 @@ func TestDrain(t *testing.T) { node: node, expected: cordoned_node, pods: []corev1.Pod{rc_pod}, - rcs: 
[]api.ReplicationController{rc}, + rcs: []corev1.ReplicationController{rc}, args: []string{"node"}, expectFatal: false, expectDelete: true, @@ -472,17 +556,27 @@ func TestDrain(t *testing.T) { node: node, expected: cordoned_node, pods: []corev1.Pod{ds_pod}, - rcs: []api.ReplicationController{rc}, + rcs: []corev1.ReplicationController{rc}, args: []string{"node"}, expectFatal: true, expectDelete: false, }, + { + description: "DS-managed terminated pod", + node: node, + expected: cordoned_node, + pods: []corev1.Pod{ds_terminated_pod}, + rcs: []corev1.ReplicationController{rc}, + args: []string{"node"}, + expectFatal: false, + expectDelete: true, + }, { description: "orphaned DS-managed pod", node: node, expected: cordoned_node, pods: []corev1.Pod{orphaned_ds_pod}, - rcs: []api.ReplicationController{}, + rcs: []corev1.ReplicationController{}, args: []string{"node"}, expectFatal: true, expectDelete: false, @@ -492,7 +586,7 @@ func TestDrain(t *testing.T) { node: node, expected: cordoned_node, pods: []corev1.Pod{orphaned_ds_pod}, - rcs: []api.ReplicationController{}, + rcs: []corev1.ReplicationController{}, args: []string{"node", "--force"}, expectFatal: false, expectDelete: true, @@ -502,7 +596,7 @@ func TestDrain(t *testing.T) { node: node, expected: cordoned_node, pods: []corev1.Pod{ds_pod}, - rcs: []api.ReplicationController{rc}, + rcs: []corev1.ReplicationController{rc}, args: []string{"node", "--ignore-daemonsets"}, expectFatal: false, expectDelete: false, @@ -512,18 +606,28 @@ func TestDrain(t *testing.T) { node: node, expected: cordoned_node, pods: []corev1.Pod{ds_pod_with_emptyDir}, - rcs: []api.ReplicationController{rc}, + rcs: []corev1.ReplicationController{rc}, args: []string{"node", "--ignore-daemonsets"}, expectWarning: "WARNING: Ignoring DaemonSet-managed pods: bar\n", expectFatal: false, expectDelete: false, }, { - description: "Job-managed pod", + description: "Job-managed pod with local storage", node: node, expected: cordoned_node, pods: 
[]corev1.Pod{job_pod}, - rcs: []api.ReplicationController{rc}, + rcs: []corev1.ReplicationController{rc}, + args: []string{"node", "--force", "--delete-local-data=true"}, + expectFatal: false, + expectDelete: true, + }, + { + description: "Job-managed terminated pod", + node: node, + expected: cordoned_node, + pods: []corev1.Pod{terminated_job_pod_with_local_storage}, + rcs: []corev1.ReplicationController{rc}, args: []string{"node"}, expectFatal: false, expectDelete: true, @@ -533,7 +637,7 @@ func TestDrain(t *testing.T) { node: node, expected: cordoned_node, pods: []corev1.Pod{rs_pod}, - replicaSets: []extensions.ReplicaSet{rs}, + replicaSets: []extensionsv1beta1.ReplicaSet{rs}, args: []string{"node"}, expectFatal: false, expectDelete: true, @@ -543,7 +647,7 @@ func TestDrain(t *testing.T) { node: node, expected: cordoned_node, pods: []corev1.Pod{naked_pod}, - rcs: []api.ReplicationController{}, + rcs: []corev1.ReplicationController{}, args: []string{"node"}, expectFatal: true, expectDelete: false, @@ -553,7 +657,7 @@ func TestDrain(t *testing.T) { node: node, expected: cordoned_node, pods: []corev1.Pod{naked_pod}, - rcs: []api.ReplicationController{}, + rcs: []corev1.ReplicationController{}, args: []string{"node", "--force"}, expectFatal: false, expectDelete: true, @@ -567,6 +671,16 @@ func TestDrain(t *testing.T) { expectFatal: true, expectDelete: false, }, + { + description: "terminated pod with emptyDir", + node: node, + expected: cordoned_node, + pods: []corev1.Pod{emptydir_terminated_pod}, + rcs: []corev1.ReplicationController{rc}, + args: []string{"node"}, + expectFatal: false, + expectDelete: true, + }, { description: "pod with EmptyDir and --delete-local-data", node: node, @@ -581,7 +695,7 @@ func TestDrain(t *testing.T) { node: node, expected: cordoned_node, pods: []corev1.Pod{}, - rcs: []api.ReplicationController{rc}, + rcs: []corev1.ReplicationController{rc}, args: []string{"node"}, expectFatal: false, expectDelete: false, @@ -651,7 +765,7 @@ func 
TestDrain(t *testing.T) { case m.isFor("GET", "/namespaces/default/daemonsets/ds"): return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(testapi.Extensions.Codec(), &ds)}, nil case m.isFor("GET", "/namespaces/default/daemonsets/missing-ds"): - return &http.Response{StatusCode: 404, Header: defaultHeader(), Body: objBody(testapi.Extensions.Codec(), &extensions.DaemonSet{})}, nil + return &http.Response{StatusCode: 404, Header: defaultHeader(), Body: objBody(testapi.Extensions.Codec(), &extensionsv1beta1.DaemonSet{})}, nil case m.isFor("GET", "/namespaces/default/jobs/job"): return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(testapi.Batch.Codec(), &job)}, nil case m.isFor("GET", "/namespaces/default/replicasets/rs"): @@ -670,7 +784,7 @@ func TestDrain(t *testing.T) { } return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &corev1.PodList{Items: test.pods})}, nil case m.isFor("GET", "/replicationcontrollers"): - return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &api.ReplicationControllerList{Items: test.rcs})}, nil + return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &corev1.ReplicationControllerList{Items: test.rcs})}, nil case m.isFor("PATCH", "/nodes/node"): data, err := ioutil.ReadAll(req.Body) if err != nil { diff --git a/pkg/kubectl/cmd/exec.go b/pkg/kubectl/cmd/exec.go index 9e17037557a..25899ce8647 100644 --- a/pkg/kubectl/cmd/exec.go +++ b/pkg/kubectl/cmd/exec.go @@ -30,10 +30,9 @@ import ( coreclient "k8s.io/client-go/kubernetes/typed/core/v1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" - "k8s.io/kubernetes/pkg/api/legacyscheme" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" 
"k8s.io/kubernetes/pkg/kubectl/util/term" "k8s.io/kubernetes/pkg/util/interrupt" @@ -314,14 +313,14 @@ func (p *ExecOptions) Run() error { Namespace(pod.Namespace). SubResource("exec"). Param("container", containerName) - req.VersionedParams(&api.PodExecOptions{ + req.VersionedParams(&corev1.PodExecOptions{ Container: containerName, Command: p.Command, Stdin: p.Stdin, Stdout: p.Out != nil, Stderr: p.ErrOut != nil, TTY: t.Raw, - }, legacyscheme.ParameterCodec) + }, scheme.ParameterCodec) return p.Executor.Execute("POST", req.URL(), p.Config, p.In, p.Out, p.ErrOut, t.Raw, sizeQueue) } diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index 6a9222838fd..6341d345ec0 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -325,7 +325,7 @@ func (o *ExposeServiceOptions) RunExpose(cmd *cobra.Command, args []string) erro if o.DryRun { return o.PrintObj(object, o.Out) } - if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), object, cmdutil.InternalVersionJSONEncoder()); err != nil { + if err := kubectl.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), object, scheme.DefaultJSONEncoder()); err != nil { return err } diff --git a/pkg/kubectl/cmd/get/get_test.go b/pkg/kubectl/cmd/get/get_test.go index b109c48f4d6..7311a5770f7 100644 --- a/pkg/kubectl/cmd/get/get_test.go +++ b/pkg/kubectl/cmd/get/get_test.go @@ -896,6 +896,7 @@ func TestGetMultipleTypeObjectsAsList(t *testing.T) { "spec": { "containers": null, "dnsPolicy": "ClusterFirst", + "enableServiceLinks": true, "restartPolicy": "Always", "securityContext": {}, "terminationGracePeriodSeconds": 30 @@ -914,6 +915,7 @@ func TestGetMultipleTypeObjectsAsList(t *testing.T) { "spec": { "containers": null, "dnsPolicy": "ClusterFirst", + "enableServiceLinks": true, "restartPolicy": "Always", "securityContext": {}, "terminationGracePeriodSeconds": 30 diff --git a/pkg/kubectl/cmd/label.go b/pkg/kubectl/cmd/label.go index 
4aedac04ef1..3ef0eb6f5b6 100644 --- a/pkg/kubectl/cmd/label.go +++ b/pkg/kubectl/cmd/label.go @@ -252,18 +252,25 @@ func (o *LabelOptions) RunLabel() error { } var outputObj runtime.Object - dataChangeMsg := "not labeled" + var dataChangeMsg string + obj := info.Object + oldData, err := json.Marshal(obj) + if err != nil { + return err + } if o.dryrun || o.local || o.list { - err = labelFunc(info.Object, o.overwrite, o.resourceVersion, o.newLabels, o.removeLabels) + err = labelFunc(obj, o.overwrite, o.resourceVersion, o.newLabels, o.removeLabels) if err != nil { return err } - dataChangeMsg = "labeled" + newObj, err := json.Marshal(obj) + if err != nil { + return err + } + dataChangeMsg = updateDataChangeMsg(oldData, newObj) outputObj = info.Object } else { - obj := info.Object name, namespace := info.Name, info.Namespace - oldData, err := json.Marshal(obj) if err != nil { return err } @@ -283,14 +290,12 @@ func (o *LabelOptions) RunLabel() error { if err := o.Recorder.Record(obj); err != nil { glog.V(4).Infof("error recording current command: %v", err) } - newData, err := json.Marshal(obj) + newObj, err := json.Marshal(obj) if err != nil { return err } - if !reflect.DeepEqual(oldData, newData) { - dataChangeMsg = "labeled" - } - patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + dataChangeMsg = updateDataChangeMsg(oldData, newObj) + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newObj) createdPatch := err == nil if err != nil { glog.V(2).Infof("couldn't compute patch: %v", err) @@ -344,6 +349,14 @@ func (o *LabelOptions) RunLabel() error { }) } +func updateDataChangeMsg(oldObj []byte, newObj []byte) string { + msg := "not labeled" + if !reflect.DeepEqual(oldObj, newObj) { + msg = "labeled" + } + return msg +} + func validateNoOverwrites(accessor metav1.Object, labels map[string]string) error { allErrs := []error{} for key := range labels { diff --git a/pkg/kubectl/cmd/plugin.go b/pkg/kubectl/cmd/plugin.go index 31b1b29ef0a..76b3aa89592 
100644 --- a/pkg/kubectl/cmd/plugin.go +++ b/pkg/kubectl/cmd/plugin.go @@ -26,6 +26,7 @@ import ( "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -108,7 +109,8 @@ func (o *PluginListOptions) Run() error { pluginsFound := false isFirstFile := true pluginWarnings := 0 - for _, dir := range filepath.SplitList(os.Getenv(path)) { + paths := sets.NewString(filepath.SplitList(os.Getenv(path))...) + for _, dir := range paths.List() { files, err := ioutil.ReadDir(dir) if err != nil { continue diff --git a/pkg/kubectl/cmd/profiling.go b/pkg/kubectl/cmd/profiling.go new file mode 100644 index 00000000000..2a1c1ce3c15 --- /dev/null +++ b/pkg/kubectl/cmd/profiling.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "fmt" + "os" + "runtime" + "runtime/pprof" + + "github.com/spf13/pflag" +) + +var ( + profileName string + profileOutput string +) + +func addProfilingFlags(flags *pflag.FlagSet) { + flags.StringVar(&profileName, "profile", "none", "Name of profile to capture. 
One of (none|cpu|heap|goroutine|threadcreate|block|mutex)") + flags.StringVar(&profileOutput, "profile-output", "profile.pprof", "Name of the file to write the profile to") +} + +func initProfiling() error { + switch profileName { + case "none": + return nil + case "cpu": + f, err := os.Create(profileOutput) + if err != nil { + return err + } + return pprof.StartCPUProfile(f) + // Block and mutex profiles need a call to Set{Block,Mutex}ProfileRate to + // output anything. We choose to sample all events. + case "block": + runtime.SetBlockProfileRate(1) + return nil + case "mutex": + runtime.SetMutexProfileFraction(1) + return nil + default: + // Check the profile name is valid. + if profile := pprof.Lookup(profileName); profile == nil { + return fmt.Errorf("unknown profile '%s'", profileName) + } + } + + return nil +} + +func flushProfiling() error { + switch profileName { + case "none": + return nil + case "cpu": + pprof.StopCPUProfile() + case "heap": + runtime.GC() + fallthrough + default: + profile := pprof.Lookup(profileName) + if profile == nil { + return nil + } + f, err := os.Create(profileOutput) + if err != nil { + return err + } + profile.WriteTo(f, 0) + } + + return nil +} diff --git a/pkg/kubectl/cmd/replace.go b/pkg/kubectl/cmd/replace.go index d5ed3379f65..ed28a2c0ec3 100644 --- a/pkg/kubectl/cmd/replace.go +++ b/pkg/kubectl/cmd/replace.go @@ -35,6 +35,7 @@ import ( "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" + "k8s.io/kubernetes/pkg/kubectl/scheme" "k8s.io/kubernetes/pkg/kubectl/util/i18n" "k8s.io/kubernetes/pkg/kubectl/validation" ) @@ -218,7 +219,7 @@ func (o *ReplaceOptions) Run() error { return err } - if err := kubectl.CreateOrUpdateAnnotation(o.createAnnotation, info.Object, cmdutil.InternalVersionJSONEncoder()); err != nil { + if err := kubectl.CreateOrUpdateAnnotation(o.createAnnotation, info.Object, scheme.DefaultJSONEncoder()); err != nil { return 
cmdutil.AddSourceToErr("replacing", info.Source, err) } @@ -309,7 +310,7 @@ func (o *ReplaceOptions) forceReplace() error { return err } - if err := kubectl.CreateOrUpdateAnnotation(o.createAnnotation, info.Object, cmdutil.InternalVersionJSONEncoder()); err != nil { + if err := kubectl.CreateOrUpdateAnnotation(o.createAnnotation, info.Object, scheme.DefaultJSONEncoder()); err != nil { return err } diff --git a/pkg/kubectl/cmd/rollout/BUILD b/pkg/kubectl/cmd/rollout/BUILD index 3860f24fdac..95e238cd739 100644 --- a/pkg/kubectl/cmd/rollout/BUILD +++ b/pkg/kubectl/cmd/rollout/BUILD @@ -68,9 +68,9 @@ go_test( srcs = ["rollout_pause_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/api/legacyscheme:go_default_library", - "//pkg/apis/extensions:go_default_library", "//pkg/kubectl/cmd/testing:go_default_library", + "//pkg/kubectl/scheme:go_default_library", + "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/kubectl/cmd/rollout/rollout_history.go b/pkg/kubectl/cmd/rollout/rollout_history.go index a2a79449471..e58c9262fde 100644 --- a/pkg/kubectl/cmd/rollout/rollout_history.go +++ b/pkg/kubectl/cmd/rollout/rollout_history.go @@ -81,6 +81,7 @@ func NewCmdRolloutHistory(f cmdutil.Factory, streams genericclioptions.IOStreams Example: history_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) cmdutil.CheckErr(o.Run()) }, ValidArgs: validArgs, @@ -118,7 +119,7 @@ func (o *RolloutHistoryOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, func (o *RolloutHistoryOptions) Validate() error { if len(o.Resources) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { - return fmt.Errorf("Required resource not specified.") + return fmt.Errorf("required 
resource not specified") } if o.Revision < 0 { return fmt.Errorf("revision must be a positive integer: %v", o.Revision) diff --git a/pkg/kubectl/cmd/rollout/rollout_pause.go b/pkg/kubectl/cmd/rollout/rollout_pause.go index e9ba5a86ae1..2c53bd24778 100644 --- a/pkg/kubectl/cmd/rollout/rollout_pause.go +++ b/pkg/kubectl/cmd/rollout/rollout_pause.go @@ -26,7 +26,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubectl/cmd/set" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -35,9 +34,9 @@ import ( "k8s.io/kubernetes/pkg/kubectl/util/i18n" ) -// PauseConfig is the start of the data required to perform the operation. As new fields are added, add them here instead of +// PauseOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of // referencing the cmd.Flags() -type PauseConfig struct { +type PauseOptions struct { PrintFlags *genericclioptions.PrintFlags ToPrinter func(string) (printers.ResourcePrinter, error) @@ -67,7 +66,7 @@ var ( ) func NewCmdRolloutPause(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command { - o := &PauseConfig{ + o := &PauseOptions{ PrintFlags: genericclioptions.NewPrintFlags("paused").WithTypeSetter(scheme.Scheme), IOStreams: streams, } @@ -81,16 +80,9 @@ func NewCmdRolloutPause(f cmdutil.Factory, streams genericclioptions.IOStreams) Long: pause_long, Example: pause_example, Run: func(cmd *cobra.Command, args []string) { - allErrs := []error{} - err := o.Complete(f, cmd, args) - if err != nil { - allErrs = append(allErrs, err) - } - err = o.RunPause() - if err != nil { - allErrs = append(allErrs, err) - } - cmdutil.CheckErr(utilerrors.Flatten(utilerrors.NewAggregate(allErrs))) + cmdutil.CheckErr(o.Complete(f, cmd, args)) + 
cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunPause()) }, ValidArgs: validArgs, } @@ -102,15 +94,12 @@ func NewCmdRolloutPause(f cmdutil.Factory, streams genericclioptions.IOStreams) return cmd } -func (o *PauseConfig) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { - if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { - return cmdutil.UsageErrorf(cmd, "%s", cmd.Use) - } - +func (o *PauseOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { o.Pauser = polymorphichelpers.ObjectPauserFn var err error - if o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace(); err != nil { + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { return err } @@ -125,9 +114,16 @@ func (o *PauseConfig) Complete(f cmdutil.Factory, cmd *cobra.Command, args []str return nil } -func (o PauseConfig) RunPause() error { +func (o *PauseOptions) Validate() error { + if len(o.Resources) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { + return fmt.Errorf("required resource not specified") + } + return nil +} + +func (o PauseOptions) RunPause() error { r := o.Builder(). - WithScheme(legacyscheme.Scheme). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). NamespaceParam(o.Namespace).DefaultNamespace(). FilenameParam(o.EnforceNamespace, &o.FilenameOptions). ResourceTypeOrNameArgs(true, o.Resources...). 
@@ -150,7 +146,7 @@ func (o PauseConfig) RunPause() error { allErrs = append(allErrs, err) } - for _, patch := range set.CalculatePatches(infos, cmdutil.InternalVersionJSONEncoder(), set.PatchFn(o.Pauser)) { + for _, patch := range set.CalculatePatches(infos, scheme.DefaultJSONEncoder(), set.PatchFn(o.Pauser)) { info := patch.Info if patch.Err != nil { diff --git a/pkg/kubectl/cmd/rollout/rollout_pause_test.go b/pkg/kubectl/cmd/rollout/rollout_pause_test.go index 2e7886075cb..03663aa4e67 100644 --- a/pkg/kubectl/cmd/rollout/rollout_pause_test.go +++ b/pkg/kubectl/cmd/rollout/rollout_pause_test.go @@ -23,23 +23,23 @@ import ( "net/url" "testing" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/cli-runtime/pkg/genericclioptions" restclient "k8s.io/client-go/rest" "k8s.io/client-go/rest/fake" - "k8s.io/kubernetes/pkg/api/legacyscheme" - extensions "k8s.io/kubernetes/pkg/apis/extensions" cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing" + "k8s.io/kubernetes/pkg/kubectl/scheme" ) var rolloutPauseGroupVersionEncoder = schema.GroupVersion{Group: "extensions", Version: "v1beta1"} -var rolloutPauseGroupVersionDecoder = schema.GroupVersion{Group: "extensions", Version: runtime.APIVersionInternal} +var rolloutPauseGroupVersionDecoder = schema.GroupVersion{Group: "extensions", Version: "v1beta1"} func TestRolloutPause(t *testing.T) { deploymentName := "deployment/nginx-deployment" - ns := legacyscheme.Codecs + ns := scheme.Codecs tf := cmdtesting.NewTestFactory().WithNamespace("test") info, _ := runtime.SerializerInfoForMediaType(ns.SupportedMediaTypes(), runtime.ContentTypeJSON) @@ -50,7 +50,7 @@ func TestRolloutPause(t *testing.T) { Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { switch p, m := req.URL.Path, req.Method; { case p == "/namespaces/test/deployments/nginx-deployment" && (m == "GET" || m == "PATCH"): - 
responseDeployment := &extensions.Deployment{} + responseDeployment := &extensionsv1beta1.Deployment{} responseDeployment.Name = deploymentName body := ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(encoder, responseDeployment)))) return &http.Response{StatusCode: http.StatusOK, Header: defaultHeader(), Body: body}, nil diff --git a/pkg/kubectl/cmd/rollout/rollout_resume.go b/pkg/kubectl/cmd/rollout/rollout_resume.go index 25443355512..d94a3858ce7 100644 --- a/pkg/kubectl/cmd/rollout/rollout_resume.go +++ b/pkg/kubectl/cmd/rollout/rollout_resume.go @@ -26,7 +26,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubectl/cmd/set" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -84,16 +83,9 @@ func NewCmdRolloutResume(f cmdutil.Factory, streams genericclioptions.IOStreams) Long: resume_long, Example: resume_example, Run: func(cmd *cobra.Command, args []string) { - allErrs := []error{} - err := o.Complete(f, cmd, args) - if err != nil { - allErrs = append(allErrs, err) - } - err = o.RunResume() - if err != nil { - allErrs = append(allErrs, err) - } - cmdutil.CheckErr(utilerrors.Flatten(utilerrors.NewAggregate(allErrs))) + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunResume()) }, ValidArgs: validArgs, } @@ -105,10 +97,6 @@ func NewCmdRolloutResume(f cmdutil.Factory, streams genericclioptions.IOStreams) } func (o *ResumeOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { - if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { - return cmdutil.UsageErrorf(cmd, "%s", cmd.Use) - } - o.Resources = args o.Resumer = polymorphichelpers.ObjectResumerFn @@ -129,9 +117,16 @@ func (o *ResumeOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []s 
return nil } +func (o *ResumeOptions) Validate() error { + if len(o.Resources) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { + return fmt.Errorf("required resource not specified") + } + return nil +} + func (o ResumeOptions) RunResume() error { r := o.Builder(). - WithScheme(legacyscheme.Scheme). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). NamespaceParam(o.Namespace).DefaultNamespace(). FilenameParam(o.EnforceNamespace, &o.FilenameOptions). ResourceTypeOrNameArgs(true, o.Resources...). @@ -154,7 +149,7 @@ func (o ResumeOptions) RunResume() error { allErrs = append(allErrs, err) } - for _, patch := range set.CalculatePatches(infos, cmdutil.InternalVersionJSONEncoder(), set.PatchFn(o.Resumer)) { + for _, patch := range set.CalculatePatches(infos, scheme.DefaultJSONEncoder(), set.PatchFn(o.Resumer)) { info := patch.Info if patch.Err != nil { diff --git a/pkg/kubectl/cmd/rollout/rollout_status.go b/pkg/kubectl/cmd/rollout/rollout_status.go index 160f4e845b3..98b617e20d6 100644 --- a/pkg/kubectl/cmd/rollout/rollout_status.go +++ b/pkg/kubectl/cmd/rollout/rollout_status.go @@ -71,9 +71,9 @@ type RolloutStatusOptions struct { Revision int64 Timeout time.Duration - StatusViewer func(*meta.RESTMapping) (kubectl.StatusViewer, error) - Builder func() *resource.Builder - DynamicClient dynamic.Interface + StatusViewerFn func(*meta.RESTMapping) (kubectl.StatusViewer, error) + Builder func() *resource.Builder + DynamicClient dynamic.Interface FilenameOptions *resource.FilenameOptions genericclioptions.IOStreams @@ -102,7 +102,7 @@ func NewCmdRolloutStatus(f cmdutil.Factory, streams genericclioptions.IOStreams) Example: status_example, Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, args)) - cmdutil.CheckErr(o.Validate(cmd, args)) + cmdutil.CheckErr(o.Validate()) cmdutil.CheckErr(o.Run()) }, ValidArgs: validArgs, @@ -127,9 +127,7 @@ func (o *RolloutStatusOptions) Complete(f cmdutil.Factory, args []string) error 
} o.BuilderArgs = args - o.StatusViewer = func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) { - return polymorphichelpers.StatusViewerFn(f, mapping) - } + o.StatusViewerFn = polymorphichelpers.StatusViewerFn clientConfig, err := f.ToRESTConfig() if err != nil { @@ -144,9 +142,9 @@ func (o *RolloutStatusOptions) Complete(f cmdutil.Factory, args []string) error return nil } -func (o *RolloutStatusOptions) Validate(cmd *cobra.Command, args []string) error { - if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames) { - return cmdutil.UsageErrorf(cmd, "Required resource not specified.") +func (o *RolloutStatusOptions) Validate() error { + if len(o.BuilderArgs) == 0 && cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames) { + return fmt.Errorf("required resource not specified") } if o.Revision < 0 { @@ -180,7 +178,7 @@ func (o *RolloutStatusOptions) Run() error { info := infos[0] mapping := info.ResourceMapping() - statusViewer, err := o.StatusViewer(mapping) + statusViewer, err := o.StatusViewerFn(mapping) if err != nil { return err } diff --git a/pkg/kubectl/cmd/rollout/rollout_undo.go b/pkg/kubectl/cmd/rollout/rollout_undo.go index d2150089ee4..b7e287fb4d7 100644 --- a/pkg/kubectl/cmd/rollout/rollout_undo.go +++ b/pkg/kubectl/cmd/rollout/rollout_undo.go @@ -17,10 +17,11 @@ limitations under the License. 
package rollout import ( + "fmt" + "github.com/spf13/cobra" "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers" - utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" @@ -84,16 +85,9 @@ func NewCmdRolloutUndo(f cmdutil.Factory, streams genericclioptions.IOStreams) * Long: undo_long, Example: undo_example, Run: func(cmd *cobra.Command, args []string) { - allErrs := []error{} - err := o.Complete(f, cmd, args) - if err != nil { - allErrs = append(allErrs, err) - } - err = o.RunUndo() - if err != nil { - allErrs = append(allErrs, err) - } - cmdutil.CheckErr(utilerrors.Flatten(utilerrors.NewAggregate(allErrs))) + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunUndo()) }, ValidArgs: validArgs, } @@ -107,10 +101,6 @@ func NewCmdRolloutUndo(f cmdutil.Factory, streams genericclioptions.IOStreams) * } func (o *UndoOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { - if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { - return cmdutil.UsageErrorf(cmd, "Required resource not specified.") - } - o.Resources = args o.DryRun = cmdutil.GetDryRunFlag(cmd) @@ -133,6 +123,13 @@ func (o *UndoOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []str return err } +func (o *UndoOptions) Validate() error { + if len(o.Resources) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames) { + return fmt.Errorf("required resource not specified") + } + return nil +} + func (o *UndoOptions) RunUndo() error { r := o.Builder(). WithScheme(legacyscheme.Scheme). 
diff --git a/pkg/kubectl/cmd/run.go b/pkg/kubectl/cmd/run.go index 111ef6363da..4a2cd29e532 100644 --- a/pkg/kubectl/cmd/run.go +++ b/pkg/kubectl/cmd/run.go @@ -38,7 +38,6 @@ import ( "k8s.io/client-go/kubernetes" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" watchtools "k8s.io/client-go/tools/watch" - "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -461,7 +460,7 @@ func (o *RunOptions) removeCreatedObjects(f cmdutil.Factory, createdObjects []*R return err } r := f.NewBuilder(). - WithScheme(legacyscheme.Scheme). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). ContinueOnError(). NamespaceParam(namespace).DefaultNamespace(). ResourceNames(obj.Mapping.Resource.Resource+"."+obj.Mapping.Resource.Group, name). diff --git a/pkg/kubectl/cmd/top_node.go b/pkg/kubectl/cmd/top_node.go index c224d3f36e6..2c4b77657af 100644 --- a/pkg/kubectl/cmd/top_node.go +++ b/pkg/kubectl/cmd/top_node.go @@ -40,6 +40,7 @@ import ( type TopNodeOptions struct { ResourceName string Selector string + NoHeaders bool NodeClient corev1client.CoreV1Interface HeapsterOptions HeapsterTopOptions Client *metricsutil.HeapsterMetricsClient @@ -118,6 +119,8 @@ func NewCmdTopNode(f cmdutil.Factory, o *TopNodeOptions, streams genericclioptio Aliases: []string{"nodes", "no"}, } cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. 
-l key1=value1,key2=value2)") + cmd.Flags().BoolVar(&o.NoHeaders, "no-headers", o.NoHeaders, "If present, print output without headers") + o.HeapsterOptions.Bind(cmd.Flags()) return cmd } @@ -216,7 +219,7 @@ func (o TopNodeOptions) RunTopNode() error { allocatable[n.Name] = n.Status.Allocatable } - return o.Printer.PrintNodeMetrics(metrics.Items, allocatable) + return o.Printer.PrintNodeMetrics(metrics.Items, allocatable, o.NoHeaders) } func getNodeMetricsFromMetricsAPI(metricsClient metricsclientset.Interface, resourceName string, selector labels.Selector) (*metricsapi.NodeMetricsList, error) { diff --git a/pkg/kubectl/cmd/top_node_test.go b/pkg/kubectl/cmd/top_node_test.go index 8abf81ae1f1..ec81d7fd8e0 100644 --- a/pkg/kubectl/cmd/top_node_test.go +++ b/pkg/kubectl/cmd/top_node_test.go @@ -81,6 +81,7 @@ func TestTopNodeAllMetrics(t *testing.T) { streams, _, buf, _ := genericclioptions.NewTestIOStreams() cmd := NewCmdTopNode(tf, nil, streams) + cmd.Flags().Set("no-headers", "true") cmd.Run(cmd, []string{}) // Check the presence of node names in the output. 
@@ -90,6 +91,9 @@ func TestTopNodeAllMetrics(t *testing.T) { t.Errorf("missing metrics for %s: \n%s", m.Name, result) } } + if strings.Contains(result, "MEMORY") { + t.Errorf("should not print headers with --no-headers option set:\n%s\n", result) + } } func TestTopNodeAllMetricsCustomDefaults(t *testing.T) { diff --git a/pkg/kubectl/cmd/top_pod.go b/pkg/kubectl/cmd/top_pod.go index e3a2c35e517..657d1ff5a0f 100644 --- a/pkg/kubectl/cmd/top_pod.go +++ b/pkg/kubectl/cmd/top_pod.go @@ -45,6 +45,7 @@ type TopPodOptions struct { Selector string AllNamespaces bool PrintContainers bool + NoHeaders bool PodClient corev1client.PodsGetter HeapsterOptions HeapsterTopOptions Client *metricsutil.HeapsterMetricsClient @@ -109,6 +110,7 @@ func NewCmdTopPod(f cmdutil.Factory, o *TopPodOptions, streams genericclioptions cmd.Flags().StringVarP(&o.Selector, "selector", "l", o.Selector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") cmd.Flags().BoolVar(&o.PrintContainers, "containers", o.PrintContainers, "If present, print usage of containers within a pod.") cmd.Flags().BoolVar(&o.AllNamespaces, "all-namespaces", o.AllNamespaces, "If present, list the requested object(s) across all namespaces. 
Namespace in current context is ignored even if specified with --namespace.") + cmd.Flags().BoolVar(&o.NoHeaders, "no-headers", o.NoHeaders, "If present, print output without headers.") o.HeapsterOptions.Bind(cmd.Flags()) return cmd } @@ -198,7 +200,7 @@ func (o TopPodOptions) RunTopPod() error { return err } - return o.Printer.PrintPodMetrics(metrics.Items, o.PrintContainers, o.AllNamespaces) + return o.Printer.PrintPodMetrics(metrics.Items, o.PrintContainers, o.AllNamespaces, o.NoHeaders) } func getMetricsFromMetricsAPI(metricsClient metricsclientset.Interface, namespace, resourceName string, allNamespaces bool, selector labels.Selector) (*metricsapi.PodMetricsList, error) { diff --git a/pkg/kubectl/cmd/top_pod_test.go b/pkg/kubectl/cmd/top_pod_test.go index 6ee773c6ed1..96148c49e02 100644 --- a/pkg/kubectl/cmd/top_pod_test.go +++ b/pkg/kubectl/cmd/top_pod_test.go @@ -37,6 +37,7 @@ import ( "k8s.io/client-go/rest/fake" core "k8s.io/client-go/testing" cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/scheme" metricsv1alpha1api "k8s.io/metrics/pkg/apis/metrics/v1alpha1" metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1" @@ -132,6 +133,14 @@ func TestTopPod(t *testing.T) { namespaces: []string{testNS}, containers: true, }, + { + name: "no-headers set", + flags: map[string]string{"containers": "true", "no-headers": "true"}, + args: []string{"pod1"}, + expectedPath: topPathPrefix + "/namespaces/" + testNS + "/pods/pod1", + namespaces: []string{testNS}, + containers: true, + }, } initTestErrorHandler(t) for _, testCase := range testCases { @@ -221,6 +230,9 @@ func TestTopPod(t *testing.T) { t.Errorf("%s: unexpected metrics for %s: \n%s", testCase.name, name, result) } } + if cmdutil.GetFlagBool(cmd, "no-headers") && strings.Contains(result, "MEMORY") { + t.Errorf("%s: unexpected headers with no-headers option set: \n%s", testCase.name, result) + } }) } } diff --git 
a/pkg/kubectl/cmd/util/BUILD b/pkg/kubectl/cmd/util/BUILD index 50eee141914..697c7f2bbe7 100644 --- a/pkg/kubectl/cmd/util/BUILD +++ b/pkg/kubectl/cmd/util/BUILD @@ -64,6 +64,7 @@ go_test( "//pkg/api/testapi:go_default_library", "//pkg/api/testing:go_default_library", "//pkg/apis/core:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", diff --git a/pkg/kubectl/cmd/util/editor/BUILD b/pkg/kubectl/cmd/util/editor/BUILD index d2927e03bd7..464d84b1ff4 100644 --- a/pkg/kubectl/cmd/util/editor/BUILD +++ b/pkg/kubectl/cmd/util/editor/BUILD @@ -15,12 +15,12 @@ go_library( "//build/visible_to:pkg_kubectl_cmd_util_editor_CONSUMERS", ], deps = [ - "//pkg/apis/core:go_default_library", "//pkg/kubectl:go_default_library", "//pkg/kubectl/cmd/util:go_default_library", "//pkg/kubectl/cmd/util/editor/crlf:go_default_library", "//pkg/kubectl/scheme:go_default_library", "//pkg/kubectl/util/term:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", diff --git a/pkg/kubectl/cmd/util/editor/editoptions.go b/pkg/kubectl/cmd/util/editor/editoptions.go index f94f3cd2c51..ab43141cf7c 100644 --- a/pkg/kubectl/cmd/util/editor/editoptions.go +++ b/pkg/kubectl/cmd/util/editor/editoptions.go @@ -32,6 +32,7 @@ import ( "github.com/golang/glog" "github.com/spf13/cobra" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -45,7 +46,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" 
"k8s.io/cli-runtime/pkg/genericclioptions/printers" "k8s.io/cli-runtime/pkg/genericclioptions/resource" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/cmd/util/editor/crlf" @@ -300,7 +300,8 @@ func (o *EditOptions) Run() error { file: file, } containsError = true - fmt.Fprintln(o.ErrOut, results.addError(apierrors.NewInvalid(api.Kind(""), "", field.ErrorList{field.Invalid(nil, "The edited file failed validation", fmt.Sprintf("%v", err))}), infos[0])) + fmt.Fprintln(o.ErrOut, results.addError(apierrors.NewInvalid(corev1.SchemeGroupVersion.WithKind("").GroupKind(), + "", field.ErrorList{field.Invalid(nil, "The edited file failed validation", fmt.Sprintf("%v", err))}), infos[0])) continue } @@ -524,7 +525,7 @@ func GetApplyPatch(obj runtime.Unstructured) ([]byte, []byte, types.PatchType, e if annotations == nil { annotations = map[string]string{} } - annotations[api.LastAppliedConfigAnnotation] = string(beforeJSON) + annotations[corev1.LastAppliedConfigAnnotation] = string(beforeJSON) accessor.SetAnnotations(objCopy, annotations) afterJSON, err := encodeToJson(objCopy.(runtime.Unstructured)) if err != nil { @@ -668,7 +669,7 @@ func (o *EditOptions) visitAnnotation(annotationVisitor resource.Visitor) error err := annotationVisitor.Visit(func(info *resource.Info, incomingErr error) error { // put configuration annotation in "updates" if o.ApplyAnnotation { - if err := kubectl.CreateOrUpdateAnnotation(true, info.Object, cmdutil.InternalVersionJSONEncoder()); err != nil { + if err := kubectl.CreateOrUpdateAnnotation(true, info.Object, scheme.DefaultJSONEncoder()); err != nil { return err } } diff --git a/pkg/kubectl/cmd/util/helpers_test.go b/pkg/kubectl/cmd/util/helpers_test.go index 65c8fde20fb..c4ae341915c 100644 --- a/pkg/kubectl/cmd/util/helpers_test.go +++ b/pkg/kubectl/cmd/util/helpers_test.go @@ -24,6 +24,7 @@ import ( "syscall" "testing" + "k8s.io/api/core/v1" 
apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -39,6 +40,7 @@ import ( func TestMerge(t *testing.T) { grace := int64(30) + enableServiceLinks := v1.DefaultEnableServiceLinks tests := []struct { obj runtime.Object fragment string @@ -125,6 +127,7 @@ func TestMerge(t *testing.T) { TerminationGracePeriodSeconds: &grace, SecurityContext: &api.PodSecurityContext{}, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, }, }, diff --git a/pkg/kubectl/conditions.go b/pkg/kubectl/conditions.go index 36a752c9ad2..405bc6d4634 100644 --- a/pkg/kubectl/conditions.go +++ b/pkg/kubectl/conditions.go @@ -26,9 +26,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/kubernetes/pkg/api/pod" - podv1 "k8s.io/kubernetes/pkg/api/v1/pod" - api "k8s.io/kubernetes/pkg/apis/core" ) // ControllerHasDesiredReplicas returns a condition that will be true if and only if @@ -56,36 +53,6 @@ func ControllerHasDesiredReplicas(rcClient corev1client.ReplicationControllersGe // the pod has already reached completed state. var ErrPodCompleted = fmt.Errorf("pod ran to completion") -// ErrContainerTerminated is returned by PodContainerRunning in the intermediate -// state where the pod indicates it's still running, but its container is already terminated -var ErrContainerTerminated = fmt.Errorf("container terminated") - -// PodRunning returns true if the pod is running, false if the pod has not yet reached running state, -// returns ErrPodCompleted if the pod has run to completion, or an error in any other case. 
-func PodRunning(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodRunning: - return true, nil - case api.PodFailed, api.PodSucceeded: - return false, ErrPodCompleted - } - case *corev1.Pod: - switch t.Status.Phase { - case corev1.PodRunning: - return true, nil - case corev1.PodFailed, corev1.PodSucceeded: - return false, ErrPodCompleted - } - } - return false, nil -} - // PodCompleted returns true if the pod has run to completion, false if the pod has not yet // reached running state, or an error in any other case. func PodCompleted(event watch.Event) (bool, error) { @@ -94,11 +61,6 @@ func PodCompleted(event watch.Event) (bool, error) { return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") } switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodFailed, api.PodSucceeded: - return true, nil - } case *corev1.Pod: switch t.Status.Phase { case corev1.PodFailed, corev1.PodSucceeded: @@ -117,38 +79,21 @@ func PodRunningAndReady(event watch.Event) (bool, error) { return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") } switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodFailed, api.PodSucceeded: - return false, ErrPodCompleted - case api.PodRunning: - return pod.IsPodReady(t), nil - } case *corev1.Pod: switch t.Status.Phase { case corev1.PodFailed, corev1.PodSucceeded: return false, ErrPodCompleted case corev1.PodRunning: - return podv1.IsPodReady(t), nil - } - } - return false, nil -} - -// PodNotPending returns true if the pod has left the pending state, false if it has not, -// or an error in any other case (such as if the pod was deleted). 
-func PodNotPending(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "") - } - switch t := event.Object.(type) { - case *api.Pod: - switch t.Status.Phase { - case api.PodPending: - return false, nil - default: - return true, nil + conditions := t.Status.Conditions + if conditions == nil { + return false, nil + } + for i := range conditions { + if conditions[i].Type == corev1.PodReady && + conditions[i].Status == corev1.ConditionTrue { + return true, nil + } + } } } return false, nil diff --git a/pkg/kubectl/metricsutil/metrics_printer.go b/pkg/kubectl/metricsutil/metrics_printer.go index 9e1f45b25c8..b13c39273d1 100644 --- a/pkg/kubectl/metricsutil/metrics_printer.go +++ b/pkg/kubectl/metricsutil/metrics_printer.go @@ -53,7 +53,7 @@ func NewTopCmdPrinter(out io.Writer) *TopCmdPrinter { return &TopCmdPrinter{out: out} } -func (printer *TopCmdPrinter) PrintNodeMetrics(metrics []metricsapi.NodeMetrics, availableResources map[string]v1.ResourceList) error { +func (printer *TopCmdPrinter) PrintNodeMetrics(metrics []metricsapi.NodeMetrics, availableResources map[string]v1.ResourceList, noHeaders bool) error { if len(metrics) == 0 { return nil } @@ -63,8 +63,9 @@ func (printer *TopCmdPrinter) PrintNodeMetrics(metrics []metricsapi.NodeMetrics, sort.Slice(metrics, func(i, j int) bool { return metrics[i].Name < metrics[j].Name }) - - printColumnNames(w, NodeColumns) + if !noHeaders { + printColumnNames(w, NodeColumns) + } var usage v1.ResourceList for _, m := range metrics { err := scheme.Scheme.Convert(&m.Usage, &usage, nil) @@ -86,18 +87,20 @@ func (printer *TopCmdPrinter) PrintNodeMetrics(metrics []metricsapi.NodeMetrics, return nil } -func (printer *TopCmdPrinter) PrintPodMetrics(metrics []metricsapi.PodMetrics, printContainers bool, withNamespace bool) error { +func (printer *TopCmdPrinter) PrintPodMetrics(metrics []metricsapi.PodMetrics, printContainers bool, 
withNamespace bool, noHeaders bool) error { if len(metrics) == 0 { return nil } w := printers.GetNewTabWriter(printer.out) defer w.Flush() - - if withNamespace { - printValue(w, NamespaceColumn) - } - if printContainers { - printValue(w, PodColumn) + if !noHeaders { + if withNamespace { + printValue(w, NamespaceColumn) + } + if printContainers { + printValue(w, PodColumn) + } + printColumnNames(w, PodColumns) } sort.Slice(metrics, func(i, j int) bool { @@ -106,8 +109,6 @@ func (printer *TopCmdPrinter) PrintPodMetrics(metrics []metricsapi.PodMetrics, p } return metrics[i].Name < metrics[j].Name }) - - printColumnNames(w, PodColumns) for _, m := range metrics { err := printSinglePodMetrics(w, &m, printContainers, withNamespace) if err != nil { diff --git a/pkg/kubectl/polymorphichelpers/BUILD b/pkg/kubectl/polymorphichelpers/BUILD index fd6d8722c30..bb1442ca693 100644 --- a/pkg/kubectl/polymorphichelpers/BUILD +++ b/pkg/kubectl/polymorphichelpers/BUILD @@ -22,11 +22,9 @@ go_library( importpath = "k8s.io/kubernetes/pkg/kubectl/polymorphichelpers", visibility = ["//visibility:public"], deps = [ - "//pkg/api/legacyscheme:go_default_library", "//pkg/apis/apps:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/core:go_default_library", - "//pkg/apis/core/v1:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/controller:go_default_library", "//pkg/kubectl:go_default_library", @@ -41,7 +39,6 @@ go_library( "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", @@ -70,10 +67,6 @@ go_test( ], 
embed = [":go_default_library"], deps = [ - "//pkg/apis/apps:go_default_library", - "//pkg/apis/batch:go_default_library", - "//pkg/apis/core:go_default_library", - "//pkg/apis/extensions:go_default_library", "//pkg/controller:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/apps/v1beta1:go_default_library", diff --git a/pkg/kubectl/polymorphichelpers/attachablepodforobject.go b/pkg/kubectl/polymorphichelpers/attachablepodforobject.go index bb6147e2587..bb4e366228b 100644 --- a/pkg/kubectl/polymorphichelpers/attachablepodforobject.go +++ b/pkg/kubectl/polymorphichelpers/attachablepodforobject.go @@ -21,23 +21,16 @@ import ( "sort" "time" - "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/genericclioptions" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - api "k8s.io/kubernetes/pkg/apis/core" - apiv1 "k8s.io/kubernetes/pkg/apis/core/v1" "k8s.io/kubernetes/pkg/controller" ) // attachablePodForObject returns the pod to which to attach given an object. 
func attachablePodForObject(restClientGetter genericclioptions.RESTClientGetter, object runtime.Object, timeout time.Duration) (*corev1.Pod, error) { switch t := object.(type) { - case *api.Pod: - externalPod := &corev1.Pod{} - err := apiv1.Convert_core_Pod_To_v1_Pod(t, externalPod, nil) - return externalPod, err case *corev1.Pod: return t, nil } @@ -55,7 +48,7 @@ func attachablePodForObject(restClientGetter genericclioptions.RESTClientGetter, if err != nil { return nil, fmt.Errorf("cannot attach to %T: %v", object, err) } - sortBy := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } + sortBy := func(pods []*corev1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } pod, _, err := GetFirstPod(clientset, namespace, selector.String(), timeout, sortBy) return pod, err } diff --git a/pkg/kubectl/polymorphichelpers/canbeautoscaled.go b/pkg/kubectl/polymorphichelpers/canbeautoscaled.go index 6009f2b5733..696e69a0f46 100644 --- a/pkg/kubectl/polymorphichelpers/canbeautoscaled.go +++ b/pkg/kubectl/polymorphichelpers/canbeautoscaled.go @@ -19,16 +19,20 @@ package polymorphichelpers import ( "fmt" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/apis/apps" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" ) func canBeAutoscaled(kind schema.GroupKind) error { switch kind { - case api.Kind("ReplicationController"), extensions.Kind("ReplicaSet"), - extensions.Kind("Deployment"), apps.Kind("Deployment"), apps.Kind("ReplicaSet"): + case + corev1.SchemeGroupVersion.WithKind("ReplicationController").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(), + extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + 
extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(): // nothing to do here default: return fmt.Errorf("cannot autoscale a %v", kind) diff --git a/pkg/kubectl/polymorphichelpers/canbeautoscaled_test.go b/pkg/kubectl/polymorphichelpers/canbeautoscaled_test.go index c2f0e8a027c..a4cbe03f9e0 100644 --- a/pkg/kubectl/polymorphichelpers/canbeautoscaled_test.go +++ b/pkg/kubectl/polymorphichelpers/canbeautoscaled_test.go @@ -19,8 +19,10 @@ package polymorphichelpers import ( "testing" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime/schema" - api "k8s.io/kubernetes/pkg/apis/core" ) func TestCanBeAutoscaled(t *testing.T) { @@ -29,11 +31,27 @@ func TestCanBeAutoscaled(t *testing.T) { expectErr bool }{ { - kind: api.Kind("ReplicationController"), + kind: corev1.SchemeGroupVersion.WithKind("ReplicationController").GroupKind(), expectErr: false, }, { - kind: api.Kind("Node"), + kind: appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + expectErr: false, + }, + { + kind: extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(), + expectErr: false, + }, + { + kind: corev1.SchemeGroupVersion.WithKind("Node").GroupKind(), + expectErr: true, + }, + { + kind: corev1.SchemeGroupVersion.WithKind("Service").GroupKind(), + expectErr: true, + }, + { + kind: corev1.SchemeGroupVersion.WithKind("Pod").GroupKind(), expectErr: true, }, } diff --git a/pkg/kubectl/polymorphichelpers/canbeexposed.go b/pkg/kubectl/polymorphichelpers/canbeexposed.go index af4463fe999..b232ff853fe 100644 --- a/pkg/kubectl/polymorphichelpers/canbeexposed.go +++ b/pkg/kubectl/polymorphichelpers/canbeexposed.go @@ -19,17 +19,23 @@ package polymorphichelpers import ( "fmt" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/apis/apps" - api 
"k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" ) // Check whether the kind of resources could be exposed func canBeExposed(kind schema.GroupKind) error { switch kind { - case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), - extensions.Kind("Deployment"), apps.Kind("Deployment"), extensions.Kind("ReplicaSet"), apps.Kind("ReplicaSet"): + case + corev1.SchemeGroupVersion.WithKind("ReplicationController").GroupKind(), + corev1.SchemeGroupVersion.WithKind("Service").GroupKind(), + corev1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(), + extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(): // nothing to do here default: return fmt.Errorf("cannot expose a %s", kind) diff --git a/pkg/kubectl/polymorphichelpers/canbeexposed_test.go b/pkg/kubectl/polymorphichelpers/canbeexposed_test.go index b9e8d492dc2..b1cd5b2d17a 100644 --- a/pkg/kubectl/polymorphichelpers/canbeexposed_test.go +++ b/pkg/kubectl/polymorphichelpers/canbeexposed_test.go @@ -19,8 +19,10 @@ package polymorphichelpers import ( "testing" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime/schema" - api "k8s.io/kubernetes/pkg/apis/core" ) func TestCanBeExposed(t *testing.T) { @@ -29,11 +31,27 @@ func TestCanBeExposed(t *testing.T) { expectErr bool }{ { - kind: api.Kind("ReplicationController"), + kind: corev1.SchemeGroupVersion.WithKind("ReplicationController").GroupKind(), expectErr: false, }, { - kind: api.Kind("Node"), + kind: corev1.SchemeGroupVersion.WithKind("Service").GroupKind(), + expectErr: false, + }, + { + kind: corev1.SchemeGroupVersion.WithKind("Pod").GroupKind(), + expectErr: false, + }, + { + kind: 
appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + expectErr: false, + }, + { + kind: extensionsv1beta1.SchemeGroupVersion.WithKind("ReplicaSet").GroupKind(), + expectErr: false, + }, + { + kind: corev1.SchemeGroupVersion.WithKind("Node").GroupKind(), expectErr: true, }, } diff --git a/pkg/kubectl/polymorphichelpers/helpers_test.go b/pkg/kubectl/polymorphichelpers/helpers_test.go index 21f8af20d16..91e582f8aa6 100644 --- a/pkg/kubectl/polymorphichelpers/helpers_test.go +++ b/pkg/kubectl/polymorphichelpers/helpers_test.go @@ -29,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/watch" fakeexternal "k8s.io/client-go/kubernetes/fake" testcore "k8s.io/client-go/testing" - api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/controller" ) @@ -119,18 +118,18 @@ func TestGetFirstPod(t *testing.T) { watching: []watch.Event{ { Type: watch.Modified, - Object: &api.Pod{ + Object: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod-1", Namespace: metav1.NamespaceDefault, CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC), Labels: map[string]string{"test": "selector"}, }, - Status: api.PodStatus{ - Conditions: []api.PodCondition{ + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ { - Status: api.ConditionTrue, - Type: api.PodReady, + Status: corev1.ConditionTrue, + Type: corev1.PodReady, }, }, }, diff --git a/pkg/kubectl/polymorphichelpers/interface.go b/pkg/kubectl/polymorphichelpers/interface.go index 348505d49d7..816bf91427c 100644 --- a/pkg/kubectl/polymorphichelpers/interface.go +++ b/pkg/kubectl/polymorphichelpers/interface.go @@ -47,7 +47,7 @@ type HistoryViewerFunc func(restClientGetter genericclioptions.RESTClientGetter, var HistoryViewerFn HistoryViewerFunc = historyViewer // StatusViewerFunc is a function type that can tell you how to print rollout status -type StatusViewerFunc func(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (kubectl.StatusViewer, error) +type 
StatusViewerFunc func(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) // StatusViewerFn gives a way to easily override the function for unit testing if needed var StatusViewerFn StatusViewerFunc = statusViewer diff --git a/pkg/kubectl/polymorphichelpers/logsforobject.go b/pkg/kubectl/polymorphichelpers/logsforobject.go index 0378b6e0cc9..069469abd3a 100644 --- a/pkg/kubectl/polymorphichelpers/logsforobject.go +++ b/pkg/kubectl/polymorphichelpers/logsforobject.go @@ -29,7 +29,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" - coreinternal "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/controller" ) @@ -55,17 +54,6 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt } switch t := object.(type) { - case *coreinternal.PodList: - ret := []*rest.Request{} - for i := range t.Items { - currRet, err := logsForObjectWithClient(clientset, &t.Items[i], options, timeout, allContainers) - if err != nil { - return nil, err - } - ret = append(ret, currRet...) - } - return ret, nil - case *corev1.PodList: ret := []*rest.Request{} for i := range t.Items { @@ -77,34 +65,6 @@ func logsForObjectWithClient(clientset corev1client.CoreV1Interface, object, opt } return ret, nil - case *coreinternal.Pod: - // if allContainers is true, then we're going to locate all containers and then iterate through them. At that point, "allContainers" is false - if !allContainers { - return []*rest.Request{clientset.Pods(t.Namespace).GetLogs(t.Name, opts)}, nil - } - - ret := []*rest.Request{} - for _, c := range t.Spec.InitContainers { - currOpts := opts.DeepCopy() - currOpts.Container = c.Name - currRet, err := logsForObjectWithClient(clientset, t, currOpts, timeout, false) - if err != nil { - return nil, err - } - ret = append(ret, currRet...) 
- } - for _, c := range t.Spec.Containers { - currOpts := opts.DeepCopy() - currOpts.Container = c.Name - currRet, err := logsForObjectWithClient(clientset, t, currOpts, timeout, false) - if err != nil { - return nil, err - } - ret = append(ret, currRet...) - } - - return ret, nil - case *corev1.Pod: // if allContainers is true, then we're going to locate all containers and then iterate through them. At that point, "allContainers" is false if !allContainers { diff --git a/pkg/kubectl/polymorphichelpers/logsforobject_test.go b/pkg/kubectl/polymorphichelpers/logsforobject_test.go index a90e7d5e031..c27ce8f375a 100644 --- a/pkg/kubectl/polymorphichelpers/logsforobject_test.go +++ b/pkg/kubectl/polymorphichelpers/logsforobject_test.go @@ -21,16 +21,16 @@ import ( "testing" "time" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/diff" fakeexternal "k8s.io/client-go/kubernetes/fake" testclient "k8s.io/client-go/testing" - "k8s.io/kubernetes/pkg/apis/apps" - "k8s.io/kubernetes/pkg/apis/batch" - "k8s.io/kubernetes/pkg/apis/extensions" ) var ( @@ -151,9 +151,9 @@ func TestLogsForObject(t *testing.T) { }, { name: "replica set logs", - obj: &extensions.ReplicaSet{ + obj: &extensionsv1beta1.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{Name: "hello", Namespace: "test"}, - Spec: extensions.ReplicaSetSpec{ + Spec: extensionsv1beta1.ReplicaSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, @@ -165,9 +165,9 @@ func TestLogsForObject(t *testing.T) { }, { name: "deployment logs", - obj: &extensions.Deployment{ + obj: &extensionsv1beta1.Deployment{ ObjectMeta: metav1.ObjectMeta{Name: "hello", Namespace: "test"}, - Spec: extensions.DeploymentSpec{ + Spec: extensionsv1beta1.DeploymentSpec{ Selector: 
&metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, @@ -179,9 +179,9 @@ func TestLogsForObject(t *testing.T) { }, { name: "job logs", - obj: &batch.Job{ + obj: &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{Name: "hello", Namespace: "test"}, - Spec: batch.JobSpec{ + Spec: batchv1.JobSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, @@ -193,9 +193,9 @@ func TestLogsForObject(t *testing.T) { }, { name: "stateful set logs", - obj: &apps.StatefulSet{ + obj: &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{Name: "hello", Namespace: "test"}, - Spec: apps.StatefulSetSpec{ + Spec: appsv1.StatefulSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, }, }, diff --git a/pkg/kubectl/polymorphichelpers/mapbasedselectorforobject_test.go b/pkg/kubectl/polymorphichelpers/mapbasedselectorforobject_test.go index 59bf5dec444..8734f0c5578 100644 --- a/pkg/kubectl/polymorphichelpers/mapbasedselectorforobject_test.go +++ b/pkg/kubectl/polymorphichelpers/mapbasedselectorforobject_test.go @@ -19,10 +19,10 @@ package polymorphichelpers import ( "testing" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" ) func TestMapBasedSelectorForObject(t *testing.T) { @@ -32,8 +32,8 @@ func TestMapBasedSelectorForObject(t *testing.T) { expectErr bool }{ { - object: &api.ReplicationController{ - Spec: api.ReplicationControllerSpec{ + object: &corev1.ReplicationController{ + Spec: corev1.ReplicationControllerSpec{ Selector: map[string]string{ "foo": "bar", }, @@ -42,11 +42,11 @@ func TestMapBasedSelectorForObject(t *testing.T) { expectSelector: "foo=bar", }, { - object: &api.Pod{}, + object: &corev1.Pod{}, expectErr: true, }, { - object: &api.Pod{ + object: &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: 
map[string]string{ "foo": "bar", @@ -56,8 +56,8 @@ func TestMapBasedSelectorForObject(t *testing.T) { expectSelector: "foo=bar", }, { - object: &api.Service{ - Spec: api.ServiceSpec{ + object: &corev1.Service{ + Spec: corev1.ServiceSpec{ Selector: map[string]string{ "foo": "bar", }, @@ -66,12 +66,12 @@ func TestMapBasedSelectorForObject(t *testing.T) { expectSelector: "foo=bar", }, { - object: &api.Service{}, + object: &corev1.Service{}, expectErr: true, }, { - object: &extensions.Deployment{ - Spec: extensions.DeploymentSpec{ + object: &extensionsv1beta1.Deployment{ + Spec: extensionsv1beta1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "foo": "bar", @@ -82,8 +82,8 @@ func TestMapBasedSelectorForObject(t *testing.T) { expectSelector: "foo=bar", }, { - object: &extensions.Deployment{ - Spec: extensions.DeploymentSpec{ + object: &extensionsv1beta1.Deployment{ + Spec: extensionsv1beta1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { @@ -96,8 +96,8 @@ func TestMapBasedSelectorForObject(t *testing.T) { expectErr: true, }, { - object: &extensions.ReplicaSet{ - Spec: extensions.ReplicaSetSpec{ + object: &extensionsv1beta1.ReplicaSet{ + Spec: extensionsv1beta1.ReplicaSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "foo": "bar", @@ -108,8 +108,8 @@ func TestMapBasedSelectorForObject(t *testing.T) { expectSelector: "foo=bar", }, { - object: &extensions.ReplicaSet{ - Spec: extensions.ReplicaSetSpec{ + object: &extensionsv1beta1.ReplicaSet{ + Spec: extensionsv1beta1.ReplicaSetSpec{ Selector: &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { @@ -122,7 +122,7 @@ func TestMapBasedSelectorForObject(t *testing.T) { expectErr: true, }, { - object: &api.Node{}, + object: &corev1.Node{}, expectErr: true, }, } diff --git a/pkg/kubectl/polymorphichelpers/objectpauser.go b/pkg/kubectl/polymorphichelpers/objectpauser.go index 
8d77ee6e639..61aebba36bf 100644 --- a/pkg/kubectl/polymorphichelpers/objectpauser.go +++ b/pkg/kubectl/polymorphichelpers/objectpauser.go @@ -24,23 +24,13 @@ import ( appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/kubectl/scheme" ) // Currently only supports Deployments. func defaultObjectPauser(obj runtime.Object) ([]byte, error) { switch obj := obj.(type) { - case *extensions.Deployment: - if obj.Spec.Paused { - return nil, errors.New("is already paused") - } - obj.Spec.Paused = true - return runtime.Encode(internalVersionJSONEncoder(), obj) - case *extensionsv1beta1.Deployment: if obj.Spec.Paused { return nil, errors.New("is already paused") @@ -73,8 +63,3 @@ func defaultObjectPauser(obj runtime.Object) ([]byte, error) { return nil, fmt.Errorf("pausing is not supported") } } - -func internalVersionJSONEncoder() runtime.Encoder { - encoder := legacyscheme.Codecs.LegacyCodec(legacyscheme.Scheme.PrioritizedVersionsAllGroups()...) 
- return unstructured.JSONFallbackEncoder{Encoder: encoder} -} diff --git a/pkg/kubectl/polymorphichelpers/objectpauser_test.go b/pkg/kubectl/polymorphichelpers/objectpauser_test.go index 9ce0dbb3046..c141224f1a7 100644 --- a/pkg/kubectl/polymorphichelpers/objectpauser_test.go +++ b/pkg/kubectl/polymorphichelpers/objectpauser_test.go @@ -20,8 +20,8 @@ import ( "bytes" "testing" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/apis/extensions" ) func TestDefaultObjectPauser(t *testing.T) { @@ -31,8 +31,8 @@ func TestDefaultObjectPauser(t *testing.T) { expectErr bool }{ { - object: &extensions.Deployment{ - Spec: extensions.DeploymentSpec{ + object: &extensionsv1beta1.Deployment{ + Spec: extensionsv1beta1.DeploymentSpec{ Paused: false, }, }, @@ -40,15 +40,15 @@ func TestDefaultObjectPauser(t *testing.T) { expectErr: false, }, { - object: &extensions.Deployment{ - Spec: extensions.DeploymentSpec{ + object: &extensionsv1beta1.Deployment{ + Spec: extensionsv1beta1.DeploymentSpec{ Paused: true, }, }, expectErr: true, }, { - object: &extensions.ReplicaSet{}, + object: &extensionsv1beta1.ReplicaSet{}, expectErr: true, }, } diff --git a/pkg/kubectl/polymorphichelpers/objectresumer.go b/pkg/kubectl/polymorphichelpers/objectresumer.go index d3119bc7b5c..90e8b2049c6 100644 --- a/pkg/kubectl/polymorphichelpers/objectresumer.go +++ b/pkg/kubectl/polymorphichelpers/objectresumer.go @@ -25,19 +25,11 @@ import ( appsv1beta2 "k8s.io/api/apps/v1beta2" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/kubectl/scheme" ) func defaultObjectResumer(obj runtime.Object) ([]byte, error) { switch obj := obj.(type) { - case *extensions.Deployment: - if !obj.Spec.Paused { - return nil, errors.New("is not paused") - } - obj.Spec.Paused = false - return runtime.Encode(internalVersionJSONEncoder(), obj) - case 
*extensionsv1beta1.Deployment: if !obj.Spec.Paused { return nil, errors.New("is not paused") diff --git a/pkg/kubectl/polymorphichelpers/objectresumer_test.go b/pkg/kubectl/polymorphichelpers/objectresumer_test.go index 6488f335f92..408e961a4f8 100644 --- a/pkg/kubectl/polymorphichelpers/objectresumer_test.go +++ b/pkg/kubectl/polymorphichelpers/objectresumer_test.go @@ -20,8 +20,8 @@ import ( "bytes" "testing" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/kubernetes/pkg/apis/extensions" ) func TestDefaultObjectResumer(t *testing.T) { @@ -31,8 +31,8 @@ func TestDefaultObjectResumer(t *testing.T) { expectErr bool }{ { - object: &extensions.Deployment{ - Spec: extensions.DeploymentSpec{ + object: &extensionsv1beta1.Deployment{ + Spec: extensionsv1beta1.DeploymentSpec{ Paused: true, }, }, @@ -40,15 +40,15 @@ func TestDefaultObjectResumer(t *testing.T) { expectErr: false, }, { - object: &extensions.Deployment{ - Spec: extensions.DeploymentSpec{ + object: &extensionsv1beta1.Deployment{ + Spec: extensionsv1beta1.DeploymentSpec{ Paused: false, }, }, expectErr: true, }, { - object: &extensions.ReplicaSet{}, + object: &extensionsv1beta1.ReplicaSet{}, expectErr: true, }, } diff --git a/pkg/kubectl/polymorphichelpers/portsforobject_test.go b/pkg/kubectl/polymorphichelpers/portsforobject_test.go index b1ebb6fe4aa..da5f21e6c01 100644 --- a/pkg/kubectl/polymorphichelpers/portsforobject_test.go +++ b/pkg/kubectl/polymorphichelpers/portsforobject_test.go @@ -21,9 +21,9 @@ import ( "reflect" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" ) func TestPortsForObject(t *testing.T) { @@ -32,11 +32,11 @@ func TestPortsForObject(t *testing.T) { expectErr bool }{ { - object: &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + object: &corev1.Pod{ + Spec: corev1.PodSpec{ + 
Containers: []corev1.Container{ { - Ports: []api.ContainerPort{ + Ports: []corev1.ContainerPort{ { ContainerPort: 101, }, @@ -47,9 +47,9 @@ func TestPortsForObject(t *testing.T) { }, }, { - object: &api.Service{ - Spec: api.ServiceSpec{ - Ports: []api.ServicePort{ + object: &corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ { Port: 101, }, @@ -58,13 +58,13 @@ func TestPortsForObject(t *testing.T) { }, }, { - object: &api.ReplicationController{ - Spec: api.ReplicationControllerSpec{ - Template: &api.PodTemplateSpec{ - Spec: api.PodSpec{ - Containers: []api.Container{ + object: &corev1.ReplicationController{ + Spec: corev1.ReplicationControllerSpec{ + Template: &corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { - Ports: []api.ContainerPort{ + Ports: []corev1.ContainerPort{ { ContainerPort: 101, }, @@ -77,13 +77,13 @@ func TestPortsForObject(t *testing.T) { }, }, { - object: &extensions.Deployment{ - Spec: extensions.DeploymentSpec{ - Template: api.PodTemplateSpec{ - Spec: api.PodSpec{ - Containers: []api.Container{ + object: &extensionsv1beta1.Deployment{ + Spec: extensionsv1beta1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { - Ports: []api.ContainerPort{ + Ports: []corev1.ContainerPort{ { ContainerPort: 101, }, @@ -96,13 +96,13 @@ func TestPortsForObject(t *testing.T) { }, }, { - object: &extensions.ReplicaSet{ - Spec: extensions.ReplicaSetSpec{ - Template: api.PodTemplateSpec{ - Spec: api.PodSpec{ - Containers: []api.Container{ + object: &extensionsv1beta1.ReplicaSet{ + Spec: extensionsv1beta1.ReplicaSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { - Ports: []api.ContainerPort{ + Ports: []corev1.ContainerPort{ { ContainerPort: 101, }, @@ -115,7 +115,7 @@ func TestPortsForObject(t *testing.T) { }, }, { - object: &api.Node{}, + object: &corev1.Node{}, expectErr: true, }, } diff --git 
a/pkg/kubectl/polymorphichelpers/protocolsforobject_test.go b/pkg/kubectl/polymorphichelpers/protocolsforobject_test.go index 76c00bdcdbb..6a94cef76b9 100644 --- a/pkg/kubectl/polymorphichelpers/protocolsforobject_test.go +++ b/pkg/kubectl/polymorphichelpers/protocolsforobject_test.go @@ -21,9 +21,9 @@ import ( "reflect" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/extensions" ) func TestProtocolsForObject(t *testing.T) { @@ -32,11 +32,11 @@ func TestProtocolsForObject(t *testing.T) { expectErr bool }{ { - object: &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + object: &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { - Ports: []api.ContainerPort{ + Ports: []corev1.ContainerPort{ { ContainerPort: 101, Protocol: "tcp", @@ -48,9 +48,9 @@ func TestProtocolsForObject(t *testing.T) { }, }, { - object: &api.Service{ - Spec: api.ServiceSpec{ - Ports: []api.ServicePort{ + object: &corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ { Port: 101, Protocol: "tcp", @@ -60,13 +60,13 @@ func TestProtocolsForObject(t *testing.T) { }, }, { - object: &api.ReplicationController{ - Spec: api.ReplicationControllerSpec{ - Template: &api.PodTemplateSpec{ - Spec: api.PodSpec{ - Containers: []api.Container{ + object: &corev1.ReplicationController{ + Spec: corev1.ReplicationControllerSpec{ + Template: &corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { - Ports: []api.ContainerPort{ + Ports: []corev1.ContainerPort{ { ContainerPort: 101, Protocol: "tcp", @@ -80,13 +80,13 @@ func TestProtocolsForObject(t *testing.T) { }, }, { - object: &extensions.Deployment{ - Spec: extensions.DeploymentSpec{ - Template: api.PodTemplateSpec{ - Spec: api.PodSpec{ - Containers: []api.Container{ + object: &extensionsv1beta1.Deployment{ + Spec: extensionsv1beta1.DeploymentSpec{ + 
Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { - Ports: []api.ContainerPort{ + Ports: []corev1.ContainerPort{ { ContainerPort: 101, Protocol: "tcp", @@ -100,13 +100,13 @@ func TestProtocolsForObject(t *testing.T) { }, }, { - object: &extensions.ReplicaSet{ - Spec: extensions.ReplicaSetSpec{ - Template: api.PodTemplateSpec{ - Spec: api.PodSpec{ - Containers: []api.Container{ + object: &extensionsv1beta1.ReplicaSet{ + Spec: extensionsv1beta1.ReplicaSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ { - Ports: []api.ContainerPort{ + Ports: []corev1.ContainerPort{ { ContainerPort: 101, Protocol: "tcp", @@ -120,7 +120,7 @@ func TestProtocolsForObject(t *testing.T) { }, }, { - object: &api.Node{}, + object: &corev1.Node{}, expectErr: true, }, } diff --git a/pkg/kubectl/polymorphichelpers/statusviewer.go b/pkg/kubectl/polymorphichelpers/statusviewer.go index 740c5612506..08a402b5ec8 100644 --- a/pkg/kubectl/polymorphichelpers/statusviewer.go +++ b/pkg/kubectl/polymorphichelpers/statusviewer.go @@ -18,20 +18,10 @@ package polymorphichelpers import ( "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/kubectl" ) // statusViewer returns a StatusViewer for printing rollout status. 
-func statusViewer(restClientGetter genericclioptions.RESTClientGetter, mapping *meta.RESTMapping) (kubectl.StatusViewer, error) { - clientConfig, err := restClientGetter.ToRESTConfig() - if err != nil { - return nil, err - } - clientset, err := kubernetes.NewForConfig(clientConfig) - if err != nil { - return nil, err - } - return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind(), clientset) +func statusViewer(mapping *meta.RESTMapping) (kubectl.StatusViewer, error) { + return kubectl.StatusViewerFor(mapping.GroupVersionKind.GroupKind()) } diff --git a/pkg/kubectl/rollout_status.go b/pkg/kubectl/rollout_status.go index 184cb892494..beb9d353030 100644 --- a/pkg/kubectl/rollout_status.go +++ b/pkg/kubectl/rollout_status.go @@ -23,9 +23,6 @@ import ( extensionsv1beta1 "k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/kubernetes" - clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" - "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/pkg/kubectl/scheme" ) @@ -36,32 +33,28 @@ type StatusViewer interface { } // StatusViewerFor returns a StatusViewer for the resource specified by kind. 
-func StatusViewerFor(kind schema.GroupKind, c kubernetes.Interface) (StatusViewer, error) { +func StatusViewerFor(kind schema.GroupKind) (StatusViewer, error) { switch kind { - case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), apps.Kind("Deployment"): - return &DeploymentStatusViewer{c.AppsV1()}, nil - case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(), apps.Kind("DaemonSet"): - return &DaemonSetStatusViewer{c.AppsV1()}, nil - case apps.Kind("StatefulSet"): - return &StatefulSetStatusViewer{c.AppsV1()}, nil + case extensionsv1beta1.SchemeGroupVersion.WithKind("Deployment").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("Deployment").GroupKind(): + return &DeploymentStatusViewer{}, nil + case extensionsv1beta1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(), + appsv1.SchemeGroupVersion.WithKind("DaemonSet").GroupKind(): + return &DaemonSetStatusViewer{}, nil + case appsv1.SchemeGroupVersion.WithKind("StatefulSet").GroupKind(): + return &StatefulSetStatusViewer{}, nil } return nil, fmt.Errorf("no status viewer has been implemented for %v", kind) } // DeploymentStatusViewer implements the StatusViewer interface. -type DeploymentStatusViewer struct { - c clientappsv1.DeploymentsGetter -} +type DeploymentStatusViewer struct{} // DaemonSetStatusViewer implements the StatusViewer interface. -type DaemonSetStatusViewer struct { - c clientappsv1.DaemonSetsGetter -} +type DaemonSetStatusViewer struct{} // StatefulSetStatusViewer implements the StatusViewer interface. -type StatefulSetStatusViewer struct { - c clientappsv1.StatefulSetsGetter -} +type StatefulSetStatusViewer struct{} // Status returns a message describing deployment status, and a bool value indicating if the status is considered done. 
func (s *DeploymentStatusViewer) Status(obj runtime.Unstructured, revision int64) (string, bool, error) { @@ -141,7 +134,7 @@ func (s *StatefulSetStatusViewer) Status(obj runtime.Unstructured, revision int6 if sts.Spec.Replicas != nil && sts.Status.ReadyReplicas < *sts.Spec.Replicas { return fmt.Sprintf("Waiting for %d pods to be ready...\n", *sts.Spec.Replicas-sts.Status.ReadyReplicas), false, nil } - if sts.Spec.UpdateStrategy.Type == apps.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil { + if sts.Spec.UpdateStrategy.Type == appsv1.RollingUpdateStatefulSetStrategyType && sts.Spec.UpdateStrategy.RollingUpdate != nil { if sts.Spec.Replicas != nil && sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil { if sts.Status.UpdatedReplicas < (*sts.Spec.Replicas - *sts.Spec.UpdateStrategy.RollingUpdate.Partition) { return fmt.Sprintf("Waiting for partitioned roll out to finish: %d out of %d new pods have been updated...\n", diff --git a/pkg/kubectl/rollout_status_test.go b/pkg/kubectl/rollout_status_test.go index 08ff610dd7c..97238ac7817 100644 --- a/pkg/kubectl/rollout_status_test.go +++ b/pkg/kubectl/rollout_status_test.go @@ -24,7 +24,6 @@ import ( api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/client-go/kubernetes/fake" "k8s.io/kubernetes/pkg/kubectl/scheme" ) @@ -134,8 +133,7 @@ func TestDeploymentStatusViewerStatus(t *testing.T) { t.Fatal(err) } - client := fake.NewSimpleClientset(d).Apps() - dsv := &DeploymentStatusViewer{c: client} + dsv := &DeploymentStatusViewer{} msg, done, err := dsv.Status(unstructuredD, 0) if err != nil { t.Fatalf("DeploymentStatusViewer.Status(): %v", err) @@ -240,8 +238,7 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) { t.Fatal(err) } - client := fake.NewSimpleClientset(d).Apps() - dsv := &DaemonSetStatusViewer{c: client} + dsv := &DaemonSetStatusViewer{} msg, done, err := dsv.Status(unstructuredD, 0) if err != 
nil { t.Fatalf("unexpected error: %v", err) @@ -392,8 +389,7 @@ func TestStatefulSetStatusViewerStatus(t *testing.T) { t.Fatal(err) } - client := fake.NewSimpleClientset(s).AppsV1() - dsv := &StatefulSetStatusViewer{c: client} + dsv := &StatefulSetStatusViewer{} msg, done, err := dsv.Status(unstructuredS, 0) if test.err && err == nil { t.Fatalf("%s: expected error", test.name) @@ -431,8 +427,7 @@ func TestDaemonSetStatusViewerStatusWithWrongUpdateStrategyType(t *testing.T) { t.Fatal(err) } - client := fake.NewSimpleClientset(d).Apps() - dsv := &DaemonSetStatusViewer{c: client} + dsv := &DaemonSetStatusViewer{} msg, done, err := dsv.Status(unstructuredD, 0) errMsg := "rollout status is only available for RollingUpdate strategy type" if err == nil || err.Error() != errMsg { diff --git a/pkg/kubectl/scheme/scheme.go b/pkg/kubectl/scheme/scheme.go index 880b115b178..a7b0833f6d1 100644 --- a/pkg/kubectl/scheme/scheme.go +++ b/pkg/kubectl/scheme/scheme.go @@ -30,6 +30,9 @@ var Scheme = runtime.NewScheme() // Codecs provides access to encoding and decoding for the scheme var Codecs = serializer.NewCodecFactory(Scheme) +// ParameterCodec handles versioning of objects that are converted to query parameters. 
+var ParameterCodec = runtime.NewParameterCodec(Scheme) + // DefaultJSONEncoder returns a default encoder for our scheme func DefaultJSONEncoder() runtime.Encoder { return unstructured.JSONFallbackEncoder{Encoder: Codecs.LegacyCodec(Scheme.PrioritizedVersionsAllGroups()...)} diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index 62e4602f929..55996099478 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -11,6 +11,7 @@ go_library( srcs = [ "active_deadline.go", "doc.go", + "errors.go", "kubelet.go", "kubelet_getters.go", "kubelet_network.go", @@ -210,7 +211,7 @@ go_test( "//pkg/util/taints:go_default_library", "//pkg/version:go_default_library", "//pkg/volume:go_default_library", - "//pkg/volume/aws_ebs:go_default_library", + "//pkg/volume/awsebs:go_default_library", "//pkg/volume/azure_dd:go_default_library", "//pkg/volume/gce_pd:go_default_library", "//pkg/volume/host_path:go_default_library", diff --git a/pkg/kubelet/checkpoint/checkpoint.go b/pkg/kubelet/checkpoint/checkpoint.go index e5801312d6a..bf84178dce8 100644 --- a/pkg/kubelet/checkpoint/checkpoint.go +++ b/pkg/kubelet/checkpoint/checkpoint.go @@ -82,7 +82,7 @@ func checkAnnotations(pod *v1.Pod) bool { //getPodKey returns the full qualified path for the pod checkpoint func getPodKey(pod *v1.Pod) string { - return fmt.Sprintf("Pod%v%v.yaml", delimiter, pod.GetUID()) + return fmt.Sprintf("%s%s%v.yaml", podPrefix, delimiter, pod.GetUID()) } // LoadPods Loads All Checkpoints from disk diff --git a/pkg/kubelet/cm/BUILD b/pkg/kubelet/cm/BUILD index 6800caf05fd..12c9d2f2895 100644 --- a/pkg/kubelet/cm/BUILD +++ b/pkg/kubelet/cm/BUILD @@ -77,8 +77,8 @@ go_library( "//pkg/util/oom:go_default_library", "//pkg/util/procfs:go_default_library", "//pkg/util/sysctl:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//vendor/github.com/docker/go-units:go_default_library", diff --git a/pkg/kubelet/cm/cgroup_manager_linux.go b/pkg/kubelet/cm/cgroup_manager_linux.go index 6a4d65cd858..d0eb6b6d27b 100644 --- a/pkg/kubelet/cm/cgroup_manager_linux.go +++ b/pkg/kubelet/cm/cgroup_manager_linux.go @@ -204,18 +204,16 @@ func NewCgroupManager(cs *CgroupSubsystems, cgroupDriver string) CgroupManager { func (m *cgroupManagerImpl) Name(name CgroupName) string { if m.adapter.cgroupManagerType == libcontainerSystemd { return name.ToSystemd() - } else { - return name.ToCgroupfs() } + return name.ToCgroupfs() } // CgroupName converts the literal cgroupfs name on the host to an internal identifier. func (m *cgroupManagerImpl) CgroupName(name string) CgroupName { if m.adapter.cgroupManagerType == libcontainerSystemd { return ParseSystemdToCgroupName(name) - } else { - return ParseCgroupfsToCgroupName(name) } + return ParseCgroupfsToCgroupName(name) } // buildCgroupPaths builds a path to each cgroup subsystem for the specified name. @@ -257,6 +255,7 @@ func (m *cgroupManagerImpl) Exists(name CgroupName) bool { // once resolved, we can remove this code. whitelistControllers := sets.NewString("cpu", "cpuacct", "cpuset", "memory", "systemd") + var missingPaths []string // If even one cgroup path doesn't exist, then the cgroup doesn't exist. 
for controller, path := range cgroupPaths { // ignore mounts we don't care about @@ -264,10 +263,15 @@ func (m *cgroupManagerImpl) Exists(name CgroupName) bool { continue } if !libcontainercgroups.PathExists(path) { - return false + missingPaths = append(missingPaths, path) } } + if len(missingPaths) > 0 { + glog.V(4).Infof("The Cgroup %v has some missing paths: %v", name, missingPaths) + return false + } + return true } diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index 132e2de9abe..96506163e4d 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -38,6 +38,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" @@ -59,7 +60,6 @@ import ( "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/util/procfs" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" - utilversion "k8s.io/kubernetes/pkg/util/version" ) const ( diff --git a/pkg/kubelet/cm/pod_container_manager_linux.go b/pkg/kubelet/cm/pod_container_manager_linux.go index d0c3a829947..844f63986de 100644 --- a/pkg/kubelet/cm/pod_container_manager_linux.go +++ b/pkg/kubelet/cm/pod_container_manager_linux.go @@ -139,9 +139,8 @@ func (m *podContainerManagerImpl) killOnePid(pid int) error { // also does this. 
glog.V(3).Infof("process with pid %v no longer exists", pid) return nil - } else { - return err } + return err } return nil } diff --git a/pkg/kubelet/config/common_test.go b/pkg/kubelet/config/common_test.go index 87820f6a546..709fdcf3315 100644 --- a/pkg/kubelet/config/common_test.go +++ b/pkg/kubelet/config/common_test.go @@ -37,6 +37,7 @@ func noDefault(*core.Pod) error { return nil } func TestDecodeSinglePod(t *testing.T) { grace := int64(30) + enableServiceLinks := v1.DefaultEnableServiceLinks pod := &v1.Pod{ TypeMeta: metav1.TypeMeta{ APIVersion: "", @@ -58,8 +59,9 @@ func TestDecodeSinglePod(t *testing.T) { TerminationMessagePolicy: v1.TerminationMessageReadFile, SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), }}, - SecurityContext: &v1.PodSecurityContext{}, - SchedulerName: core.DefaultSchedulerName, + SecurityContext: &v1.PodSecurityContext{}, + SchedulerName: core.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, } json, err := runtime.Encode(testapi.Default.Codec(), pod) @@ -99,6 +101,7 @@ func TestDecodeSinglePod(t *testing.T) { func TestDecodePodList(t *testing.T) { grace := int64(30) + enableServiceLinks := v1.DefaultEnableServiceLinks pod := &v1.Pod{ TypeMeta: metav1.TypeMeta{ APIVersion: "", @@ -121,8 +124,9 @@ func TestDecodePodList(t *testing.T) { SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), }}, - SecurityContext: &v1.PodSecurityContext{}, - SchedulerName: core.DefaultSchedulerName, + SecurityContext: &v1.PodSecurityContext{}, + SchedulerName: core.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, } podList := &v1.PodList{ diff --git a/pkg/kubelet/config/file_linux_test.go b/pkg/kubelet/config/file_linux_test.go index b449b8aafa9..4c26b0ed819 100644 --- a/pkg/kubelet/config/file_linux_test.go +++ b/pkg/kubelet/config/file_linux_test.go @@ -140,6 +140,7 @@ type testCase struct { func getTestCases(hostname types.NodeName) []*testCase { grace := 
int64(30) + enableServiceLinks := v1.DefaultEnableServiceLinks return []*testCase{ { lock: &sync.Mutex{}, @@ -188,8 +189,9 @@ func getTestCases(hostname types.NodeName) []*testCase { SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), TerminationMessagePolicy: v1.TerminationMessageReadFile, }}, - SecurityContext: &v1.PodSecurityContext{}, - SchedulerName: api.DefaultSchedulerName, + SecurityContext: &v1.PodSecurityContext{}, + SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, Status: v1.PodStatus{ Phase: v1.PodPending, diff --git a/pkg/kubelet/config/http_test.go b/pkg/kubelet/config/http_test.go index f4dff088748..3b7f00b0be6 100644 --- a/pkg/kubelet/config/http_test.go +++ b/pkg/kubelet/config/http_test.go @@ -129,6 +129,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { nodeName := "different-value" grace := int64(30) + enableServiceLinks := v1.DefaultEnableServiceLinks var testCases = []struct { desc string pods runtime.Object @@ -173,6 +174,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { SecurityContext: &v1.PodSecurityContext{}, TerminationGracePeriodSeconds: &grace, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, Containers: []v1.Container{{ Name: "1", @@ -244,6 +246,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { TerminationGracePeriodSeconds: &grace, SecurityContext: &v1.PodSecurityContext{}, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, Containers: []v1.Container{{ Name: "1", @@ -272,6 +275,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { TerminationGracePeriodSeconds: &grace, SecurityContext: &v1.PodSecurityContext{}, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, Containers: []v1.Container{{ Name: "2", diff --git a/pkg/kubelet/dockershim/cm/BUILD b/pkg/kubelet/dockershim/cm/BUILD index 5ca2b2afc63..754952527a9 100644 --- a/pkg/kubelet/dockershim/cm/BUILD +++ 
b/pkg/kubelet/dockershim/cm/BUILD @@ -31,7 +31,7 @@ go_library( "//pkg/kubelet/cm:go_default_library", "//pkg/kubelet/dockershim/libdocker:go_default_library", "//pkg/kubelet/qos:go_default_library", - "//pkg/util/version:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", diff --git a/pkg/kubelet/dockershim/cm/container_manager_linux.go b/pkg/kubelet/dockershim/cm/container_manager_linux.go index 17baaee11ef..8484fd2d873 100644 --- a/pkg/kubelet/dockershim/cm/container_manager_linux.go +++ b/pkg/kubelet/dockershim/cm/container_manager_linux.go @@ -28,10 +28,10 @@ import ( "github.com/golang/glog" "github.com/opencontainers/runc/libcontainer/cgroups/fs" "github.com/opencontainers/runc/libcontainer/configs" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" kubecm "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/qos" - utilversion "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker" ) @@ -101,8 +101,15 @@ func (m *containerManager) doWork() { func createCgroupManager(name string) (*fs.Manager, error) { var memoryLimit uint64 + memoryCapacity, err := getMemoryCapacity() - if err != nil || memoryCapacity*dockerMemoryLimitThresholdPercent/100 < minDockerMemoryLimit { + if err != nil { + glog.Errorf("Failed to get the memory capacity on machine: %v", err) + } else { + memoryLimit = memoryCapacity * dockerMemoryLimitThresholdPercent / 100 + } + + if err != nil || memoryLimit < minDockerMemoryLimit { memoryLimit = minDockerMemoryLimit } glog.V(2).Infof("Configure resource-only container %q with memory limit: %d", name, memoryLimit) diff --git a/pkg/kubelet/dockershim/helpers.go b/pkg/kubelet/dockershim/helpers.go index 
05d23c75b29..6719422d0d9 100644 --- a/pkg/kubelet/dockershim/helpers.go +++ b/pkg/kubelet/dockershim/helpers.go @@ -344,7 +344,7 @@ func ensureSandboxImageExists(client libdocker.Interface, image string) error { var pullErrs []error for _, currentCreds := range creds { - authConfig := credentialprovider.LazyProvide(currentCreds) + authConfig := dockertypes.AuthConfig(credentialprovider.LazyProvide(currentCreds)) err := client.PullImage(image, authConfig, dockertypes.ImagePullOptions{}) // If there was no error, return success if err == nil { diff --git a/pkg/kubelet/dockershim/metrics/metrics.go b/pkg/kubelet/dockershim/metrics/metrics.go index 2ce548f76a6..907647970d5 100644 --- a/pkg/kubelet/dockershim/metrics/metrics.go +++ b/pkg/kubelet/dockershim/metrics/metrics.go @@ -30,7 +30,7 @@ const ( DockerOperationsLatencyKey = "docker_operations_latency_microseconds" // DockerOperationsErrorsKey is the key for the operation error metrics. DockerOperationsErrorsKey = "docker_operations_errors" - // DockerOperationsTimeoutKey is the key for the operation timoeut metrics. + // DockerOperationsTimeoutKey is the key for the operation timeout metrics. DockerOperationsTimeoutKey = "docker_operations_timeout" // Keep the "kubelet" subsystem for backward compatibility. 
diff --git a/pkg/kubelet/dockershim/security_context.go b/pkg/kubelet/dockershim/security_context.go index e2724357136..17969a1172f 100644 --- a/pkg/kubelet/dockershim/security_context.go +++ b/pkg/kubelet/dockershim/security_context.go @@ -137,8 +137,10 @@ func modifyHostConfig(sc *runtimeapi.LinuxContainerSecurityContext, hostConfig * hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, "no-new-privileges") } - hostConfig.MaskedPaths = sc.MaskedPaths - hostConfig.ReadonlyPaths = sc.ReadonlyPaths + if !hostConfig.Privileged { + hostConfig.MaskedPaths = sc.MaskedPaths + hostConfig.ReadonlyPaths = sc.ReadonlyPaths + } return nil } diff --git a/pkg/kubelet/dockershim/security_context_test.go b/pkg/kubelet/dockershim/security_context_test.go index 59876e72a22..3fe3646906d 100644 --- a/pkg/kubelet/dockershim/security_context_test.go +++ b/pkg/kubelet/dockershim/security_context_test.go @@ -110,11 +110,27 @@ func TestModifyContainerConfig(t *testing.T) { func TestModifyHostConfig(t *testing.T) { setNetworkHC := &dockercontainer.HostConfig{} + + // When we have Privileged pods, we do not need to use the + // Masked / Readonly paths. 
setPrivSC := &runtimeapi.LinuxContainerSecurityContext{} setPrivSC.Privileged = true + setPrivSC.MaskedPaths = []string{"/hello/world/masked"} + setPrivSC.ReadonlyPaths = []string{"/hello/world/readonly"} setPrivHC := &dockercontainer.HostConfig{ Privileged: true, } + + unsetPrivSC := &runtimeapi.LinuxContainerSecurityContext{} + unsetPrivSC.Privileged = false + unsetPrivSC.MaskedPaths = []string{"/hello/world/masked"} + unsetPrivSC.ReadonlyPaths = []string{"/hello/world/readonly"} + unsetPrivHC := &dockercontainer.HostConfig{ + Privileged: false, + MaskedPaths: []string{"/hello/world/masked"}, + ReadonlyPaths: []string{"/hello/world/readonly"}, + } + setCapsHC := &dockercontainer.HostConfig{ CapAdd: []string{"addCapA", "addCapB"}, CapDrop: []string{"dropCapA", "dropCapB"}, @@ -148,6 +164,11 @@ func TestModifyHostConfig(t *testing.T) { sc: setPrivSC, expected: setPrivHC, }, + { + name: "container.SecurityContext.NoPrivileges", + sc: unsetPrivSC, + expected: unsetPrivHC, + }, { name: "container.SecurityContext.Capabilities", sc: &runtimeapi.LinuxContainerSecurityContext{ diff --git a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults_windows.go b/pkg/kubelet/errors.go similarity index 69% rename from cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults_windows.go rename to pkg/kubelet/errors.go index 779dd5290c3..eae36e5ee59 100644 --- a/cmd/kubeadm/app/apis/kubeadm/v1alpha2/defaults_windows.go +++ b/pkg/kubelet/errors.go @@ -1,5 +1,3 @@ -// +build windows - /* Copyright 2018 The Kubernetes Authors. @@ -16,11 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha2 +package kubelet const ( - // DefaultCACertPath defines default location of CA certificate on Windows - DefaultCACertPath = "C:/etc/kubernetes/pki/ca.crt" - // DefaultCRISocket defines the default cri socket - DefaultCRISocket = "tcp://localhost:2375" + NetworkNotReadyErrorMsg = "network is not ready" ) diff --git a/pkg/kubelet/eviction/helpers_test.go b/pkg/kubelet/eviction/helpers_test.go index 0ed5a1b673f..ab960a4a546 100644 --- a/pkg/kubelet/eviction/helpers_test.go +++ b/pkg/kubelet/eviction/helpers_test.go @@ -1011,6 +1011,10 @@ func (f *fakeSummaryProvider) Get(updateStats bool) (*statsapi.Summary, error) { return f.result, nil } +func (f *fakeSummaryProvider) GetCPUAndMemoryStats() (*statsapi.Summary, error) { + return f.result, nil +} + // newPodStats returns a pod stat where each container is using the specified working set // each pod must have a Name, UID, Namespace func newPodStats(pod *v1.Pod, containerWorkingSetBytes int64) statsapi.PodStats { diff --git a/pkg/kubelet/images/helpers.go b/pkg/kubelet/images/helpers.go index b8b5bd3824f..231ba730ddf 100644 --- a/pkg/kubelet/images/helpers.go +++ b/pkg/kubelet/images/helpers.go @@ -46,5 +46,5 @@ func (ts throttledImageService) PullImage(image kubecontainer.ImageSpec, secrets if ts.limiter.TryAccept() { return ts.ImageService.PullImage(image, secrets) } - return "", fmt.Errorf("pull QPS exceeded.") + return "", fmt.Errorf("pull QPS exceeded") } diff --git a/pkg/kubelet/images/image_gc_manager.go b/pkg/kubelet/images/image_gc_manager.go index 2c58cb1cfbe..6d0b9be3e9f 100644 --- a/pkg/kubelet/images/image_gc_manager.go +++ b/pkg/kubelet/images/image_gc_manager.go @@ -44,8 +44,7 @@ type StatsProvider interface { ImageFsStats() (*statsapi.FsStats, error) } -// Manages lifecycle of all images. -// +// ImageGCManager is an interface for managing lifecycle of all images. // Implementation is thread-safe. type ImageGCManager interface { // Applies the garbage collection policy. 
Errors include being unable to free @@ -61,7 +60,7 @@ type ImageGCManager interface { DeleteUnusedImages() error } -// A policy for garbage collecting images. Policy defines an allowed band in +// ImageGCPolicy is a policy for garbage collecting images. Policy defines an allowed band in // which garbage collection will be run. type ImageGCPolicy struct { // Any usage above this threshold will always trigger garbage collection. @@ -144,6 +143,7 @@ type imageRecord struct { size int64 } +// NewImageGCManager instantiates a new ImageGCManager object. func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy, sandboxImage string) (ImageGCManager, error) { // Validate policy. if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 { diff --git a/pkg/kubelet/images/image_manager.go b/pkg/kubelet/images/image_manager.go index 413df5a727d..381c55e4084 100644 --- a/pkg/kubelet/images/image_manager.go +++ b/pkg/kubelet/images/image_manager.go @@ -40,6 +40,7 @@ type imageManager struct { var _ ImageManager = &imageManager{} +// NewImageManager instantiates a new ImageManager object. 
func NewImageManager(recorder record.EventRecorder, imageService kubecontainer.ImageService, imageBackOff *flowcontrol.Backoff, serialized bool, qps float32, burst int) ImageManager { imageService = throttleImagePulling(imageService, qps, burst) @@ -112,11 +113,10 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p msg := fmt.Sprintf("Container image %q already present on machine", container.Image) m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, msg, glog.Info) return imageRef, "", nil - } else { - msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image) - m.logIt(ref, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning) - return "", msg, ErrImageNeverPull } + msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image) + m.logIt(ref, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning) + return "", msg, ErrImageNeverPull } backOffKey := fmt.Sprintf("%s_%s", pod.UID, container.Image) @@ -132,7 +132,7 @@ func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, p if imagePullResult.err != nil { m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, imagePullResult.err), glog.Warning) m.backOff.Next(backOffKey, m.backOff.Clock.Now()) - if imagePullResult.err == RegistryUnavailable { + if imagePullResult.err == ErrRegistryUnavailable { msg := fmt.Sprintf("image pull failed for %s because the registry is unavailable.", container.Image) return "", msg, imagePullResult.err } diff --git a/pkg/kubelet/images/types.go b/pkg/kubelet/images/types.go index 105906b5ccf..897655c66f1 100644 --- a/pkg/kubelet/images/types.go +++ b/pkg/kubelet/images/types.go @@ -23,22 +23,22 @@ import ( ) var ( - // Container image pull failed, kubelet is backing off image pull + // ErrImagePullBackOff - Container 
image pull failed, kubelet is backing off image pull ErrImagePullBackOff = errors.New("ImagePullBackOff") - // Unable to inspect image + // ErrImageInspect - Unable to inspect image ErrImageInspect = errors.New("ImageInspectError") - // General image pull error + // ErrImagePull - General image pull error ErrImagePull = errors.New("ErrImagePull") - // Required Image is absent on host and PullPolicy is NeverPullImage + // ErrImageNeverPull - Required Image is absent on host and PullPolicy is NeverPullImage ErrImageNeverPull = errors.New("ErrImageNeverPull") - // Get http error when pulling image from registry - RegistryUnavailable = errors.New("RegistryUnavailable") + // ErrRegistryUnavailable - Get http error when pulling image from registry + ErrRegistryUnavailable = errors.New("RegistryUnavailable") - // Unable to parse the image name. + // ErrInvalidImageName - Unable to parse the image name. ErrInvalidImageName = errors.New("InvalidImageName") ) diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index d740861eae4..27c0a835aa1 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -690,6 +690,12 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.streamingRuntime = runtime klet.runner = runtime + runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime) + if err != nil { + return nil, err + } + klet.runtimeCache = runtimeCache + if cadvisor.UsingLegacyCadvisorStats(containerRuntime, remoteRuntimeEndpoint) { klet.StatsProvider = stats.NewCadvisorStatsProvider( klet.cadvisor, @@ -807,11 +813,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, experimentalCheckNodeCapabilitiesBeforeMount, keepTerminatedPodVolumes) - runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime) - if err != nil { - return nil, err - } - klet.runtimeCache = runtimeCache klet.reasonCache = NewReasonCache() klet.workQueue = queue.NewBasicWorkQueue(klet.clock) klet.podWorkers = 
newPodWorkers(klet.syncPod, kubeDeps.Recorder, klet.workQueue, klet.resyncInterval, backOffPeriod, klet.podCache) @@ -1560,8 +1561,8 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error { // If the network plugin is not ready, only start the pod if it uses the host network if rs := kl.runtimeState.networkErrors(); len(rs) != 0 && !kubecontainer.IsHostNetworkPod(pod) { - kl.recorder.Eventf(pod, v1.EventTypeWarning, events.NetworkNotReady, "network is not ready: %v", rs) - return fmt.Errorf("network is not ready: %v", rs) + kl.recorder.Eventf(pod, v1.EventTypeWarning, events.NetworkNotReady, "%s: %v", NetworkNotReadyErrorMsg, rs) + return fmt.Errorf("%s: %v", NetworkNotReadyErrorMsg, rs) } // Create Cgroups for the pod and apply resource parameters diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index 54f54f149ae..096dfd83060 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -151,7 +151,7 @@ func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool kubeletapis.LabelArch, } - var needsUpdate bool = false + needsUpdate := false if existingNode.Labels == nil { existingNode.Labels = make(map[string]string) } diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 1fed05dfb52..a00b934df6a 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -138,7 +138,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h mountEtcHostsFile := len(podIP) > 0 && runtime.GOOS != "windows" glog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile) mounts := []kubecontainer.Mount{} - var cleanupAction func() = nil + var cleanupAction func() for i, mount := range container.VolumeMounts { // do not mount /etc/hosts if container is already mounting on the path mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath) @@ 
-232,7 +232,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h } glog.V(5).Infof("Pod %q container %q mount %q has propagation %q", format.Pod(pod), container.Name, mount.Name, propagation) - mustMountRO := vol.Mounter.GetAttributes().ReadOnly && utilfeature.DefaultFeatureGate.Enabled(features.ReadOnlyAPIDataVolumes) + mustMountRO := vol.Mounter.GetAttributes().ReadOnly mounts = append(mounts, kubecontainer.Mount{ Name: mount.Name, @@ -488,7 +488,7 @@ var masterServices = sets.NewString("kubernetes") // getServiceEnvVarMap makes a map[string]string of env vars for services a // pod in namespace ns should see. -func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) { +func (kl *Kubelet) getServiceEnvVarMap(ns string, enableServiceLinks bool) (map[string]string, error) { var ( serviceMap = make(map[string]*v1.Service) m = make(map[string]string) @@ -514,19 +514,16 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) { } serviceName := service.Name - switch service.Namespace { - // for the case whether the master service namespace is the namespace the pod - // is in, the pod should receive all the services in the namespace. - // - // ordering of the case clauses below enforces this - case ns: - serviceMap[serviceName] = service - case kl.masterServiceNamespace: - if masterServices.Has(serviceName) { - if _, exists := serviceMap[serviceName]; !exists { - serviceMap[serviceName] = service - } + // We always want to add environment variables for master services + // from the master service namespace, even if enableServiceLinks is false. + // We also add environment variables for other services in the same + // namespace, if enableServiceLinks is true.
+ if service.Namespace == kl.masterServiceNamespace && masterServices.Has(serviceName) { + if _, exists := serviceMap[serviceName]; !exists { + serviceMap[serviceName] = service } + } else if service.Namespace == ns && enableServiceLinks { + serviceMap[serviceName] = service } } @@ -553,7 +550,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container // To avoid this users can: (1) wait between starting a service and starting; or (2) detect // missing service env var and exit and be restarted; or (3) use DNS instead of env vars // and keep trying to resolve the DNS name of the service (recommended). - serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace) + serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace, *pod.Spec.EnableServiceLinks) if err != nil { return result, err } diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index 563d1690af7..3e34435adb8 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -36,6 +36,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" core "k8s.io/client-go/testing" "k8s.io/client-go/tools/record" + // TODO: remove this import if // api.Registry.GroupOrDie(v1.GroupName).GroupVersions[0].String() is changed // to "v1"? 
@@ -439,20 +440,22 @@ func TestMakeEnvironmentVariables(t *testing.T) { } testCases := []struct { - name string // the name of the test case - ns string // the namespace to generate environment for - container *v1.Container // the container to use - masterServiceNs string // the namespace to read master service info from - nilLister bool // whether the lister should be nil - configMap *v1.ConfigMap // an optional ConfigMap to pull from - secret *v1.Secret // an optional Secret to pull from - expectedEnvs []kubecontainer.EnvVar // a set of expected environment vars - expectedError bool // does the test fail - expectedEvent string // does the test emit an event + name string // the name of the test case + ns string // the namespace to generate environment for + enableServiceLinks bool // enabling service links + container *v1.Container // the container to use + masterServiceNs string // the namespace to read master service info from + nilLister bool // whether the lister should be nil + configMap *v1.ConfigMap // an optional ConfigMap to pull from + secret *v1.Secret // an optional Secret to pull from + expectedEnvs []kubecontainer.EnvVar // a set of expected environment vars + expectedError bool // does the test fail + expectedEvent string // does the test emit an event }{ { - name: "api server = Y, kubelet = Y", - ns: "test1", + name: "api server = Y, kubelet = Y", + ns: "test1", + enableServiceLinks: false, container: &v1.Container{ Env: []v1.EnvVar{ {Name: "FOO", Value: "BAR"}, @@ -486,8 +489,9 @@ func TestMakeEnvironmentVariables(t *testing.T) { }, }, { - name: "api server = Y, kubelet = N", - ns: "test1", + name: "api server = Y, kubelet = N", + ns: "test1", + enableServiceLinks: false, container: &v1.Container{ Env: []v1.EnvVar{ {Name: "FOO", Value: "BAR"}, @@ -514,8 +518,31 @@ func TestMakeEnvironmentVariables(t *testing.T) { }, }, { - name: "api server = N; kubelet = Y", - ns: "test1", + name: "api server = N; kubelet = Y", + ns: "test1", + 
enableServiceLinks: false, + container: &v1.Container{ + Env: []v1.EnvVar{ + {Name: "FOO", Value: "BAZ"}, + }, + }, + masterServiceNs: metav1.NamespaceDefault, + nilLister: false, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "FOO", Value: "BAZ"}, + {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"}, + {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"}, + {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"}, + {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"}, + {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"}, + {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"}, + {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"}, + }, + }, + { + name: "api server = N; kubelet = Y; service env vars", + ns: "test1", + enableServiceLinks: true, container: &v1.Container{ Env: []v1.EnvVar{ {Name: "FOO", Value: "BAZ"}, @@ -542,8 +569,31 @@ func TestMakeEnvironmentVariables(t *testing.T) { }, }, { - name: "master service in pod ns", - ns: "test2", + name: "master service in pod ns", + ns: "test2", + enableServiceLinks: false, + container: &v1.Container{ + Env: []v1.EnvVar{ + {Name: "FOO", Value: "ZAP"}, + }, + }, + masterServiceNs: "kubernetes", + nilLister: false, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "FOO", Value: "ZAP"}, + {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.6"}, + {Name: "KUBERNETES_SERVICE_PORT", Value: "8086"}, + {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.6:8086"}, + {Name: "KUBERNETES_PORT_8086_TCP", Value: "tcp://1.2.3.6:8086"}, + {Name: "KUBERNETES_PORT_8086_TCP_PROTO", Value: "tcp"}, + {Name: "KUBERNETES_PORT_8086_TCP_PORT", Value: "8086"}, + {Name: "KUBERNETES_PORT_8086_TCP_ADDR", Value: "1.2.3.6"}, + }, + }, + { + name: "master service in pod ns, service env vars", + ns: "test2", + enableServiceLinks: true, container: &v1.Container{ Env: []v1.EnvVar{ {Name: "FOO", Value: "ZAP"}, @@ -570,11 +620,29 @@ func TestMakeEnvironmentVariables(t *testing.T) { }, }, { - name: "pod in master service ns", - ns: "kubernetes", 
- container: &v1.Container{}, - masterServiceNs: "kubernetes", - nilLister: false, + name: "pod in master service ns", + ns: "kubernetes", + enableServiceLinks: false, + container: &v1.Container{}, + masterServiceNs: "kubernetes", + nilLister: false, + expectedEnvs: []kubecontainer.EnvVar{ + {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.6"}, + {Name: "KUBERNETES_SERVICE_PORT", Value: "8086"}, + {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.6:8086"}, + {Name: "KUBERNETES_PORT_8086_TCP", Value: "tcp://1.2.3.6:8086"}, + {Name: "KUBERNETES_PORT_8086_TCP_PROTO", Value: "tcp"}, + {Name: "KUBERNETES_PORT_8086_TCP_PORT", Value: "8086"}, + {Name: "KUBERNETES_PORT_8086_TCP_ADDR", Value: "1.2.3.6"}, + }, + }, + { + name: "pod in master service ns, service env vars", + ns: "kubernetes", + enableServiceLinks: true, + container: &v1.Container{}, + masterServiceNs: "kubernetes", + nilLister: false, expectedEnvs: []kubecontainer.EnvVar{ {Name: "NOT_SPECIAL_SERVICE_HOST", Value: "1.2.3.8"}, {Name: "NOT_SPECIAL_SERVICE_PORT", Value: "8088"}, @@ -593,8 +661,9 @@ func TestMakeEnvironmentVariables(t *testing.T) { }, }, { - name: "downward api pod", - ns: "downward-api", + name: "downward api pod", + ns: "downward-api", + enableServiceLinks: false, container: &v1.Container{ Env: []v1.EnvVar{ { @@ -665,8 +734,105 @@ func TestMakeEnvironmentVariables(t *testing.T) { }, }, { - name: "env expansion", - ns: "test1", + name: "env expansion", + ns: "test1", + enableServiceLinks: false, + container: &v1.Container{ + Env: []v1.EnvVar{ + { + Name: "TEST_LITERAL", + Value: "test-test-test", + }, + { + Name: "POD_NAME", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + APIVersion: "v1", //legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(), + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "OUT_OF_ORDER_TEST", + Value: "$(OUT_OF_ORDER_TARGET)", + }, + { + Name: "OUT_OF_ORDER_TARGET", + Value: "FOO", + }, + { + Name: "EMPTY_VAR", + }, + { + Name: 
"EMPTY_TEST", + Value: "foo-$(EMPTY_VAR)", + }, + { + Name: "POD_NAME_TEST2", + Value: "test2-$(POD_NAME)", + }, + { + Name: "POD_NAME_TEST3", + Value: "$(POD_NAME_TEST2)-3", + }, + { + Name: "LITERAL_TEST", + Value: "literal-$(TEST_LITERAL)", + }, + { + Name: "TEST_UNDEFINED", + Value: "$(UNDEFINED_VAR)", + }, + }, + }, + masterServiceNs: "nothing", + nilLister: false, + expectedEnvs: []kubecontainer.EnvVar{ + { + Name: "TEST_LITERAL", + Value: "test-test-test", + }, + { + Name: "POD_NAME", + Value: "dapi-test-pod-name", + }, + { + Name: "POD_NAME_TEST2", + Value: "test2-dapi-test-pod-name", + }, + { + Name: "POD_NAME_TEST3", + Value: "test2-dapi-test-pod-name-3", + }, + { + Name: "LITERAL_TEST", + Value: "literal-test-test-test", + }, + { + Name: "OUT_OF_ORDER_TEST", + Value: "$(OUT_OF_ORDER_TARGET)", + }, + { + Name: "OUT_OF_ORDER_TARGET", + Value: "FOO", + }, + { + Name: "TEST_UNDEFINED", + Value: "$(UNDEFINED_VAR)", + }, + { + Name: "EMPTY_VAR", + }, + { + Name: "EMPTY_TEST", + Value: "foo-", + }, + }, + }, + { + name: "env expansion, service env vars", + ns: "test1", + enableServiceLinks: true, container: &v1.Container{ Env: []v1.EnvVar{ { @@ -796,8 +962,9 @@ func TestMakeEnvironmentVariables(t *testing.T) { }, }, { - name: "configmapkeyref_missing_optional", - ns: "test", + name: "configmapkeyref_missing_optional", + ns: "test", + enableServiceLinks: false, container: &v1.Container{ Env: []v1.EnvVar{ { @@ -816,8 +983,9 @@ func TestMakeEnvironmentVariables(t *testing.T) { expectedEnvs: nil, }, { - name: "configmapkeyref_missing_key_optional", - ns: "test", + name: "configmapkeyref_missing_key_optional", + ns: "test", + enableServiceLinks: false, container: &v1.Container{ Env: []v1.EnvVar{ { @@ -846,8 +1014,9 @@ func TestMakeEnvironmentVariables(t *testing.T) { expectedEnvs: nil, }, { - name: "secretkeyref_missing_optional", - ns: "test", + name: "secretkeyref_missing_optional", + ns: "test", + enableServiceLinks: false, container: &v1.Container{ Env: 
[]v1.EnvVar{ { @@ -866,8 +1035,9 @@ func TestMakeEnvironmentVariables(t *testing.T) { expectedEnvs: nil, }, { - name: "secretkeyref_missing_key_optional", - ns: "test", + name: "secretkeyref_missing_key_optional", + ns: "test", + enableServiceLinks: false, container: &v1.Container{ Env: []v1.EnvVar{ { @@ -896,8 +1066,77 @@ func TestMakeEnvironmentVariables(t *testing.T) { expectedEnvs: nil, }, { - name: "configmap", - ns: "test1", + name: "configmap", + ns: "test1", + enableServiceLinks: false, + container: &v1.Container{ + EnvFrom: []v1.EnvFromSource{ + { + ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}}, + }, + { + Prefix: "p_", + ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}}, + }, + }, + Env: []v1.EnvVar{ + { + Name: "TEST_LITERAL", + Value: "test-test-test", + }, + { + Name: "EXPANSION_TEST", + Value: "$(REPLACE_ME)", + }, + { + Name: "DUPE_TEST", + Value: "ENV_VAR", + }, + }, + }, + masterServiceNs: "nothing", + nilLister: false, + configMap: &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test1", + Name: "test-configmap", + }, + Data: map[string]string{ + "REPLACE_ME": "FROM_CONFIG_MAP", + "DUPE_TEST": "CONFIG_MAP", + }, + }, + expectedEnvs: []kubecontainer.EnvVar{ + { + Name: "TEST_LITERAL", + Value: "test-test-test", + }, + { + Name: "REPLACE_ME", + Value: "FROM_CONFIG_MAP", + }, + { + Name: "EXPANSION_TEST", + Value: "FROM_CONFIG_MAP", + }, + { + Name: "DUPE_TEST", + Value: "ENV_VAR", + }, + { + Name: "p_REPLACE_ME", + Value: "FROM_CONFIG_MAP", + }, + { + Name: "p_DUPE_TEST", + Value: "CONFIG_MAP", + }, + }, + }, + { + name: "configmap, service env vars", + ns: "test1", + enableServiceLinks: true, container: &v1.Container{ EnvFrom: []v1.EnvFromSource{ { @@ -991,8 +1230,9 @@ func TestMakeEnvironmentVariables(t *testing.T) { }, }, { - name: "configmap_missing", - ns: "test1", + name: "configmap_missing", + ns: "test1", + 
enableServiceLinks: false, container: &v1.Container{ EnvFrom: []v1.EnvFromSource{ {ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}}}, @@ -1002,8 +1242,9 @@ func TestMakeEnvironmentVariables(t *testing.T) { expectedError: true, }, { - name: "configmap_missing_optional", - ns: "test", + name: "configmap_missing_optional", + ns: "test", + enableServiceLinks: false, container: &v1.Container{ EnvFrom: []v1.EnvFromSource{ {ConfigMapRef: &v1.ConfigMapEnvSource{ @@ -1015,8 +1256,9 @@ func TestMakeEnvironmentVariables(t *testing.T) { expectedEnvs: nil, }, { - name: "configmap_invalid_keys", - ns: "test", + name: "configmap_invalid_keys", + ns: "test", + enableServiceLinks: false, container: &v1.Container{ EnvFrom: []v1.EnvFromSource{ {ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}}}, @@ -1043,8 +1285,9 @@ func TestMakeEnvironmentVariables(t *testing.T) { expectedEvent: "Warning InvalidEnvironmentVariableNames Keys [1234, 1z] from the EnvFrom configMap test/test-config-map were skipped since they are considered invalid environment variable names.", }, { - name: "configmap_invalid_keys_valid", - ns: "test", + name: "configmap_invalid_keys_valid", + ns: "test", + enableServiceLinks: false, container: &v1.Container{ EnvFrom: []v1.EnvFromSource{ { @@ -1071,8 +1314,77 @@ func TestMakeEnvironmentVariables(t *testing.T) { }, }, { - name: "secret", - ns: "test1", + name: "secret", + ns: "test1", + enableServiceLinks: false, + container: &v1.Container{ + EnvFrom: []v1.EnvFromSource{ + { + SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}}, + }, + { + Prefix: "p_", + SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}}, + }, + }, + Env: []v1.EnvVar{ + { + Name: "TEST_LITERAL", + Value: "test-test-test", + }, + { + Name: "EXPANSION_TEST", + Value: "$(REPLACE_ME)", + }, + { + 
Name: "DUPE_TEST", + Value: "ENV_VAR", + }, + }, + }, + masterServiceNs: "nothing", + nilLister: false, + secret: &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test1", + Name: "test-secret", + }, + Data: map[string][]byte{ + "REPLACE_ME": []byte("FROM_SECRET"), + "DUPE_TEST": []byte("SECRET"), + }, + }, + expectedEnvs: []kubecontainer.EnvVar{ + { + Name: "TEST_LITERAL", + Value: "test-test-test", + }, + { + Name: "REPLACE_ME", + Value: "FROM_SECRET", + }, + { + Name: "EXPANSION_TEST", + Value: "FROM_SECRET", + }, + { + Name: "DUPE_TEST", + Value: "ENV_VAR", + }, + { + Name: "p_REPLACE_ME", + Value: "FROM_SECRET", + }, + { + Name: "p_DUPE_TEST", + Value: "SECRET", + }, + }, + }, + { + name: "secret, service env vars", + ns: "test1", + enableServiceLinks: true, container: &v1.Container{ EnvFrom: []v1.EnvFromSource{ { @@ -1166,8 +1478,9 @@ func TestMakeEnvironmentVariables(t *testing.T) { }, }, { - name: "secret_missing", - ns: "test1", + name: "secret_missing", + ns: "test1", + enableServiceLinks: false, container: &v1.Container{ EnvFrom: []v1.EnvFromSource{ {SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}}}, @@ -1177,8 +1490,9 @@ func TestMakeEnvironmentVariables(t *testing.T) { expectedError: true, }, { - name: "secret_missing_optional", - ns: "test", + name: "secret_missing_optional", + ns: "test", + enableServiceLinks: false, container: &v1.Container{ EnvFrom: []v1.EnvFromSource{ {SecretRef: &v1.SecretEnvSource{ @@ -1190,8 +1504,9 @@ func TestMakeEnvironmentVariables(t *testing.T) { expectedEnvs: nil, }, { - name: "secret_invalid_keys", - ns: "test", + name: "secret_invalid_keys", + ns: "test", + enableServiceLinks: false, container: &v1.Container{ EnvFrom: []v1.EnvFromSource{ {SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}}}, @@ -1218,8 +1533,9 @@ func TestMakeEnvironmentVariables(t *testing.T) { expectedEvent: "Warning 
InvalidEnvironmentVariableNames Keys [1234, 1z] from the EnvFrom secret test/test-secret were skipped since they are considered invalid environment variable names.", }, { - name: "secret_invalid_keys_valid", - ns: "test", + name: "secret_invalid_keys_valid", + ns: "test", + enableServiceLinks: false, container: &v1.Container{ EnvFrom: []v1.EnvFromSource{ { @@ -1291,6 +1607,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { Spec: v1.PodSpec{ ServiceAccountName: "special", NodeName: "node-name", + EnableServiceLinks: &tc.enableServiceLinks, }, } podIP := "1.2.3.4" diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 509a1f532fe..de8fb60969e 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -68,7 +68,7 @@ import ( schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/aws_ebs" + "k8s.io/kubernetes/pkg/volume/awsebs" "k8s.io/kubernetes/pkg/volume/azure_dd" "k8s.io/kubernetes/pkg/volume/gce_pd" _ "k8s.io/kubernetes/pkg/volume/host_path" @@ -313,12 +313,12 @@ func newTestKubeletWithImageList( if initFakeVolumePlugin { allPlugins = append(allPlugins, plug) } else { - allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, awsebs.ProbeVolumePlugins()...) allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...) allPlugins = append(allPlugins, azure_dd.ProbeVolumePlugins()...) 
} - var prober volume.DynamicPluginProber = nil // TODO (#51147) inject mock + var prober volume.DynamicPluginProber // TODO (#51147) inject mock kubelet.volumePluginMgr, err = NewInitializedVolumePluginMgr(kubelet, kubelet.secretManager, kubelet.configMapManager, token.NewManager(kubelet.kubeClient), allPlugins, prober) require.NoError(t, err, "Failed to initialize VolumePluginMgr") diff --git a/pkg/kubelet/kuberuntime/BUILD b/pkg/kubelet/kuberuntime/BUILD index d1045f5a4af..67c04164816 100644 --- a/pkg/kubelet/kuberuntime/BUILD +++ b/pkg/kubelet/kuberuntime/BUILD @@ -54,13 +54,13 @@ go_library( "//pkg/util/parsers:go_default_library", "//pkg/util/selinux:go_default_library", "//pkg/util/tail:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/client-go/tools/reference:go_default_library", diff --git a/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go b/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go index 6eb64ee7b02..683f1304317 100644 --- a/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go @@ -70,7 +70,7 @@ func (f *fakePodStateProvider) IsPodTerminated(uid types.UID) bool { return !found } -func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, 
osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, keyring credentialprovider.DockerKeyring) (*kubeGenericRuntimeManager, error) { +func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, keyring credentialprovider.DockerKeyring) (*kubeGenericRuntimeManager, error) { recorder := &record.FakeRecorder{} kubeRuntimeManager := &kubeGenericRuntimeManager{ recorder: recorder, @@ -93,7 +93,7 @@ func NewFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS return nil, err } - kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, newFakePodStateProvider(), kubeRuntimeManager) + kubeRuntimeManager.containerGC = newContainerGC(runtimeService, newFakePodStateProvider(), kubeRuntimeManager) kubeRuntimeManager.runtimeName = typedVersion.RuntimeName kubeRuntimeManager.imagePuller = images.NewImageManager( kubecontainer.FilterEventRecorder(recorder), diff --git a/pkg/kubelet/kuberuntime/instrumented_services_test.go b/pkg/kubelet/kuberuntime/instrumented_services_test.go index d56d23ca96a..6463e617f06 100644 --- a/pkg/kubelet/kuberuntime/instrumented_services_test.go +++ b/pkg/kubelet/kuberuntime/instrumented_services_test.go @@ -38,7 +38,7 @@ func TestRecordOperation(t *testing.T) { assert.NoError(t, err) defer l.Close() - prometheusUrl := "http://" + temporalServer + "/metrics" + prometheusURL := "http://" + temporalServer + "/metrics" mux := http.NewServeMux() mux.Handle("/metrics", prometheus.Handler()) server := &http.Server{ @@ -55,11 +55,11 @@ func TestRecordOperation(t *testing.T) { assert.HTTPBodyContains(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { mux.ServeHTTP(w, r) - }), "GET", prometheusUrl, nil, runtimeOperationsCounterExpected) + }), "GET", prometheusURL, nil, runtimeOperationsCounterExpected) 
assert.HTTPBodyContains(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { mux.ServeHTTP(w, r) - }), "GET", prometheusUrl, nil, runtimeOperationsLatencyExpected) + }), "GET", prometheusURL, nil, runtimeOperationsLatencyExpected) } func TestInstrumentedVersion(t *testing.T) { diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index 5575c45181e..929afc7d8aa 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -50,10 +50,14 @@ import ( ) var ( + // ErrCreateContainerConfig - failed to create container config ErrCreateContainerConfig = errors.New("CreateContainerConfigError") - ErrCreateContainer = errors.New("CreateContainerError") - ErrPreStartHook = errors.New("PreStartHookError") - ErrPostStartHook = errors.New("PostStartHookError") + // ErrCreateContainer - failed to create container + ErrCreateContainer = errors.New("CreateContainerError") + // ErrPreStartHook - failed to execute PreStartHook + ErrPreStartHook = errors.New("PreStartHookError") + // ErrPostStartHook - failed to execute PostStartHook + ErrPostStartHook = errors.New("PostStartHookError") ) // recordContainerEvent should be used by the runtime manager for all container related events. 
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go index d04b20f741f..ae1f66149c1 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go @@ -124,8 +124,8 @@ func TestGenerateContainerConfig(t *testing.T) { _, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, kubecontainer.ContainerTypeRegular) assert.Error(t, err) - imageId, _ := imageService.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil) - image, _ := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imageId}) + imageID, _ := imageService.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil) + image, _ := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imageID}) image.Uid = nil image.Username = "test" diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_test.go b/pkg/kubelet/kuberuntime/kuberuntime_container_test.go index 64845b8c9ad..01df93d24fb 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_test.go @@ -56,17 +56,17 @@ func TestRemoveContainer(t *testing.T) { _, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod) assert.Equal(t, len(fakeContainers), 1) - containerId := fakeContainers[0].Id + containerID := fakeContainers[0].Id fakeOS := m.osInterface.(*containertest.FakeOS) - err = m.removeContainer(containerId) + err = m.removeContainer(containerID) assert.NoError(t, err) // Verify container log is removed expectedContainerLogPath := filepath.Join(podLogsRootDirectory, "12345678", "foo", "0.log") - expectedContainerLogSymlink := legacyLogSymlink(containerId, "foo", "bar", "new") + expectedContainerLogSymlink := legacyLogSymlink(containerID, "foo", "bar", "new") assert.Equal(t, fakeOS.Removes, 
[]string{expectedContainerLogPath, expectedContainerLogSymlink}) // Verify container is removed assert.Contains(t, fakeRuntime.Called, "RemoveContainer") - containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: containerId}) + containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: containerID}) assert.NoError(t, err) assert.Empty(t, containers) } @@ -257,10 +257,10 @@ func TestLifeCycleHook(t *testing.T) { } fakeRunner := &containertest.FakeContainerCommandRunner{} - fakeHttp := &fakeHTTP{} + fakeHTTP := &fakeHTTP{} lcHanlder := lifecycle.NewHandlerRunner( - fakeHttp, + fakeHTTP, fakeRunner, nil) @@ -277,11 +277,11 @@ func TestLifeCycleHook(t *testing.T) { // Configured and working HTTP hook t.Run("PreStop-HTTPGet", func(t *testing.T) { - defer func() { fakeHttp.url = "" }() + defer func() { fakeHTTP.url = "" }() testPod.Spec.Containers[0].Lifecycle = httpLifeCycle m.killContainer(testPod, cID, "foo", "testKill", &gracePeriod) - if !strings.Contains(fakeHttp.url, httpLifeCycle.PreStop.HTTPGet.Host) { + if !strings.Contains(fakeHTTP.url, httpLifeCycle.PreStop.HTTPGet.Host) { t.Errorf("HTTP Prestop hook was not invoked") } }) @@ -295,7 +295,7 @@ func TestLifeCycleHook(t *testing.T) { m.killContainer(testPod, cID, "foo", "testKill", &gracePeriodLocal) - if strings.Contains(fakeHttp.url, httpLifeCycle.PreStop.HTTPGet.Host) { + if strings.Contains(fakeHTTP.url, httpLifeCycle.PreStop.HTTPGet.Host) { t.Errorf("HTTP Should not execute when gracePeriod is 0") } }) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_gc.go b/pkg/kubelet/kuberuntime/kuberuntime_gc.go index a66a07370cb..3e0afbc45b6 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_gc.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_gc.go @@ -39,7 +39,7 @@ type containerGC struct { } // NewContainerGC creates a new containerGC. 
-func NewContainerGC(client internalapi.RuntimeService, podStateProvider podStateProvider, manager *kubeGenericRuntimeManager) *containerGC { +func newContainerGC(client internalapi.RuntimeService, podStateProvider podStateProvider, manager *kubeGenericRuntimeManager) *containerGC { return &containerGC{ client: client, manager: manager, diff --git a/pkg/kubelet/kuberuntime/kuberuntime_image_test.go b/pkg/kubelet/kuberuntime/kuberuntime_image_test.go index 3d20455cc1d..6ab07df0945 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_image_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_image_test.go @@ -109,8 +109,8 @@ func TestPullWithSecrets(t *testing.T) { t.Errorf("unexpected error: %v", err) } - dockerConfigJson := map[string]map[string]map[string]string{"auths": dockerCfg} - dockerConfigJsonContent, err := json.Marshal(dockerConfigJson) + dockerConfigJSON := map[string]map[string]map[string]string{"auths": dockerCfg} + dockerConfigJSONContent, err := json.Marshal(dockerConfigJSON) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -153,7 +153,7 @@ func TestPullWithSecrets(t *testing.T) { }, "builtin keyring secrets, but use passed with new docker config": { "ubuntu", - []v1.Secret{{Type: v1.SecretTypeDockerConfigJson, Data: map[string][]byte{v1.DockerConfigJsonKey: dockerConfigJsonContent}}}, + []v1.Secret{{Type: v1.SecretTypeDockerConfigJson, Data: map[string][]byte{v1.DockerConfigJsonKey: dockerConfigJSONContent}}}, credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{ "index.docker.io/v1/": {Username: "built-in", Password: "password", Provider: nil}, }), diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 2818990b11b..42b5ff9f86b 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubetypes "k8s.io/apimachinery/pkg/types" 
utilruntime "k8s.io/apimachinery/pkg/util/runtime" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/tools/record" ref "k8s.io/client-go/tools/reference" "k8s.io/client-go/util/flowcontrol" @@ -46,7 +47,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/cache" "k8s.io/kubernetes/pkg/kubelet/util/format" - utilversion "k8s.io/kubernetes/pkg/util/version" ) const ( @@ -125,6 +125,7 @@ type kubeGenericRuntimeManager struct { runtimeClassManager *runtimeclass.Manager } +// KubeGenericRuntime is an interface that contains interfaces for container runtime and command. type KubeGenericRuntime interface { kubecontainer.Runtime kubecontainer.StreamingRuntime @@ -216,7 +217,7 @@ func NewKubeGenericRuntimeManager( imagePullQPS, imagePullBurst) kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(httpClient, kubeRuntimeManager, kubeRuntimeManager) - kubeRuntimeManager.containerGC = NewContainerGC(runtimeService, podStateProvider, kubeRuntimeManager) + kubeRuntimeManager.containerGC = newContainerGC(runtimeService, podStateProvider, kubeRuntimeManager) kubeRuntimeManager.versionCache = cache.NewObjectCache( func() (interface{}, error) { @@ -466,6 +467,10 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku if createPodSandbox { if !shouldRestartOnFailure(pod) && attempt != 0 { // Should not restart the pod, just return. + // we should not create a sandbox for a pod if it is already done. + // if all containers are done and should not be started, there is no need to create a new sandbox. + // this stops confusing logs on pods whose containers all have exit codes, but we recreate a sandbox before terminating it. + changes.CreateSandbox = false return changes } if len(pod.Spec.InitContainers) != 0 { @@ -539,7 +544,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku reason = "Container failed liveness probe." } else { // Keep the container.
- keepCount += 1 + keepCount++ continue } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go index b1de84e1c38..fc02734d0e9 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go @@ -54,7 +54,7 @@ func customTestRuntimeManager(keyring *credentialprovider.BasicDockerKeyring) (* // we may want to set memory capacity. machineInfo := &cadvisorapi.MachineInfo{} osInterface := &containertest.FakeOS{} - manager, err := NewFakeKubeRuntimeManager(fakeRuntimeService, fakeImageService, machineInfo, osInterface, &containertest.FakeRuntimeHelper{}, keyring) + manager, err := newFakeKubeRuntimeManager(fakeRuntimeService, fakeImageService, machineInfo, osInterface, &containertest.FakeRuntimeHelper{}, keyring) return fakeRuntimeService, fakeImageService, manager, err } @@ -906,6 +906,29 @@ func TestComputePodActions(t *testing.T) { // TODO: Add a test case for containers which failed the liveness // check. Will need to fake the livessness check result. 
}, + "Verify we do not create a pod sandbox if no ready sandbox for pod with RestartPolicy=Never and all containers exited": { + mutatePodFn: func(pod *v1.Pod) { + pod.Spec.RestartPolicy = v1.RestartPolicyNever + }, + mutateStatusFn: func(status *kubecontainer.PodStatus) { + // no ready sandbox + status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY + status.SandboxStatuses[0].Metadata.Attempt = uint32(1) + // all containers exited + for i := range status.ContainerStatuses { + status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited + status.ContainerStatuses[i].ExitCode = 0 + } + }, + actions: podActions{ + SandboxID: baseStatus.SandboxStatuses[0].Id, + Attempt: uint32(2), + CreateSandbox: false, + KillPod: true, + ContainersToStart: []int{}, + ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{}, + }, + }, } { pod, status := makeBasePodAndStatus() if test.mutatePodFn != nil { diff --git a/pkg/kubelet/kuberuntime/legacy.go b/pkg/kubelet/kuberuntime/legacy.go index 2a00ad012f1..1a77e6f0c8d 100644 --- a/pkg/kubelet/kuberuntime/legacy.go +++ b/pkg/kubelet/kuberuntime/legacy.go @@ -44,9 +44,9 @@ func legacyLogSymlink(containerID string, containerName, podName, podNamespace s containerName, containerID) } -func logSymlink(containerLogsDir, podFullName, containerName, dockerId string) string { +func logSymlink(containerLogsDir, podFullName, containerName, dockerID string) string { suffix := fmt.Sprintf(".%s", legacyLogSuffix) - logPath := fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerId) + logPath := fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerID) // Length of a filename cannot exceed 255 characters in ext4 on Linux. 
if len(logPath) > ext4MaxFileNameLen-len(suffix) { logPath = logPath[:ext4MaxFileNameLen-len(suffix)] diff --git a/pkg/kubelet/kuberuntime/legacy_test.go b/pkg/kubelet/kuberuntime/legacy_test.go index 31e4d6967f8..48cdcb21fd1 100644 --- a/pkg/kubelet/kuberuntime/legacy_test.go +++ b/pkg/kubelet/kuberuntime/legacy_test.go @@ -40,10 +40,10 @@ func TestLogSymLink(t *testing.T) { containerLogsDir := "/foo/bar" podFullName := randStringBytes(128) containerName := randStringBytes(70) - dockerId := randStringBytes(80) + dockerID := randStringBytes(80) // The file name cannot exceed 255 characters. Since .log suffix is required, the prefix cannot exceed 251 characters. - expectedPath := path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerId)[:251]+".log") - as.Equal(expectedPath, logSymlink(containerLogsDir, podFullName, containerName, dockerId)) + expectedPath := path.Join(containerLogsDir, fmt.Sprintf("%s_%s-%s", podFullName, containerName, dockerID)[:251]+".log") + as.Equal(expectedPath, logSymlink(containerLogsDir, podFullName, containerName, dockerID)) } func TestLegacyLogSymLink(t *testing.T) { diff --git a/pkg/kubelet/leaky/leaky.go b/pkg/kubelet/leaky/leaky.go index 4e3e1e1f27c..7c75002c47e 100644 --- a/pkg/kubelet/leaky/leaky.go +++ b/pkg/kubelet/leaky/leaky.go @@ -19,7 +19,7 @@ limitations under the License. package leaky const ( - // This is used in a few places outside of Kubelet, such as indexing + // PodInfraContainerName is used in a few places outside of Kubelet, such as indexing // into the container info. 
PodInfraContainerName = "POD" ) diff --git a/pkg/kubelet/network/dns/BUILD b/pkg/kubelet/network/dns/BUILD index acc66b182e9..ae7bba5cf9e 100644 --- a/pkg/kubelet/network/dns/BUILD +++ b/pkg/kubelet/network/dns/BUILD @@ -12,6 +12,7 @@ go_library( "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/util/format:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/pkg/kubelet/network/dns/dns.go b/pkg/kubelet/network/dns/dns.go index dd414f6029d..307c06ff872 100644 --- a/pkg/kubelet/network/dns/dns.go +++ b/pkg/kubelet/network/dns/dns.go @@ -26,6 +26,7 @@ import ( "strings" "k8s.io/api/core/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/apis/core/validation" @@ -209,6 +210,7 @@ func parseResolvConf(reader io.Reader) (nameservers []string, searches []string, // Each option is recorded as an element in the array. 
options = []string{} + var allErrors []error lines := strings.Split(string(file), "\n") for l := range lines { trimmed := strings.TrimSpace(lines[l]) @@ -219,8 +221,12 @@ func parseResolvConf(reader io.Reader) (nameservers []string, searches []string, if len(fields) == 0 { continue } - if fields[0] == "nameserver" && len(fields) >= 2 { - nameservers = append(nameservers, fields[1]) + if fields[0] == "nameserver" { + if len(fields) >= 2 { + nameservers = append(nameservers, fields[1]) + } else { + allErrors = append(allErrors, fmt.Errorf("nameserver list is empty")) + } } if fields[0] == "search" { searches = fields[1:] @@ -230,7 +236,7 @@ func parseResolvConf(reader io.Reader) (nameservers []string, searches []string, } } - return nameservers, searches, options, nil + return nameservers, searches, options, utilerrors.NewAggregate(allErrors) } func (c *Configurer) getHostDNSConfig(pod *v1.Pod) (*runtimeapi.DNSConfig, error) { diff --git a/pkg/kubelet/network/dns/dns_test.go b/pkg/kubelet/network/dns/dns_test.go index 799e83510a7..f9249109d67 100644 --- a/pkg/kubelet/network/dns/dns_test.go +++ b/pkg/kubelet/network/dns/dns_test.go @@ -53,40 +53,48 @@ func TestParseResolvConf(t *testing.T) { nameservers []string searches []string options []string + isErr bool }{
{"nameserver\t1.2.3.4", []string{"1.2.3.4"}, []string{}, []string{}}, - {"nameserver \t 1.2.3.4", []string{"1.2.3.4"}, []string{}, []string{}}, - {"nameserver 1.2.3.4\nnameserver 5.6.7.8", []string{"1.2.3.4", "5.6.7.8"}, []string{}, []string{}}, - {"nameserver 1.2.3.4 #comment", []string{"1.2.3.4"}, []string{}, []string{}}, - {"search foo", []string{}, []string{"foo"}, []string{}}, - {"search foo bar", []string{}, []string{"foo", "bar"}, []string{}}, - {"search foo bar bat\n", []string{}, []string{"foo", "bar", "bat"}, []string{}}, - {"search foo\nsearch bar", []string{}, []string{"bar"}, []string{}}, - {"nameserver 1.2.3.4\nsearch foo bar", []string{"1.2.3.4"}, []string{"foo", "bar"}, []string{}}, - {"nameserver 1.2.3.4\nsearch foo\nnameserver 5.6.7.8\nsearch bar", []string{"1.2.3.4", "5.6.7.8"}, []string{"bar"}, []string{}}, - {"#comment\nnameserver 1.2.3.4\n#comment\nsearch foo\ncomment", []string{"1.2.3.4"}, []string{"foo"}, []string{}}, - {"options ndots:5 attempts:2", []string{}, []string{}, []string{"ndots:5", "attempts:2"}}, - {"options ndots:1\noptions ndots:5 attempts:3", []string{}, []string{}, []string{"ndots:5", "attempts:3"}}, - {"nameserver 1.2.3.4\nsearch foo\nnameserver 5.6.7.8\nsearch bar\noptions ndots:5 attempts:4", []string{"1.2.3.4", "5.6.7.8"}, []string{"bar"}, []string{"ndots:5", "attempts:4"}}, + {"", []string{}, []string{}, []string{}, false}, + {" ", []string{}, []string{}, []string{}, false}, + {"\n", []string{}, []string{}, []string{}, false}, + {"\t\n\t", []string{}, []string{}, []string{}, false}, + {"#comment\n", []string{}, []string{}, []string{}, false}, + {" #comment\n", []string{}, []string{}, []string{}, false}, + {"#comment\n#comment", []string{}, []string{}, []string{}, false}, + {"#comment\nnameserver", []string{}, []string{}, []string{}, true}, // nameserver empty + {"#comment\nnameserver\nsearch", []string{}, []string{}, []string{}, true}, // nameserver and search empty + {"#comment\nnameserver 1.2.3.4\nsearch", 
[]string{"1.2.3.4"}, []string{}, []string{}, false}, // nameserver specified and search empty + {"nameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}, []string{}, false}, + {" nameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}, []string{}, false}, + {"\tnameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}, []string{}, false}, + {"nameserver\t1.2.3.4", []string{"1.2.3.4"}, []string{}, []string{}, false}, + {"nameserver \t 1.2.3.4", []string{"1.2.3.4"}, []string{}, []string{}, false}, + {"nameserver 1.2.3.4\nnameserver 5.6.7.8", []string{"1.2.3.4", "5.6.7.8"}, []string{}, []string{}, false}, + {"nameserver 1.2.3.4 #comment", []string{"1.2.3.4"}, []string{}, []string{}, false}, + {"search ", []string{}, []string{}, []string{}, false}, // search empty + {"search foo", []string{}, []string{"foo"}, []string{}, false}, + {"search foo bar", []string{}, []string{"foo", "bar"}, []string{}, false}, + {"search foo bar bat\n", []string{}, []string{"foo", "bar", "bat"}, []string{}, false}, + {"search foo\nsearch bar", []string{}, []string{"bar"}, []string{}, false}, + {"nameserver 1.2.3.4\nsearch foo bar", []string{"1.2.3.4"}, []string{"foo", "bar"}, []string{}, false}, + {"nameserver 1.2.3.4\nsearch foo\nnameserver 5.6.7.8\nsearch bar", []string{"1.2.3.4", "5.6.7.8"}, []string{"bar"}, []string{}, false}, + {"#comment\nnameserver 1.2.3.4\n#comment\nsearch foo\ncomment", []string{"1.2.3.4"}, []string{"foo"}, []string{}, false}, + {"options ", []string{}, []string{}, []string{}, false}, + {"options ndots:5 attempts:2", []string{}, []string{}, []string{"ndots:5", "attempts:2"}, false}, + {"options ndots:1\noptions ndots:5 attempts:3", []string{}, []string{}, []string{"ndots:5", "attempts:3"}, false}, + {"nameserver 1.2.3.4\nsearch foo\nnameserver 5.6.7.8\nsearch bar\noptions ndots:5 attempts:4", []string{"1.2.3.4", "5.6.7.8"}, []string{"bar"}, []string{"ndots:5", "attempts:4"}, false}, } for i, tc := range testCases { ns, srch, opts, err := 
parseResolvConf(strings.NewReader(tc.data)) - require.NoError(t, err) - assert.EqualValues(t, tc.nameservers, ns, "test case [%d]: name servers", i) - assert.EqualValues(t, tc.searches, srch, "test case [%d] searches", i) - assert.EqualValues(t, tc.options, opts, "test case [%d] options", i) + if !tc.isErr { + require.NoError(t, err) + assert.EqualValues(t, tc.nameservers, ns, "test case [%d]: name servers", i) + assert.EqualValues(t, tc.searches, srch, "test case [%d] searches", i) + assert.EqualValues(t, tc.options, opts, "test case [%d] options", i) + } else { + require.Error(t, err, "tc.searches %v", tc.searches) + } } } diff --git a/pkg/kubelet/pleg/generic.go b/pkg/kubelet/pleg/generic.go index d9286c96c62..2befde16b97 100644 --- a/pkg/kubelet/pleg/generic.go +++ b/pkg/kubelet/pleg/generic.go @@ -106,6 +106,7 @@ type podRecord struct { type podRecords map[types.UID]*podRecord +// NewGenericPLEG instantiates a new GenericPLEG object and return it. func NewGenericPLEG(runtime kubecontainer.Runtime, channelCapacity int, relistPeriod time.Duration, cache kubecontainer.Cache, clock clock.Clock) PodLifecycleEventGenerator { return &GenericPLEG{ @@ -118,7 +119,7 @@ func NewGenericPLEG(runtime kubecontainer.Runtime, channelCapacity int, } } -// Returns a channel from which the subscriber can receive PodLifecycleEvent +// Watch returns a channel from which the subscriber can receive PodLifecycleEvent // events. // TODO: support multiple subscribers. func (g *GenericPLEG) Watch() chan *PodLifecycleEvent { @@ -130,6 +131,8 @@ func (g *GenericPLEG) Start() { go wait.Until(g.relist, g.relistPeriod, wait.NeverStop) } +// Healthy check if PLEG work properly. +// relistThreshold is the maximum interval between two relist. 
func (g *GenericPLEG) Healthy() (bool, error) { relistTime := g.getRelistTime() elapsed := g.clock.Since(relistTime) diff --git a/pkg/kubelet/pleg/pleg.go b/pkg/kubelet/pleg/pleg.go index aa0e46e465d..86b48c7ef13 100644 --- a/pkg/kubelet/pleg/pleg.go +++ b/pkg/kubelet/pleg/pleg.go @@ -20,16 +20,20 @@ import ( "k8s.io/apimachinery/pkg/types" ) +// PodLifeCycleEventType define the event type of pod life cycle events. type PodLifeCycleEventType string const ( + // ContainerStarted - event type when the new state of container is running. ContainerStarted PodLifeCycleEventType = "ContainerStarted" - ContainerDied PodLifeCycleEventType = "ContainerDied" + // ContainerDied - event type when the new state of container is exited. + ContainerDied PodLifeCycleEventType = "ContainerDied" + // ContainerRemoved - event type when the old state of container is exited. ContainerRemoved PodLifeCycleEventType = "ContainerRemoved" // PodSync is used to trigger syncing of a pod when the observed change of // the state of the pod cannot be captured by any single event above. PodSync PodLifeCycleEventType = "PodSync" - // Do not use the events below because they are disabled in GenericPLEG. + // ContainerChanged - event type when the new state of container is unknown. ContainerChanged PodLifeCycleEventType = "ContainerChanged" ) @@ -45,6 +49,7 @@ type PodLifecycleEvent struct { Data interface{} } +// PodLifecycleEventGenerator contains functions for generating pod life cycle events. 
type PodLifecycleEventGenerator interface { Start() Watch() chan *PodLifecycleEvent diff --git a/pkg/kubelet/pod_workers.go b/pkg/kubelet/pod_workers.go index 5a1fb992725..a42589a99c5 100644 --- a/pkg/kubelet/pod_workers.go +++ b/pkg/kubelet/pod_workers.go @@ -18,6 +18,7 @@ package kubelet import ( "fmt" + "strings" "sync" "time" @@ -96,8 +97,11 @@ const ( // jitter factor for resyncInterval workerResyncIntervalJitterFactor = 0.5 - // jitter factor for backOffPeriod + // jitter factor for backOffPeriod and backOffOnTransientErrorPeriod workerBackOffPeriodJitterFactor = 0.5 + + // backoff period when transient error occurred. + backOffOnTransientErrorPeriod = time.Second ) type podWorkers struct { @@ -263,6 +267,9 @@ func (p *podWorkers) wrapUp(uid types.UID, syncErr error) { case syncErr == nil: // No error; requeue at the regular resync interval. p.workQueue.Enqueue(uid, wait.Jitter(p.resyncInterval, workerResyncIntervalJitterFactor)) + case strings.Contains(syncErr.Error(), NetworkNotReadyErrorMsg): + // Network is not ready; back off for short period of time and retry as network might be ready soon. + p.workQueue.Enqueue(uid, wait.Jitter(backOffOnTransientErrorPeriod, workerBackOffPeriodJitterFactor)) default: // Error occurred during the sync; back off and then retry. p.workQueue.Enqueue(uid, wait.Jitter(p.backOffPeriod, workerBackOffPeriodJitterFactor)) diff --git a/pkg/kubelet/remote/remote_image.go b/pkg/kubelet/remote/remote_image.go index d6eae5e38f9..a6bc88f7cc4 100644 --- a/pkg/kubelet/remote/remote_image.go +++ b/pkg/kubelet/remote/remote_image.go @@ -17,6 +17,7 @@ limitations under the License. 
package remote import ( + "context" "errors" "fmt" "time" @@ -43,7 +44,10 @@ func NewRemoteImageService(endpoint string, connectionTimeout time.Duration) (in return nil, err } - conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(connectionTimeout), grpc.WithDialer(dailer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) + ctx, cancel := context.WithTimeout(context.Background(), connectionTimeout) + defer cancel() + + conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithDialer(dailer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) if err != nil { glog.Errorf("Connect remote image service %s failed: %v", addr, err) return nil, err diff --git a/pkg/kubelet/remote/remote_runtime.go b/pkg/kubelet/remote/remote_runtime.go index 01d59ee4c29..f447b850e15 100644 --- a/pkg/kubelet/remote/remote_runtime.go +++ b/pkg/kubelet/remote/remote_runtime.go @@ -45,7 +45,10 @@ func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration) ( if err != nil { return nil, err } - conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(connectionTimeout), grpc.WithDialer(dailer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) + ctx, cancel := context.WithTimeout(context.Background(), connectionTimeout) + defer cancel() + + conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithDialer(dailer), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize))) if err != nil { glog.Errorf("Connect remote runtime %s failed: %v", addr, err) return nil, err diff --git a/pkg/kubelet/server/auth.go b/pkg/kubelet/server/auth.go index 94a4d30915d..68e483f6ce0 100644 --- a/pkg/kubelet/server/auth.go +++ b/pkg/kubelet/server/auth.go @@ -108,7 +108,7 @@ func (n nodeAuthorizerAttributesGetter) GetRequestAttributes(u user.Info, r *htt attrs.Subresource = "spec" } - glog.V(5).Infof("Node request attributes: attrs=%#v", attrs) + glog.V(5).Infof("Node request attributes: 
user=%#v attrs=%#v", attrs.GetUser(), attrs) return attrs } diff --git a/pkg/kubelet/server/server_test.go b/pkg/kubelet/server/server_test.go index e3c54a07bce..f3cef84cd12 100644 --- a/pkg/kubelet/server/server_test.go +++ b/pkg/kubelet/server/server_test.go @@ -51,6 +51,7 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" + // Do some initialization to decode the query parameters correctly. _ "k8s.io/kubernetes/pkg/apis/core/install" "k8s.io/kubernetes/pkg/kubelet/cm" @@ -1166,7 +1167,7 @@ func TestServeExecInContainerIdleTimeout(t *testing.T) { url := fw.testHTTPServer.URL + "/exec/" + podNamespace + "/" + podName + "/" + expectedContainerName + "?c=ls&c=-a&" + api.ExecStdinParam + "=1" - upgradeRoundTripper := spdy.NewSpdyRoundTripper(nil, true) + upgradeRoundTripper := spdy.NewSpdyRoundTripper(nil, true, true) c := &http.Client{Transport: upgradeRoundTripper} resp, err := c.Post(url, "", nil) @@ -1332,7 +1333,7 @@ func testExecAttach(t *testing.T, verb string) { return http.ErrUseLastResponse } } else { - upgradeRoundTripper = spdy.NewRoundTripper(nil, true) + upgradeRoundTripper = spdy.NewRoundTripper(nil, true, true) c = &http.Client{Transport: upgradeRoundTripper} } @@ -1429,7 +1430,7 @@ func TestServePortForwardIdleTimeout(t *testing.T) { url := fw.testHTTPServer.URL + "/portForward/" + podNamespace + "/" + podName - upgradeRoundTripper := spdy.NewRoundTripper(nil, true) + upgradeRoundTripper := spdy.NewRoundTripper(nil, true, true) c := &http.Client{Transport: upgradeRoundTripper} resp, err := c.Post(url, "", nil) @@ -1536,7 +1537,7 @@ func TestServePortForward(t *testing.T) { return http.ErrUseLastResponse } } else { - upgradeRoundTripper = spdy.NewRoundTripper(nil, true) + upgradeRoundTripper = spdy.NewRoundTripper(nil, true, true) c = &http.Client{Transport: upgradeRoundTripper} } diff --git 
a/pkg/kubelet/server/stats/handler.go b/pkg/kubelet/server/stats/handler.go index f069cd898cb..6e665344503 100644 --- a/pkg/kubelet/server/stats/handler.go +++ b/pkg/kubelet/server/stats/handler.go @@ -192,10 +192,23 @@ func (h *handler) handleStats(request *restful.Request, response *restful.Respon } // Handles stats summary requests to /stats/summary +// If "only_cpu_and_memory" GET param is true then only cpu and memory is returned in response. func (h *handler) handleSummary(request *restful.Request, response *restful.Response) { - // external calls to the summary API use cached stats - forceStatsUpdate := false - summary, err := h.summaryProvider.Get(forceStatsUpdate) + onlyCPUAndMemory := false + request.Request.ParseForm() + if onlyCPUAndMemoryParam, found := request.Request.Form["only_cpu_and_memory"]; found && + len(onlyCPUAndMemoryParam) == 1 && onlyCPUAndMemoryParam[0] == "true" { + onlyCPUAndMemory = true + } + var summary *statsapi.Summary + var err error + if onlyCPUAndMemory { + summary, err = h.summaryProvider.GetCPUAndMemoryStats() + } else { + // external calls to the summary API use cached stats + forceStatsUpdate := false + summary, err = h.summaryProvider.Get(forceStatsUpdate) + } if err != nil { handleError(response, "/stats/summary", err) } else { diff --git a/pkg/kubelet/server/stats/summary.go b/pkg/kubelet/server/stats/summary.go index 900a8ad97d7..2897aff50f5 100644 --- a/pkg/kubelet/server/stats/summary.go +++ b/pkg/kubelet/server/stats/summary.go @@ -26,6 +26,8 @@ type SummaryProvider interface { // Get provides a new Summary with the stats from Kubelet, // and will update some stats if updateStats is true Get(updateStats bool) (*statsapi.Summary, error) + // GetCPUAndMemoryStats provides a new Summary with the CPU and memory stats from Kubelet. + GetCPUAndMemoryStats() (*statsapi.Summary, error) } // summaryProviderImpl implements the SummaryProvider interface.
@@ -87,3 +89,32 @@ func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error) } return &summary, nil } + +func (sp *summaryProviderImpl) GetCPUAndMemoryStats() (*statsapi.Summary, error) { + summary, err := sp.Get(false) + if err != nil { + return nil, err + } + summary.Node.Network = nil + summary.Node.Fs = nil + summary.Node.Runtime = nil + summary.Node.Rlimit = nil + for i := 0; i < len(summary.Node.SystemContainers); i++ { + summary.Node.SystemContainers[i].Accelerators = nil + summary.Node.SystemContainers[i].Rootfs = nil + summary.Node.SystemContainers[i].Logs = nil + summary.Node.SystemContainers[i].UserDefinedMetrics = nil + } + for i := 0; i < len(summary.Pods); i++ { + summary.Pods[i].Network = nil + summary.Pods[i].VolumeStats = nil + summary.Pods[i].EphemeralStorage = nil + for j := 0; j < len(summary.Pods[i].Containers); j++ { + summary.Pods[i].Containers[j].Accelerators = nil + summary.Pods[i].Containers[j].Rootfs = nil + summary.Pods[i].Containers[j].Logs = nil + summary.Pods[i].Containers[j].UserDefinedMetrics = nil + } + } + return summary, nil +} diff --git a/pkg/kubelet/server/stats/summary_test.go b/pkg/kubelet/server/stats/summary_test.go index 7807e2cf412..b6d7dbbab74 100644 --- a/pkg/kubelet/server/stats/summary_test.go +++ b/pkg/kubelet/server/stats/summary_test.go @@ -32,39 +32,39 @@ import ( statstest "k8s.io/kubernetes/pkg/kubelet/server/stats/testing" ) -func TestSummaryProvider(t *testing.T) { - var ( - podStats = []statsapi.PodStats{ - { - PodRef: statsapi.PodReference{Name: "test-pod", Namespace: "test-namespace", UID: "UID_test-pod"}, - StartTime: metav1.NewTime(time.Now()), - Containers: []statsapi.ContainerStats{*getContainerStats()}, - Network: getNetworkStats(), - VolumeStats: []statsapi.VolumeStats{*getVolumeStats()}, - }, - } - imageFsStats = getFsStats() - rootFsStats = getFsStats() - node = &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "test-node"}} - nodeConfig = cm.NodeConfig{ - RuntimeCgroupsName: 
"/runtime", - SystemCgroupsName: "/misc", - KubeletCgroupsName: "/kubelet", - } - cgroupRoot = "/kubepods" - cgroupStatsMap = map[string]struct { - cs *statsapi.ContainerStats - ns *statsapi.NetworkStats - }{ - "/": {cs: getContainerStats(), ns: getNetworkStats()}, - "/runtime": {cs: getContainerStats(), ns: getNetworkStats()}, - "/misc": {cs: getContainerStats(), ns: getNetworkStats()}, - "/kubelet": {cs: getContainerStats(), ns: getNetworkStats()}, - "/pods": {cs: getContainerStats(), ns: getNetworkStats()}, - } - rlimitStats = getRlimitStats() - ) +var ( + podStats = []statsapi.PodStats{ + { + PodRef: statsapi.PodReference{Name: "test-pod", Namespace: "test-namespace", UID: "UID_test-pod"}, + StartTime: metav1.NewTime(time.Now()), + Containers: []statsapi.ContainerStats{*getContainerStats()}, + Network: getNetworkStats(), + VolumeStats: []statsapi.VolumeStats{*getVolumeStats()}, + }, + } + imageFsStats = getFsStats() + rootFsStats = getFsStats() + node = &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "test-node"}} + nodeConfig = cm.NodeConfig{ + RuntimeCgroupsName: "/runtime", + SystemCgroupsName: "/misc", + KubeletCgroupsName: "/kubelet", + } + cgroupRoot = "/kubepods" + cgroupStatsMap = map[string]struct { + cs *statsapi.ContainerStats + ns *statsapi.NetworkStats + }{ + "/": {cs: getContainerStats(), ns: getNetworkStats()}, + "/runtime": {cs: getContainerStats(), ns: getNetworkStats()}, + "/misc": {cs: getContainerStats(), ns: getNetworkStats()}, + "/kubelet": {cs: getContainerStats(), ns: getNetworkStats()}, + "/pods": {cs: getContainerStats(), ns: getNetworkStats()}, + } + rlimitStats = getRlimitStats() +) +func TestSummaryProviderGetStats(t *testing.T) { assert := assert.New(t) mockStatsProvider := new(statstest.StatsProvider) @@ -130,6 +130,64 @@ func TestSummaryProvider(t *testing.T) { assert.Equal(summary.Pods, podStats) } +func TestSummaryProviderGetCPUAndMemoryStats(t *testing.T) { + assert := assert.New(t) + + mockStatsProvider := 
new(statstest.StatsProvider) + mockStatsProvider. + On("GetNode").Return(node, nil). + On("GetNodeConfig").Return(nodeConfig). + On("GetPodCgroupRoot").Return(cgroupRoot). + On("ListPodStats").Return(podStats, nil). + On("ImageFsStats").Return(imageFsStats, nil). + On("RootFsStats").Return(rootFsStats, nil). + On("RlimitStats").Return(rlimitStats, nil). + On("GetCgroupStats", "/", false).Return(cgroupStatsMap["/"].cs, cgroupStatsMap["/"].ns, nil). + On("GetCgroupStats", "/runtime", false).Return(cgroupStatsMap["/runtime"].cs, cgroupStatsMap["/runtime"].ns, nil). + On("GetCgroupStats", "/misc", false).Return(cgroupStatsMap["/misc"].cs, cgroupStatsMap["/misc"].ns, nil). + On("GetCgroupStats", "/kubelet", false).Return(cgroupStatsMap["/kubelet"].cs, cgroupStatsMap["/kubelet"].ns, nil). + On("GetCgroupStats", "/kubepods", false).Return(cgroupStatsMap["/pods"].cs, cgroupStatsMap["/pods"].ns, nil) + + provider := NewSummaryProvider(mockStatsProvider) + summary, err := provider.GetCPUAndMemoryStats() + assert.NoError(err) + + assert.Equal(summary.Node.NodeName, "test-node") + assert.Equal(summary.Node.StartTime, cgroupStatsMap["/"].cs.StartTime) + assert.Equal(summary.Node.CPU, cgroupStatsMap["/"].cs.CPU) + assert.Equal(summary.Node.Memory, cgroupStatsMap["/"].cs.Memory) + assert.Nil(summary.Node.Network) + assert.Nil(summary.Node.Fs) + assert.Nil(summary.Node.Runtime) + + assert.Equal(len(summary.Node.SystemContainers), 4) + assert.Contains(summary.Node.SystemContainers, statsapi.ContainerStats{ + Name: "kubelet", + StartTime: cgroupStatsMap["/kubelet"].cs.StartTime, + CPU: cgroupStatsMap["/kubelet"].cs.CPU, + Memory: cgroupStatsMap["/kubelet"].cs.Memory, + }) + assert.Contains(summary.Node.SystemContainers, statsapi.ContainerStats{ + Name: "misc", + StartTime: cgroupStatsMap["/misc"].cs.StartTime, + CPU: cgroupStatsMap["/misc"].cs.CPU, + Memory: cgroupStatsMap["/misc"].cs.Memory, + }) + assert.Contains(summary.Node.SystemContainers, statsapi.ContainerStats{ + Name: 
"runtime", + StartTime: cgroupStatsMap["/runtime"].cs.StartTime, + CPU: cgroupStatsMap["/runtime"].cs.CPU, + Memory: cgroupStatsMap["/runtime"].cs.Memory, + }) + assert.Contains(summary.Node.SystemContainers, statsapi.ContainerStats{ + Name: "pods", + StartTime: cgroupStatsMap["/pods"].cs.StartTime, + CPU: cgroupStatsMap["/pods"].cs.CPU, + Memory: cgroupStatsMap["/pods"].cs.Memory, + }) + assert.Equal(summary.Pods, podStats) +} + func getFsStats() *statsapi.FsStats { f := fuzz.New().NilChance(0) v := &statsapi.FsStats{} diff --git a/pkg/kubelet/stats/stats_provider_test.go b/pkg/kubelet/stats/stats_provider_test.go index e2abed50991..bbb149075bf 100644 --- a/pkg/kubelet/stats/stats_provider_test.go +++ b/pkg/kubelet/stats/stats_provider_test.go @@ -634,8 +634,9 @@ type fakeResourceAnalyzer struct { podVolumeStats serverstats.PodVolumeStats } -func (o *fakeResourceAnalyzer) Start() {} -func (o *fakeResourceAnalyzer) Get(bool) (*statsapi.Summary, error) { return nil, nil } +func (o *fakeResourceAnalyzer) Start() {} +func (o *fakeResourceAnalyzer) Get(bool) (*statsapi.Summary, error) { return nil, nil } +func (o *fakeResourceAnalyzer) GetCPUAndMemoryStats() (*statsapi.Summary, error) { return nil, nil } func (o *fakeResourceAnalyzer) GetPodVolumeStats(uid types.UID) (serverstats.PodVolumeStats, bool) { return o.podVolumeStats, true } diff --git a/pkg/kubelet/util/pluginwatcher/plugin_watcher.go b/pkg/kubelet/util/pluginwatcher/plugin_watcher.go index cbc33e47444..de2addd2846 100644 --- a/pkg/kubelet/util/pluginwatcher/plugin_watcher.go +++ b/pkg/kubelet/util/pluginwatcher/plugin_watcher.go @@ -385,8 +385,10 @@ func (w *Watcher) notifyPlugin(client registerapi.RegistrationClient, registered // Dial establishes the gRPC communication with the picked up plugin socket. 
https://godoc.org/google.golang.org/grpc#Dial func dial(unixSocketPath string, timeout time.Duration) (registerapi.RegistrationClient, *grpc.ClientConn, error) { - c, err := grpc.Dial(unixSocketPath, grpc.WithInsecure(), grpc.WithBlock(), - grpc.WithTimeout(timeout), + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + c, err := grpc.DialContext(ctx, unixSocketPath, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout("unix", addr, timeout) }), diff --git a/pkg/printers/internalversion/describe.go b/pkg/printers/internalversion/describe.go index 1fe2b0ffd1a..8549447de89 100644 --- a/pkg/printers/internalversion/describe.go +++ b/pkg/printers/internalversion/describe.go @@ -3093,8 +3093,8 @@ func describeHorizontalPodAutoscaler(hpa *autoscaling.HorizontalPodAutoscaler, e func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node, w PrefixWriter) { w.Write(LEVEL_0, "Non-terminated Pods:\t(%d in total)\n", len(nodeNonTerminatedPodsList.Items)) - w.Write(LEVEL_1, "Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\n") - w.Write(LEVEL_1, "---------\t----\t\t------------\t----------\t---------------\t-------------\n") + w.Write(LEVEL_1, "Namespace\tName\t\tCPU Requests\tCPU Limits\tMemory Requests\tMemory Limits\tAGE\n") + w.Write(LEVEL_1, "---------\t----\t\t------------\t----------\t---------------\t-------------\t---\n") allocatable := node.Status.Capacity if len(node.Status.Allocatable) > 0 { allocatable = node.Status.Allocatable @@ -3107,9 +3107,9 @@ func describeNodeResource(nodeNonTerminatedPodsList *api.PodList, node *api.Node fractionCpuLimit := float64(cpuLimit.MilliValue()) / float64(allocatable.Cpu().MilliValue()) * 100 fractionMemoryReq := float64(memoryReq.Value()) / float64(allocatable.Memory().Value()) * 100 fractionMemoryLimit := float64(memoryLimit.Value()) / 
float64(allocatable.Memory().Value()) * 100 - w.Write(LEVEL_1, "%s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\n", pod.Namespace, pod.Name, + w.Write(LEVEL_1, "%s\t%s\t\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s (%d%%)\t%s\n", pod.Namespace, pod.Name, cpuReq.String(), int64(fractionCpuReq), cpuLimit.String(), int64(fractionCpuLimit), - memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit)) + memoryReq.String(), int64(fractionMemoryReq), memoryLimit.String(), int64(fractionMemoryLimit), translateTimestampSince(pod.CreationTimestamp)) } w.Write(LEVEL_0, "Allocated resources:\n (Total limits may be over 100 percent, i.e., overcommitted.)\n") diff --git a/pkg/proxy/iptables/BUILD b/pkg/proxy/iptables/BUILD index 1fb1875316b..84154ed565f 100644 --- a/pkg/proxy/iptables/BUILD +++ b/pkg/proxy/iptables/BUILD @@ -20,9 +20,9 @@ go_library( "//pkg/util/iptables:go_default_library", "//pkg/util/net:go_default_library", "//pkg/util/sysctl:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go index 41f03d49cde..86e21f22dec 100644 --- a/pkg/proxy/iptables/proxier.go +++ b/pkg/proxy/iptables/proxier.go @@ -36,6 +36,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" "k8s.io/kubernetes/pkg/proxy" @@ -47,7 +48,6 @@ import ( utiliptables "k8s.io/kubernetes/pkg/util/iptables" utilnet "k8s.io/kubernetes/pkg/util/net" utilsysctl 
"k8s.io/kubernetes/pkg/util/sysctl" - utilversion "k8s.io/kubernetes/pkg/util/version" utilexec "k8s.io/utils/exec" ) @@ -293,8 +293,10 @@ func NewProxier(ipt utiliptables.Interface, nodePortAddresses []string, ) (*Proxier, error) { // Set the route_localnet sysctl we need for - if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil { - return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err) + if val, _ := sysctl.GetSysctl(sysctlRouteLocalnet); val != 1 { + if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil { + return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err) + } } // Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers diff --git a/pkg/proxy/ipvs/BUILD b/pkg/proxy/ipvs/BUILD index 6b1141aadf1..e7b13f3cac6 100644 --- a/pkg/proxy/ipvs/BUILD +++ b/pkg/proxy/ipvs/BUILD @@ -37,6 +37,7 @@ go_test( go_library( name = "go_default_library", srcs = [ + "graceful_termination.go", "ipset.go", "netlink.go", "netlink_linux.go", @@ -56,10 +57,10 @@ go_library( "//pkg/util/ipvs:go_default_library", "//pkg/util/net:go_default_library", "//pkg/util/sysctl:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/pkg/proxy/ipvs/graceful_termination.go b/pkg/proxy/ipvs/graceful_termination.go new file mode 100644 index 00000000000..95973fa75db --- /dev/null +++ b/pkg/proxy/ipvs/graceful_termination.go @@ -0,0 +1,223 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipvs + +import ( + "sync" + "time" + + "fmt" + "github.com/golang/glog" + "k8s.io/apimachinery/pkg/util/wait" + utilipvs "k8s.io/kubernetes/pkg/util/ipvs" +) + +const ( + rsGracefulDeletePeriod = 15 * time.Minute + rsCheckDeleteInterval = 1 * time.Minute +) + +// listItem stores real server information and the process time. +// If nothing special happened, real server will be delete after process time. +type listItem struct { + VirtualServer *utilipvs.VirtualServer + RealServer *utilipvs.RealServer +} + +// String return the unique real server name(with virtual server information) +func (g *listItem) String() string { + return GetUniqueRSName(g.VirtualServer, g.RealServer) +} + +// GetUniqueRSName return a string type unique rs name with vs information +func GetUniqueRSName(vs *utilipvs.VirtualServer, rs *utilipvs.RealServer) string { + return vs.String() + "/" + rs.String() +} + +type graceTerminateRSList struct { + lock sync.Mutex + list map[string]*listItem +} + +// add push an new element to the rsList +func (q *graceTerminateRSList) add(rs *listItem) bool { + q.lock.Lock() + defer q.lock.Unlock() + + uniqueRS := rs.String() + if _, ok := q.list[uniqueRS]; ok { + return false + } + + glog.V(5).Infof("Adding rs %v to graceful delete rsList", rs) + q.list[uniqueRS] = rs + return true +} + +// remove remove an element from the rsList +func (q *graceTerminateRSList) remove(rs *listItem) bool { + q.lock.Lock() + defer 
q.lock.Unlock() + + uniqueRS := rs.String() + if _, ok := q.list[uniqueRS]; ok { + return false + } + delete(q.list, uniqueRS) + return true +} + +func (q *graceTerminateRSList) flushList(handler func(rsToDelete *listItem) (bool, error)) bool { + q.lock.Lock() + defer q.lock.Unlock() + + success := true + for name, rs := range q.list { + deleted, err := handler(rs) + if err != nil { + glog.Errorf("Try delete rs %q err: %v", name, err) + success = false + } + if deleted { + glog.Infof("lw: remote out of the list: %s", name) + q.remove(rs) + } + } + return success +} + +// exist check whether the specified unique RS is in the rsList +func (q *graceTerminateRSList) exist(uniqueRS string) (*listItem, bool) { + q.lock.Lock() + defer q.lock.Unlock() + + if _, ok := q.list[uniqueRS]; ok { + return nil, false + } + return nil, false +} + +// GracefulTerminationManager manage rs graceful termination information and do graceful termination work +// rsList is the rs list to graceful termination, ipvs is the ipvsinterface to do ipvs delete/update work +type GracefulTerminationManager struct { + rsList graceTerminateRSList + ipvs utilipvs.Interface +} + +// NewGracefulTerminationManager create a gracefulTerminationManager to manage ipvs rs graceful termination work +func NewGracefulTerminationManager(ipvs utilipvs.Interface) *GracefulTerminationManager { + l := make(map[string]*listItem) + return &GracefulTerminationManager{ + rsList: graceTerminateRSList{ + list: l, + }, + ipvs: ipvs, + } +} + +// InTerminationList to check whether specified unique rs name is in graceful termination list +func (m *GracefulTerminationManager) InTerminationList(uniqueRS string) bool { + _, exist := m.rsList.exist(uniqueRS) + return exist +} + +// GracefulDeleteRS to update rs weight to 0, and add rs to graceful terminate list +func (m *GracefulTerminationManager) GracefulDeleteRS(vs *utilipvs.VirtualServer, rs *utilipvs.RealServer) error { + // Try to delete rs before add it to graceful delete 
list + ele := &listItem{ + VirtualServer: vs, + RealServer: rs, + } + deleted, err := m.deleteRsFunc(ele) + if err != nil { + glog.Errorf("Delete rs %q err: %v", err) + } + if deleted { + return nil + } + rs.Weight = 0 + err = m.ipvs.UpdateRealServer(vs, rs) + if err != nil { + return err + } + glog.V(5).Infof("Adding an element to graceful delete rsList: %+v", ele) + m.rsList.add(ele) + return nil +} + +func (m *GracefulTerminationManager) deleteRsFunc(rsToDelete *listItem) (bool, error) { + glog.Infof("Trying to delete rs: %s", rsToDelete.String()) + rss, err := m.ipvs.GetRealServers(rsToDelete.VirtualServer) + if err != nil { + return false, err + } + for _, rs := range rss { + if rsToDelete.RealServer.Equal(rs) { + if rs.ActiveConn != 0 { + return false, nil + } + glog.Infof("Deleting rs: %s", rsToDelete.String()) + err := m.ipvs.DeleteRealServer(rsToDelete.VirtualServer, rs) + if err != nil { + return false, fmt.Errorf("Delete destination %q err: %v", rs.String(), err) + } + return true, nil + } + } + return false, fmt.Errorf("Failed to delete rs %q, can't find the real server", rsToDelete.String()) +} + +func (m *GracefulTerminationManager) tryDeleteRs() { + if !m.rsList.flushList(m.deleteRsFunc) { + glog.Errorf("Try flush graceful termination list err") + } +} + +// MoveRSOutofGracefulDeleteList to delete an rs and remove it from the rsList immediately +func (m *GracefulTerminationManager) MoveRSOutofGracefulDeleteList(uniqueRS string) error { + rsToDelete, find := m.rsList.exist(uniqueRS) + if !find || rsToDelete == nil { + return fmt.Errorf("failed to find rs: %q", uniqueRS) + } + err := m.ipvs.DeleteRealServer(rsToDelete.VirtualServer, rsToDelete.RealServer) + if err != nil { + return err + } + m.rsList.remove(rsToDelete) + return nil +} + +// Run start a goroutine to try to delete rs in the graceful delete rsList with an interval 1 minute +func (m *GracefulTerminationManager) Run() { + // before start, add leftover in delete rs to graceful delete rsList 
+ vss, err := m.ipvs.GetVirtualServers() + if err != nil { + glog.Errorf("IPVS graceful delete manager failed to get IPVS virtualserver") + } + for _, vs := range vss { + rss, err := m.ipvs.GetRealServers(vs) + if err != nil { + glog.Errorf("IPVS graceful delete manager failed to get %v realserver", vs) + continue + } + for _, rs := range rss { + m.GracefulDeleteRS(vs, rs) + } + } + + go wait.Until(m.tryDeleteRs, rsCheckDeleteInterval, wait.NeverStop) +} diff --git a/pkg/proxy/ipvs/ipset.go b/pkg/proxy/ipvs/ipset.go index 99bdc6d1542..e25ee009853 100644 --- a/pkg/proxy/ipvs/ipset.go +++ b/pkg/proxy/ipvs/ipset.go @@ -18,8 +18,8 @@ package ipvs import ( "k8s.io/apimachinery/pkg/util/sets" + utilversion "k8s.io/apimachinery/pkg/util/version" utilipset "k8s.io/kubernetes/pkg/util/ipset" - utilversion "k8s.io/kubernetes/pkg/util/version" "fmt" "github.com/golang/glog" diff --git a/pkg/proxy/ipvs/netlink_linux.go b/pkg/proxy/ipvs/netlink_linux.go index 95c4d17f338..0c671200f03 100644 --- a/pkg/proxy/ipvs/netlink_linux.go +++ b/pkg/proxy/ipvs/netlink_linux.go @@ -115,7 +115,7 @@ func (h *netlinkHandle) ListBindAddress(devName string) ([]string, error) { if err != nil { return nil, fmt.Errorf("error list bound address of interface: %s, err: %v", devName, err) } - ips := make([]string, 0) + var ips []string for _, addr := range addrs { ips = append(ips, addr.IP.String()) } diff --git a/pkg/proxy/ipvs/proxier.go b/pkg/proxy/ipvs/proxier.go index 0fce48eb726..2cea33223bd 100644 --- a/pkg/proxy/ipvs/proxier.go +++ b/pkg/proxy/ipvs/proxier.go @@ -231,7 +231,8 @@ type Proxier struct { nodePortAddresses []string // networkInterfacer defines an interface for several net library functions. // Inject for test purpose. 
- networkInterfacer utilproxy.NetworkInterfacer + networkInterfacer utilproxy.NetworkInterfacer + gracefuldeleteManager *GracefulTerminationManager } // IPGetter helps get node network interface IP @@ -299,8 +300,10 @@ func NewProxier(ipt utiliptables.Interface, nodePortAddresses []string, ) (*Proxier, error) { // Set the route_localnet sysctl we need for - if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil { - return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err) + if val, _ := sysctl.GetSysctl(sysctlRouteLocalnet); val != 1 { + if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil { + return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err) + } } // Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers @@ -311,13 +314,17 @@ func NewProxier(ipt utiliptables.Interface, } // Set the conntrack sysctl we need for - if err := sysctl.SetSysctl(sysctlVSConnTrack, 1); err != nil { - return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlVSConnTrack, err) + if val, _ := sysctl.GetSysctl(sysctlVSConnTrack); val != 1 { + if err := sysctl.SetSysctl(sysctlVSConnTrack, 1); err != nil { + return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlVSConnTrack, err) + } } // Set the ip_forward sysctl we need for - if err := sysctl.SetSysctl(sysctlForward, 1); err != nil { - return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlForward, err) + if val, _ := sysctl.GetSysctl(sysctlForward); val != 1 { + if err := sysctl.SetSysctl(sysctlForward, 1); err != nil { + return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlForward, err) + } } // Generate the masquerade mark to use for SNAT rules. 
@@ -347,38 +354,39 @@ func NewProxier(ipt utiliptables.Interface, healthChecker := healthcheck.NewServer(hostname, recorder, nil, nil) // use default implementations of deps proxier := &Proxier{ - portsMap: make(map[utilproxy.LocalPort]utilproxy.Closeable), - serviceMap: make(proxy.ServiceMap), - serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, &isIPv6, recorder), - endpointsMap: make(proxy.EndpointsMap), - endpointsChanges: proxy.NewEndpointChangeTracker(hostname, nil, &isIPv6, recorder), - syncPeriod: syncPeriod, - minSyncPeriod: minSyncPeriod, - excludeCIDRs: excludeCIDRs, - iptables: ipt, - masqueradeAll: masqueradeAll, - masqueradeMark: masqueradeMark, - exec: exec, - clusterCIDR: clusterCIDR, - hostname: hostname, - nodeIP: nodeIP, - portMapper: &listenPortOpener{}, - recorder: recorder, - healthChecker: healthChecker, - healthzServer: healthzServer, - ipvs: ipvs, - ipvsScheduler: scheduler, - ipGetter: &realIPGetter{nl: NewNetLinkHandle()}, - iptablesData: bytes.NewBuffer(nil), - filterChainsData: bytes.NewBuffer(nil), - natChains: bytes.NewBuffer(nil), - natRules: bytes.NewBuffer(nil), - filterChains: bytes.NewBuffer(nil), - filterRules: bytes.NewBuffer(nil), - netlinkHandle: NewNetLinkHandle(), - ipset: ipset, - nodePortAddresses: nodePortAddresses, - networkInterfacer: utilproxy.RealNetwork{}, + portsMap: make(map[utilproxy.LocalPort]utilproxy.Closeable), + serviceMap: make(proxy.ServiceMap), + serviceChanges: proxy.NewServiceChangeTracker(newServiceInfo, &isIPv6, recorder), + endpointsMap: make(proxy.EndpointsMap), + endpointsChanges: proxy.NewEndpointChangeTracker(hostname, nil, &isIPv6, recorder), + syncPeriod: syncPeriod, + minSyncPeriod: minSyncPeriod, + excludeCIDRs: excludeCIDRs, + iptables: ipt, + masqueradeAll: masqueradeAll, + masqueradeMark: masqueradeMark, + exec: exec, + clusterCIDR: clusterCIDR, + hostname: hostname, + nodeIP: nodeIP, + portMapper: &listenPortOpener{}, + recorder: recorder, + healthChecker: healthChecker, + 
healthzServer: healthzServer, + ipvs: ipvs, + ipvsScheduler: scheduler, + ipGetter: &realIPGetter{nl: NewNetLinkHandle()}, + iptablesData: bytes.NewBuffer(nil), + filterChainsData: bytes.NewBuffer(nil), + natChains: bytes.NewBuffer(nil), + natRules: bytes.NewBuffer(nil), + filterChains: bytes.NewBuffer(nil), + filterRules: bytes.NewBuffer(nil), + netlinkHandle: NewNetLinkHandle(), + ipset: ipset, + nodePortAddresses: nodePortAddresses, + networkInterfacer: utilproxy.RealNetwork{}, + gracefuldeleteManager: NewGracefulTerminationManager(ipvs), } // initialize ipsetList with all sets we needed proxier.ipsetList = make(map[string]*IPSet) @@ -391,6 +399,7 @@ func NewProxier(ipt utiliptables.Interface, burstSyncs := 2 glog.V(3).Infof("minSyncPeriod: %v, syncPeriod: %v, burstSyncs: %d", minSyncPeriod, syncPeriod, burstSyncs) proxier.syncRunner = async.NewBoundedFrequencyRunner("sync-runner", proxier.syncProxyRules, minSyncPeriod, syncPeriod, burstSyncs) + proxier.gracefuldeleteManager.Run() return proxier, nil } @@ -697,6 +706,8 @@ func (proxier *Proxier) syncProxyRules() { // This is to avoid memory reallocations and thus improve performance. proxier.natChains.Reset() proxier.natRules.Reset() + proxier.filterChains.Reset() + proxier.filterRules.Reset() // Write table headers. 
writeLine(proxier.filterChains, "*filter") @@ -989,7 +1000,7 @@ func (proxier *Proxier) syncProxyRules() { continue } - lps := make([]utilproxy.LocalPort, 0) + var lps []utilproxy.LocalPort for address := range addresses { lp := utilproxy.LocalPort{ Description: "nodePort for " + svcNameString, @@ -1080,7 +1091,7 @@ func (proxier *Proxier) syncProxyRules() { } // Build ipvs kernel routes for each node ip address - nodeIPs := make([]net.IP, 0) + var nodeIPs []net.IP for address := range addresses { if !utilproxy.IsZeroCIDR(address) { nodeIPs = append(nodeIPs, net.ParseIP(address)) @@ -1131,6 +1142,8 @@ func (proxier *Proxier) syncProxyRules() { proxier.iptablesData.Reset() proxier.iptablesData.Write(proxier.natChains.Bytes()) proxier.iptablesData.Write(proxier.natRules.Bytes()) + proxier.iptablesData.Write(proxier.filterChains.Bytes()) + proxier.iptablesData.Write(proxier.filterRules.Bytes()) glog.V(5).Infof("Restoring iptables rules: %s", proxier.iptablesData.Bytes()) err = proxier.iptables.RestoreAll(proxier.iptablesData.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters) @@ -1500,53 +1513,72 @@ func (proxier *Proxier) syncEndpoint(svcPortName proxy.ServicePortName, onlyNode newEndpoints.Insert(epInfo.String()) } - if !curEndpoints.Equal(newEndpoints) { - // Create new endpoints - for _, ep := range newEndpoints.Difference(curEndpoints).UnsortedList() { - ip, port, err := net.SplitHostPort(ep) - if err != nil { - glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err) - continue - } - portNum, err := strconv.Atoi(port) - if err != nil { - glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err) - continue - } + // Create new endpoints + for _, ep := range newEndpoints.List() { + ip, port, err := net.SplitHostPort(ep) + if err != nil { + glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err) + continue + } + portNum, err := strconv.Atoi(port) + if err != nil { + glog.Errorf("Failed to parse endpoint port %s, error: %v", 
port, err) + continue + } - newDest := &utilipvs.RealServer{ - Address: net.ParseIP(ip), - Port: uint16(portNum), - Weight: 1, + newDest := &utilipvs.RealServer{ + Address: net.ParseIP(ip), + Port: uint16(portNum), + Weight: 1, + } + + if curEndpoints.Has(ep) { + // check if newEndpoint is in gracefulDelete list, is true, delete this ep immediately + uniqueRS := GetUniqueRSName(vs, newDest) + if !proxier.gracefuldeleteManager.InTerminationList(uniqueRS) { + continue } - err = proxier.ipvs.AddRealServer(appliedVirtualServer, newDest) + glog.V(5).Infof("new ep %q is in graceful delete list", uniqueRS) + err := proxier.gracefuldeleteManager.MoveRSOutofGracefulDeleteList(uniqueRS) if err != nil { - glog.Errorf("Failed to add destination: %v, error: %v", newDest, err) + glog.Errorf("Failed to delete endpoint: %v in gracefulDeleteQueue, error: %v", ep, err) continue } } - // Delete old endpoints - for _, ep := range curEndpoints.Difference(newEndpoints).UnsortedList() { - ip, port, err := net.SplitHostPort(ep) - if err != nil { - glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err) - continue - } - portNum, err := strconv.Atoi(port) - if err != nil { - glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err) - continue - } + err = proxier.ipvs.AddRealServer(appliedVirtualServer, newDest) + if err != nil { + glog.Errorf("Failed to add destination: %v, error: %v", newDest, err) + continue + } + } + // Delete old endpoints + for _, ep := range curEndpoints.Difference(newEndpoints).UnsortedList() { + // if curEndpoint is in gracefulDelete, skip + uniqueRS := vs.String() + "/" + ep + if proxier.gracefuldeleteManager.InTerminationList(uniqueRS) { + continue + } + ip, port, err := net.SplitHostPort(ep) + if err != nil { + glog.Errorf("Failed to parse endpoint: %v, error: %v", ep, err) + continue + } + portNum, err := strconv.Atoi(port) + if err != nil { + glog.Errorf("Failed to parse endpoint port %s, error: %v", port, err) + continue + } - delDest := 
&utilipvs.RealServer{ - Address: net.ParseIP(ip), - Port: uint16(portNum), - } - err = proxier.ipvs.DeleteRealServer(appliedVirtualServer, delDest) - if err != nil { - glog.Errorf("Failed to delete destination: %v, error: %v", delDest, err) - continue - } + delDest := &utilipvs.RealServer{ + Address: net.ParseIP(ip), + Port: uint16(portNum), + } + + glog.V(5).Infof("Using graceful delete to delete: %v", delDest) + err = proxier.gracefuldeleteManager.GracefulDeleteRS(appliedVirtualServer, delDest) + if err != nil { + glog.Errorf("Failed to delete destination: %v, error: %v", delDest, err) + continue } } return nil @@ -1559,6 +1591,11 @@ func (proxier *Proxier) cleanLegacyService(activeServices map[string]bool, curre // This service was not processed in the latest sync loop so before deleting it, // make sure it does not fall within an excluded CIDR range. okayToDelete := true + rsList, err := proxier.ipvs.GetRealServers(svc) + if len(rsList) != 0 && err == nil { + glog.V(5).Infof("Will not delete VS: %v, cause it have RS: %v", svc, rsList) + okayToDelete = false + } for _, excludedCIDR := range proxier.excludeCIDRs { // Any validation of this CIDR already should have occurred. 
_, n, _ := net.ParseCIDR(excludedCIDR) diff --git a/pkg/quota/BUILD b/pkg/quota/BUILD index 1fb06434993..8659f8169d5 100644 --- a/pkg/quota/BUILD +++ b/pkg/quota/BUILD @@ -1,40 +1,5 @@ package(default_visibility = ["//visibility:public"]) -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = [ - "interfaces.go", - "resources.go", - ], - importpath = "k8s.io/kubernetes/pkg/quota", - deps = [ - "//pkg/apis/core:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", - "//staging/src/k8s.io/client-go/tools/cache:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = ["resources_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/apis/core:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - ], -) - filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -46,9 +11,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//pkg/quota/evaluator/core:all-srcs", - "//pkg/quota/generic:all-srcs", - "//pkg/quota/install:all-srcs", + "//pkg/quota/v1:all-srcs", ], tags = ["automanaged"], ) diff --git a/pkg/quota/resources_test.go b/pkg/quota/resources_test.go deleted file mode 100644 index 2df5dbd74b0..00000000000 --- a/pkg/quota/resources_test.go +++ /dev/null @@ -1,321 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package quota - -import ( - "testing" - - "k8s.io/apimachinery/pkg/api/resource" - api "k8s.io/kubernetes/pkg/apis/core" -) - -func TestEquals(t *testing.T) { - testCases := map[string]struct { - a api.ResourceList - b api.ResourceList - expected bool - }{ - "isEqual": { - a: api.ResourceList{}, - b: api.ResourceList{}, - expected: true, - }, - "isEqualWithKeys": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100m"), - api.ResourceMemory: resource.MustParse("1Gi"), - }, - b: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100m"), - api.ResourceMemory: resource.MustParse("1Gi"), - }, - expected: true, - }, - "isNotEqualSameKeys": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("200m"), - api.ResourceMemory: resource.MustParse("1Gi"), - }, - b: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100m"), - api.ResourceMemory: resource.MustParse("1Gi"), - }, - expected: false, - }, - "isNotEqualDiffKeys": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100m"), - api.ResourceMemory: resource.MustParse("1Gi"), - }, - b: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100m"), - api.ResourceMemory: resource.MustParse("1Gi"), - api.ResourcePods: resource.MustParse("1"), - }, - expected: false, - }, - } - for testName, testCase := range testCases { - if result := Equals(testCase.a, testCase.b); result != testCase.expected { - t.Errorf("%s expected: %v, actual: %v, a=%v, b=%v", testName, testCase.expected, result, testCase.a, testCase.b) - } - } -} - -func TestMax(t *testing.T) { - testCases := 
map[string]struct { - a api.ResourceList - b api.ResourceList - expected api.ResourceList - }{ - "noKeys": { - a: api.ResourceList{}, - b: api.ResourceList{}, - expected: api.ResourceList{}, - }, - "toEmpty": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - b: api.ResourceList{}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - }, - "matching": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - b: api.ResourceList{api.ResourceCPU: resource.MustParse("150m")}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("150m")}, - }, - "matching(reverse)": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("150m")}, - b: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("150m")}, - }, - "matching-equal": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - b: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - }, - } - for testName, testCase := range testCases { - sum := Max(testCase.a, testCase.b) - if result := Equals(testCase.expected, sum); !result { - t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sum) - } - } -} - -func TestAdd(t *testing.T) { - testCases := map[string]struct { - a api.ResourceList - b api.ResourceList - expected api.ResourceList - }{ - "noKeys": { - a: api.ResourceList{}, - b: api.ResourceList{}, - expected: api.ResourceList{}, - }, - "toEmpty": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - b: api.ResourceList{}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - }, - "matching": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - b: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, - }, - } - for 
testName, testCase := range testCases { - sum := Add(testCase.a, testCase.b) - if result := Equals(testCase.expected, sum); !result { - t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sum) - } - } -} - -func TestSubtract(t *testing.T) { - testCases := map[string]struct { - a api.ResourceList - b api.ResourceList - expected api.ResourceList - }{ - "noKeys": { - a: api.ResourceList{}, - b: api.ResourceList{}, - expected: api.ResourceList{}, - }, - "value-empty": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - b: api.ResourceList{}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - }, - "empty-value": { - a: api.ResourceList{}, - b: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("-100m")}, - }, - "value-value": { - a: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")}, - b: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - expected: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")}, - }, - } - for testName, testCase := range testCases { - sub := Subtract(testCase.a, testCase.b) - if result := Equals(testCase.expected, sub); !result { - t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sub) - } - } -} - -func TestResourceNames(t *testing.T) { - testCases := map[string]struct { - a api.ResourceList - expected []api.ResourceName - }{ - "empty": { - a: api.ResourceList{}, - expected: []api.ResourceName{}, - }, - "values": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100m"), - api.ResourceMemory: resource.MustParse("1Gi"), - }, - expected: []api.ResourceName{api.ResourceMemory, api.ResourceCPU}, - }, - } - for testName, testCase := range testCases { - actualSet := ToSet(ResourceNames(testCase.a)) - expectedSet := ToSet(testCase.expected) - if !actualSet.Equal(expectedSet) { - t.Errorf("%s expected: %v, actual: %v", testName, expectedSet, actualSet) 
- } - } -} - -func TestContains(t *testing.T) { - testCases := map[string]struct { - a []api.ResourceName - b api.ResourceName - expected bool - }{ - "does-not-contain": { - a: []api.ResourceName{api.ResourceMemory}, - b: api.ResourceCPU, - expected: false, - }, - "does-contain": { - a: []api.ResourceName{api.ResourceMemory, api.ResourceCPU}, - b: api.ResourceCPU, - expected: true, - }, - } - for testName, testCase := range testCases { - if actual := Contains(testCase.a, testCase.b); actual != testCase.expected { - t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, actual) - } - } -} - -func TestContainsPrefix(t *testing.T) { - testCases := map[string]struct { - a []string - b api.ResourceName - expected bool - }{ - "does-not-contain": { - a: []string{api.ResourceHugePagesPrefix}, - b: api.ResourceCPU, - expected: false, - }, - "does-contain": { - a: []string{api.ResourceHugePagesPrefix}, - b: api.ResourceName(api.ResourceHugePagesPrefix + "2Mi"), - expected: true, - }, - } - for testName, testCase := range testCases { - if actual := ContainsPrefix(testCase.a, testCase.b); actual != testCase.expected { - t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, actual) - } - } -} - -func TestIsZero(t *testing.T) { - testCases := map[string]struct { - a api.ResourceList - expected bool - }{ - "empty": { - a: api.ResourceList{}, - expected: true, - }, - "zero": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("0"), - api.ResourceMemory: resource.MustParse("0"), - }, - expected: true, - }, - "non-zero": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("200m"), - api.ResourceMemory: resource.MustParse("1Gi"), - }, - expected: false, - }, - } - for testName, testCase := range testCases { - if result := IsZero(testCase.a); result != testCase.expected { - t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, result) - } - } -} - -func TestIsNegative(t *testing.T) { - testCases := map[string]struct { - 
a api.ResourceList - expected []api.ResourceName - }{ - "empty": { - a: api.ResourceList{}, - expected: []api.ResourceName{}, - }, - "some-negative": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("-10"), - api.ResourceMemory: resource.MustParse("0"), - }, - expected: []api.ResourceName{api.ResourceCPU}, - }, - "all-negative": { - a: api.ResourceList{ - api.ResourceCPU: resource.MustParse("-200m"), - api.ResourceMemory: resource.MustParse("-1Gi"), - }, - expected: []api.ResourceName{api.ResourceCPU, api.ResourceMemory}, - }, - } - for testName, testCase := range testCases { - actual := IsNegative(testCase.a) - actualSet := ToSet(actual) - expectedSet := ToSet(testCase.expected) - if !actualSet.Equal(expectedSet) { - t.Errorf("%s expected: %v, actual: %v", testName, expectedSet, actualSet) - } - } -} diff --git a/pkg/quota/v1/BUILD b/pkg/quota/v1/BUILD new file mode 100644 index 00000000000..7b3cb195b78 --- /dev/null +++ b/pkg/quota/v1/BUILD @@ -0,0 +1,53 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_library( + name = "go_default_library", + srcs = [ + "interfaces.go", + "resources.go", + ], + importpath = "k8s.io/kubernetes/pkg/quota/v1", + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["resources_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + 
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/quota/v1/evaluator/core:all-srcs", + "//pkg/quota/v1/generic:all-srcs", + "//pkg/quota/v1/install:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/pkg/quota/OWNERS b/pkg/quota/v1/OWNERS similarity index 100% rename from pkg/quota/OWNERS rename to pkg/quota/v1/OWNERS diff --git a/pkg/quota/evaluator/OWNERS b/pkg/quota/v1/evaluator/OWNERS similarity index 100% rename from pkg/quota/evaluator/OWNERS rename to pkg/quota/v1/evaluator/OWNERS diff --git a/pkg/quota/evaluator/core/BUILD b/pkg/quota/v1/evaluator/core/BUILD similarity index 84% rename from pkg/quota/evaluator/core/BUILD rename to pkg/quota/v1/evaluator/core/BUILD index 13f06d3cb9c..800a34d7525 100644 --- a/pkg/quota/evaluator/core/BUILD +++ b/pkg/quota/v1/evaluator/core/BUILD @@ -15,16 +15,16 @@ go_library( "registry.go", "services.go", ], - importpath = "k8s.io/kubernetes/pkg/quota/evaluator/core", + importpath = "k8s.io/kubernetes/pkg/quota/v1/evaluator/core", deps = [ "//pkg/apis/core:go_default_library", - "//pkg/apis/core/helper:go_default_library", - "//pkg/apis/core/helper/qos:go_default_library", "//pkg/apis/core/v1:go_default_library", + "//pkg/apis/core/v1/helper:go_default_library", + "//pkg/apis/core/v1/helper/qos:go_default_library", "//pkg/features:go_default_library", "//pkg/kubeapiserver/admission/util:go_default_library", - "//pkg/quota:go_default_library", - "//pkg/quota/generic:go_default_library", + "//pkg/quota/v1:go_default_library", + "//pkg/quota/v1/generic:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -50,9 +50,10 @@ 
go_test( embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", - "//pkg/quota:go_default_library", - "//pkg/quota/generic:go_default_library", + "//pkg/quota/v1:go_default_library", + "//pkg/quota/v1/generic:go_default_library", "//pkg/util/node:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/pkg/quota/evaluator/core/doc.go b/pkg/quota/v1/evaluator/core/doc.go similarity index 89% rename from pkg/quota/evaluator/core/doc.go rename to pkg/quota/v1/evaluator/core/doc.go index 3c9d632cbfe..a8649344199 100644 --- a/pkg/quota/evaluator/core/doc.go +++ b/pkg/quota/v1/evaluator/core/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // core contains modules that interface with the core api group -package core // import "k8s.io/kubernetes/pkg/quota/evaluator/core" +package core // import "k8s.io/kubernetes/pkg/quota/v1/evaluator/core" diff --git a/pkg/quota/evaluator/core/persistent_volume_claims.go b/pkg/quota/v1/evaluator/core/persistent_volume_claims.go similarity index 70% rename from pkg/quota/evaluator/core/persistent_volume_claims.go rename to pkg/quota/v1/evaluator/core/persistent_volume_claims.go index 62051e45147..ea8ecad52df 100644 --- a/pkg/quota/evaluator/core/persistent_volume_claims.go +++ b/pkg/quota/v1/evaluator/core/persistent_volume_claims.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -30,22 +30,22 @@ import ( "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/core/helper" k8s_api_v1 
"k8s.io/kubernetes/pkg/apis/core/v1" + "k8s.io/kubernetes/pkg/apis/core/v1/helper" k8sfeatures "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubeapiserver/admission/util" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" ) // the name used for object count quota -var pvcObjectCountName = generic.ObjectCountQuotaResourceNameFor(v1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource()) +var pvcObjectCountName = generic.ObjectCountQuotaResourceNameFor(corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource()) // pvcResources are the set of static resources managed by quota associated with pvcs. // for each resource in this list, it may be refined dynamically based on storage class. -var pvcResources = []api.ResourceName{ - api.ResourcePersistentVolumeClaims, - api.ResourceRequestsStorage, +var pvcResources = []corev1.ResourceName{ + corev1.ResourcePersistentVolumeClaims, + corev1.ResourceRequestsStorage, } // storageClassSuffix is the suffix to the qualified portion of storage class resource name. @@ -56,19 +56,21 @@ var pvcResources = []api.ResourceName{ // * bronze.storageclass.storage.k8s.io/requests.storage: 500Gi const storageClassSuffix string = ".storageclass.storage.k8s.io/" +/* TODO: prune? // ResourceByStorageClass returns a quota resource name by storage class. -func ResourceByStorageClass(storageClass string, resourceName api.ResourceName) api.ResourceName { - return api.ResourceName(string(storageClass + storageClassSuffix + string(resourceName))) +func ResourceByStorageClass(storageClass string, resourceName corev1.ResourceName) corev1.ResourceName { + return corev1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName))) } +*/ // V1ResourceByStorageClass returns a quota resource name by storage class. 
-func V1ResourceByStorageClass(storageClass string, resourceName v1.ResourceName) v1.ResourceName { - return v1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName))) +func V1ResourceByStorageClass(storageClass string, resourceName corev1.ResourceName) corev1.ResourceName { + return corev1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName))) } // NewPersistentVolumeClaimEvaluator returns an evaluator that can evaluate persistent volume claims func NewPersistentVolumeClaimEvaluator(f quota.ListerForResourceFunc) quota.Evaluator { - listFuncByNamespace := generic.ListResourceUsingListerFunc(f, v1.SchemeGroupVersion.WithResource("persistentvolumeclaims")) + listFuncByNamespace := generic.ListResourceUsingListerFunc(f, corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims")) pvcEvaluator := &pvcEvaluator{listFuncByNamespace: listFuncByNamespace} return pvcEvaluator } @@ -80,14 +82,14 @@ type pvcEvaluator struct { } // Constraints verifies that all required resources are present on the item. -func (p *pvcEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error { +func (p *pvcEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error { // no-op for persistent volume claims return nil } // GroupResource that this evaluator tracks func (p *pvcEvaluator) GroupResource() schema.GroupResource { - return v1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource() + return corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource() } // Handles returns true if the evaluator should handle the specified operation. 
@@ -119,27 +121,27 @@ func (p *pvcEvaluator) Handles(a admission.Attributes) bool { } // Matches returns true if the evaluator matches the specified quota with the provided input item -func (p *pvcEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) { +func (p *pvcEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) { return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc) } // MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches. -func (p *pvcEvaluator) MatchingScopes(item runtime.Object, scopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - return []api.ScopedResourceSelectorRequirement{}, nil +func (p *pvcEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil } // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. // It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope -func (p *pvcEvaluator) UncoveredQuotaScopes(limitedScopes []api.ScopedResourceSelectorRequirement, matchedQuotaScopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - return []api.ScopedResourceSelectorRequirement{}, nil +func (p *pvcEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil } // MatchingResources takes the input specified list of resources and returns the set of resources it matches. 
-func (p *pvcEvaluator) MatchingResources(items []api.ResourceName) []api.ResourceName { - result := []api.ResourceName{} +func (p *pvcEvaluator) MatchingResources(items []corev1.ResourceName) []corev1.ResourceName { + result := []corev1.ResourceName{} for _, item := range items { // match object count quota fields - if quota.Contains([]api.ResourceName{pvcObjectCountName}, item) { + if quota.Contains([]corev1.ResourceName{pvcObjectCountName}, item) { result = append(result, item) continue } @@ -161,15 +163,15 @@ func (p *pvcEvaluator) MatchingResources(items []api.ResourceName) []api.Resourc } // Usage knows how to measure usage associated with item. -func (p *pvcEvaluator) Usage(item runtime.Object) (api.ResourceList, error) { - result := api.ResourceList{} - pvc, err := toInternalPersistentVolumeClaimOrError(item) +func (p *pvcEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) { + result := corev1.ResourceList{} + pvc, err := toExternalPersistentVolumeClaimOrError(item) if err != nil { return result, err } // charge for claim - result[api.ResourcePersistentVolumeClaims] = *(resource.NewQuantity(1, resource.DecimalSI)) + result[corev1.ResourcePersistentVolumeClaims] = *(resource.NewQuantity(1, resource.DecimalSI)) result[pvcObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI)) if utilfeature.DefaultFeatureGate.Enabled(features.Initializers) { if !initialization.IsInitialized(pvc.Initializers) { @@ -179,16 +181,16 @@ func (p *pvcEvaluator) Usage(item runtime.Object) (api.ResourceList, error) { } storageClassRef := helper.GetPersistentVolumeClaimClass(pvc) if len(storageClassRef) > 0 { - storageClassClaim := api.ResourceName(storageClassRef + storageClassSuffix + string(api.ResourcePersistentVolumeClaims)) + storageClassClaim := corev1.ResourceName(storageClassRef + storageClassSuffix + string(corev1.ResourcePersistentVolumeClaims)) result[storageClassClaim] = *(resource.NewQuantity(1, resource.DecimalSI)) } // charge for storage - if 
request, found := pvc.Spec.Resources.Requests[api.ResourceStorage]; found { - result[api.ResourceRequestsStorage] = request + if request, found := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; found { + result[corev1.ResourceRequestsStorage] = request // charge usage to the storage class (if present) if len(storageClassRef) > 0 { - storageClassStorage := api.ResourceName(storageClassRef + storageClassSuffix + string(api.ResourceRequestsStorage)) + storageClassStorage := corev1.ResourceName(storageClassRef + storageClassSuffix + string(corev1.ResourceRequestsStorage)) result[storageClassStorage] = request } } @@ -203,15 +205,15 @@ func (p *pvcEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageS // ensure we implement required interface var _ quota.Evaluator = &pvcEvaluator{} -func toInternalPersistentVolumeClaimOrError(obj runtime.Object) (*api.PersistentVolumeClaim, error) { - pvc := &api.PersistentVolumeClaim{} +func toExternalPersistentVolumeClaimOrError(obj runtime.Object) (*corev1.PersistentVolumeClaim, error) { + pvc := &corev1.PersistentVolumeClaim{} switch t := obj.(type) { - case *v1.PersistentVolumeClaim: - if err := k8s_api_v1.Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(t, pvc, nil); err != nil { + case *corev1.PersistentVolumeClaim: + pvc = t + case *api.PersistentVolumeClaim: + if err := k8s_api_v1.Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(t, pvc, nil); err != nil { return nil, err } - case *api.PersistentVolumeClaim: - pvc = t default: return nil, fmt.Errorf("expect *api.PersistentVolumeClaim or *v1.PersistentVolumeClaim, got %v", t) } diff --git a/pkg/quota/evaluator/core/persistent_volume_claims_test.go b/pkg/quota/v1/evaluator/core/persistent_volume_claims_test.go similarity index 81% rename from pkg/quota/evaluator/core/persistent_volume_claims_test.go rename to pkg/quota/v1/evaluator/core/persistent_volume_claims_test.go index e2b1c69d98a..12e0dc89a3b 100644 --- 
a/pkg/quota/evaluator/core/persistent_volume_claims_test.go +++ b/pkg/quota/v1/evaluator/core/persistent_volume_claims_test.go @@ -19,12 +19,13 @@ package core import ( "testing" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" ) func testVolumeClaim(name string, namespace string, spec api.PersistentVolumeClaimSpec) *api.PersistentVolumeClaim { @@ -79,23 +80,23 @@ func TestPersistentVolumeClaimEvaluatorUsage(t *testing.T) { evaluator := NewPersistentVolumeClaimEvaluator(nil) testCases := map[string]struct { pvc *api.PersistentVolumeClaim - usage api.ResourceList + usage corev1.ResourceList }{ "pvc-usage": { pvc: validClaim, - usage: api.ResourceList{ - api.ResourceRequestsStorage: resource.MustParse("10Gi"), - api.ResourcePersistentVolumeClaims: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsStorage: resource.MustParse("10Gi"), + corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "persistentvolumeclaims"}): resource.MustParse("1"), }, }, "pvc-usage-by-class": { pvc: validClaimByStorageClass, - usage: api.ResourceList{ - api.ResourceRequestsStorage: resource.MustParse("10Gi"), - api.ResourcePersistentVolumeClaims: resource.MustParse("1"), - ResourceByStorageClass(classGold, api.ResourceRequestsStorage): resource.MustParse("10Gi"), - ResourceByStorageClass(classGold, api.ResourcePersistentVolumeClaims): resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsStorage: resource.MustParse("10Gi"), + corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"), + V1ResourceByStorageClass(classGold, corev1.ResourceRequestsStorage): 
resource.MustParse("10Gi"), + V1ResourceByStorageClass(classGold, corev1.ResourcePersistentVolumeClaims): resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "persistentvolumeclaims"}): resource.MustParse("1"), }, }, diff --git a/pkg/quota/evaluator/core/pods.go b/pkg/quota/v1/evaluator/core/pods.go similarity index 64% rename from pkg/quota/evaluator/core/pods.go rename to pkg/quota/v1/evaluator/core/pods.go index 7861c3d0a13..dbf20e5661c 100644 --- a/pkg/quota/evaluator/core/pods.go +++ b/pkg/quota/v1/evaluator/core/pods.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -32,57 +32,57 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/core/helper" - "k8s.io/kubernetes/pkg/apis/core/helper/qos" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" + "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/kubeapiserver/admission/util" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" ) // the name used for object count quota -var podObjectCountName = generic.ObjectCountQuotaResourceNameFor(v1.SchemeGroupVersion.WithResource("pods").GroupResource()) +var podObjectCountName = generic.ObjectCountQuotaResourceNameFor(corev1.SchemeGroupVersion.WithResource("pods").GroupResource()) // podResources are the set of resources managed by quota associated with pods. 
-var podResources = []api.ResourceName{ +var podResources = []corev1.ResourceName{ podObjectCountName, - api.ResourceCPU, - api.ResourceMemory, - api.ResourceEphemeralStorage, - api.ResourceRequestsCPU, - api.ResourceRequestsMemory, - api.ResourceRequestsEphemeralStorage, - api.ResourceLimitsCPU, - api.ResourceLimitsMemory, - api.ResourceLimitsEphemeralStorage, - api.ResourcePods, + corev1.ResourceCPU, + corev1.ResourceMemory, + corev1.ResourceEphemeralStorage, + corev1.ResourceRequestsCPU, + corev1.ResourceRequestsMemory, + corev1.ResourceRequestsEphemeralStorage, + corev1.ResourceLimitsCPU, + corev1.ResourceLimitsMemory, + corev1.ResourceLimitsEphemeralStorage, + corev1.ResourcePods, } // podResourcePrefixes are the set of prefixes for resources (Hugepages, and other // potential extended reources with specific prefix) managed by quota associated with pods. var podResourcePrefixes = []string{ - api.ResourceHugePagesPrefix, - api.ResourceRequestsHugePagesPrefix, + corev1.ResourceHugePagesPrefix, + corev1.ResourceRequestsHugePagesPrefix, } // requestedResourcePrefixes are the set of prefixes for resources // that might be declared in pod's Resources.Requests/Limits var requestedResourcePrefixes = []string{ - api.ResourceHugePagesPrefix, + corev1.ResourceHugePagesPrefix, } // maskResourceWithPrefix mask resource with certain prefix // e.g. hugepages-XXX -> requests.hugepages-XXX -func maskResourceWithPrefix(resource api.ResourceName, prefix string) api.ResourceName { - return api.ResourceName(fmt.Sprintf("%s%s", prefix, string(resource))) +func maskResourceWithPrefix(resource corev1.ResourceName, prefix string) corev1.ResourceName { + return corev1.ResourceName(fmt.Sprintf("%s%s", prefix, string(resource))) } // isExtendedResourceNameForQuota returns true if the extended resource name // has the quota related resource prefix. 
-func isExtendedResourceNameForQuota(name api.ResourceName) bool { +func isExtendedResourceNameForQuota(name corev1.ResourceName) bool { // As overcommit is not supported by extended resources for now, // only quota objects in format of "requests.resourceName" is allowed. - return !helper.IsNativeResource(name) && strings.HasPrefix(string(name), api.DefaultResourceRequestsPrefix) + return !helper.IsNativeResource(name) && strings.HasPrefix(string(name), corev1.DefaultResourceRequestsPrefix) } // NOTE: it was a mistake, but if a quota tracks cpu or memory related resources, @@ -90,17 +90,17 @@ func isExtendedResourceNameForQuota(name api.ResourceName) bool { // this mistake for other future resources (gpus, ephemeral-storage,etc). // do not add more resources to this list! var validationSet = sets.NewString( - string(api.ResourceCPU), - string(api.ResourceMemory), - string(api.ResourceRequestsCPU), - string(api.ResourceRequestsMemory), - string(api.ResourceLimitsCPU), - string(api.ResourceLimitsMemory), + string(corev1.ResourceCPU), + string(corev1.ResourceMemory), + string(corev1.ResourceRequestsCPU), + string(corev1.ResourceRequestsMemory), + string(corev1.ResourceLimitsCPU), + string(corev1.ResourceLimitsMemory), ) // NewPodEvaluator returns an evaluator that can evaluate pods func NewPodEvaluator(f quota.ListerForResourceFunc, clock clock.Clock) quota.Evaluator { - listFuncByNamespace := generic.ListResourceUsingListerFunc(f, v1.SchemeGroupVersion.WithResource("pods")) + listFuncByNamespace := generic.ListResourceUsingListerFunc(f, corev1.SchemeGroupVersion.WithResource("pods")) podEvaluator := &podEvaluator{listFuncByNamespace: listFuncByNamespace, clock: clock} return podEvaluator } @@ -115,10 +115,10 @@ type podEvaluator struct { // Constraints verifies that all required resources are present on the pod // In addition, it validates that the resources are valid (i.e. 
requests < limits) -func (p *podEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error { - pod, ok := item.(*api.Pod) - if !ok { - return fmt.Errorf("unexpected input object %v", item) +func (p *podEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error { + pod, err := toExternalPodOrError(item) + if err != nil { + return err } // BACKWARD COMPATIBILITY REQUIREMENT: if we quota cpu or memory, then each container @@ -141,7 +141,7 @@ func (p *podEvaluator) Constraints(required []api.ResourceName, item runtime.Obj // GroupResource that this evaluator tracks func (p *podEvaluator) GroupResource() schema.GroupResource { - return v1.SchemeGroupVersion.WithResource("pods").GroupResource() + return corev1.SchemeGroupVersion.WithResource("pods").GroupResource() } // Handles returns true if the evaluator should handle the specified attributes. @@ -161,12 +161,12 @@ func (p *podEvaluator) Handles(a admission.Attributes) bool { } // Matches returns true if the evaluator matches the specified quota with the provided input item -func (p *podEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) { +func (p *podEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) { return generic.Matches(resourceQuota, item, p.MatchingResources, podMatchesScopeFunc) } // MatchingResources takes the input specified list of resources and returns the set of resources it matches. -func (p *podEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName { +func (p *podEvaluator) MatchingResources(input []corev1.ResourceName) []corev1.ResourceName { result := quota.Intersection(input, podResources) for _, resource := range input { // for resources with certain prefix, e.g. hugepages @@ -183,12 +183,12 @@ func (p *podEvaluator) MatchingResources(input []api.ResourceName) []api.Resourc } // MatchingScopes takes the input specified list of scopes and pod object. 
Returns the set of scope selectors pod matches. -func (p *podEvaluator) MatchingScopes(item runtime.Object, scopeSelectors []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - matchedScopes := []api.ScopedResourceSelectorRequirement{} +func (p *podEvaluator) MatchingScopes(item runtime.Object, scopeSelectors []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + matchedScopes := []corev1.ScopedResourceSelectorRequirement{} for _, selector := range scopeSelectors { match, err := podMatchesScopeFunc(selector, item) if err != nil { - return []api.ScopedResourceSelectorRequirement{}, fmt.Errorf("error on matching scope %v: %v", selector, err) + return []corev1.ScopedResourceSelectorRequirement{}, fmt.Errorf("error on matching scope %v: %v", selector, err) } if match { matchedScopes = append(matchedScopes, selector) @@ -199,8 +199,8 @@ func (p *podEvaluator) MatchingScopes(item runtime.Object, scopeSelectors []api. // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. 
// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope -func (p *podEvaluator) UncoveredQuotaScopes(limitedScopes []api.ScopedResourceSelectorRequirement, matchedQuotaScopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - uncoveredScopes := []api.ScopedResourceSelectorRequirement{} +func (p *podEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + uncoveredScopes := []corev1.ScopedResourceSelectorRequirement{} for _, selector := range limitedScopes { isCovered := false for _, matchedScopeSelector := range matchedQuotaScopes { @@ -218,7 +218,7 @@ func (p *podEvaluator) UncoveredQuotaScopes(limitedScopes []api.ScopedResourceSe } // Usage knows how to measure usage associated with pods -func (p *podEvaluator) Usage(item runtime.Object) (api.ResourceList, error) { +func (p *podEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) { // delegate to normal usage return PodUsageFunc(item, p.clock) } @@ -233,7 +233,7 @@ var _ quota.Evaluator = &podEvaluator{} // enforcePodContainerConstraints checks for required resources that are not set on this container and // adds them to missingSet. 
-func enforcePodContainerConstraints(container *api.Container, requiredSet, missingSet sets.String) { +func enforcePodContainerConstraints(container *corev1.Container, requiredSet, missingSet sets.String) { requests := container.Resources.Requests limits := container.Resources.Limits containerUsage := podComputeUsageHelper(requests, limits) @@ -245,55 +245,55 @@ func enforcePodContainerConstraints(container *api.Container, requiredSet, missi } // podComputeUsageHelper can summarize the pod compute quota usage based on requests and limits -func podComputeUsageHelper(requests api.ResourceList, limits api.ResourceList) api.ResourceList { - result := api.ResourceList{} - result[api.ResourcePods] = resource.MustParse("1") - if request, found := requests[api.ResourceCPU]; found { - result[api.ResourceCPU] = request - result[api.ResourceRequestsCPU] = request +func podComputeUsageHelper(requests corev1.ResourceList, limits corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} + result[corev1.ResourcePods] = resource.MustParse("1") + if request, found := requests[corev1.ResourceCPU]; found { + result[corev1.ResourceCPU] = request + result[corev1.ResourceRequestsCPU] = request } - if limit, found := limits[api.ResourceCPU]; found { - result[api.ResourceLimitsCPU] = limit + if limit, found := limits[corev1.ResourceCPU]; found { + result[corev1.ResourceLimitsCPU] = limit } - if request, found := requests[api.ResourceMemory]; found { - result[api.ResourceMemory] = request - result[api.ResourceRequestsMemory] = request + if request, found := requests[corev1.ResourceMemory]; found { + result[corev1.ResourceMemory] = request + result[corev1.ResourceRequestsMemory] = request } - if limit, found := limits[api.ResourceMemory]; found { - result[api.ResourceLimitsMemory] = limit + if limit, found := limits[corev1.ResourceMemory]; found { + result[corev1.ResourceLimitsMemory] = limit } - if request, found := requests[api.ResourceEphemeralStorage]; found { - 
result[api.ResourceEphemeralStorage] = request - result[api.ResourceRequestsEphemeralStorage] = request + if request, found := requests[corev1.ResourceEphemeralStorage]; found { + result[corev1.ResourceEphemeralStorage] = request + result[corev1.ResourceRequestsEphemeralStorage] = request } - if limit, found := limits[api.ResourceEphemeralStorage]; found { - result[api.ResourceLimitsEphemeralStorage] = limit + if limit, found := limits[corev1.ResourceEphemeralStorage]; found { + result[corev1.ResourceLimitsEphemeralStorage] = limit } for resource, request := range requests { // for resources with certain prefix, e.g. hugepages if quota.ContainsPrefix(requestedResourcePrefixes, resource) { result[resource] = request - result[maskResourceWithPrefix(resource, api.DefaultResourceRequestsPrefix)] = request + result[maskResourceWithPrefix(resource, corev1.DefaultResourceRequestsPrefix)] = request } // for extended resources if helper.IsExtendedResourceName(resource) { // only quota objects in format of "requests.resourceName" is allowed for extended resource. 
- result[maskResourceWithPrefix(resource, api.DefaultResourceRequestsPrefix)] = request + result[maskResourceWithPrefix(resource, corev1.DefaultResourceRequestsPrefix)] = request } } return result } -func toInternalPodOrError(obj runtime.Object) (*api.Pod, error) { - pod := &api.Pod{} +func toExternalPodOrError(obj runtime.Object) (*corev1.Pod, error) { + pod := &corev1.Pod{} switch t := obj.(type) { - case *v1.Pod: - if err := k8s_api_v1.Convert_v1_Pod_To_core_Pod(t, pod, nil); err != nil { + case *corev1.Pod: + pod = t + case *api.Pod: + if err := k8s_api_v1.Convert_core_Pod_To_v1_Pod(t, pod, nil); err != nil { return nil, err } - case *api.Pod: - pod = t default: return nil, fmt.Errorf("expect *api.Pod or *v1.Pod, got %v", t) } @@ -301,21 +301,21 @@ func toInternalPodOrError(obj runtime.Object) (*api.Pod, error) { } // podMatchesScopeFunc is a function that knows how to evaluate if a pod matches a scope -func podMatchesScopeFunc(selector api.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) { - pod, err := toInternalPodOrError(object) +func podMatchesScopeFunc(selector corev1.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) { + pod, err := toExternalPodOrError(object) if err != nil { return false, err } switch selector.ScopeName { - case api.ResourceQuotaScopeTerminating: + case corev1.ResourceQuotaScopeTerminating: return isTerminating(pod), nil - case api.ResourceQuotaScopeNotTerminating: + case corev1.ResourceQuotaScopeNotTerminating: return !isTerminating(pod), nil - case api.ResourceQuotaScopeBestEffort: + case corev1.ResourceQuotaScopeBestEffort: return isBestEffort(pod), nil - case api.ResourceQuotaScopeNotBestEffort: + case corev1.ResourceQuotaScopeNotBestEffort: return !isBestEffort(pod), nil - case api.ResourceQuotaScopePriorityClass: + case corev1.ResourceQuotaScopePriorityClass: return podMatchesSelector(pod, selector) } return false, nil @@ -325,28 +325,28 @@ func podMatchesScopeFunc(selector 
api.ScopedResourceSelectorRequirement, object // A pod is charged for quota if the following are not true. // - pod has a terminal phase (failed or succeeded) // - pod has been marked for deletion and grace period has expired -func PodUsageFunc(obj runtime.Object, clock clock.Clock) (api.ResourceList, error) { - pod, err := toInternalPodOrError(obj) +func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, error) { + pod, err := toExternalPodOrError(obj) if err != nil { - return api.ResourceList{}, err + return corev1.ResourceList{}, err } // always quota the object count (even if the pod is end of life) // object count quotas track all objects that are in storage. // where "pods" tracks all pods that have not reached a terminal state, // count/pods tracks all pods independent of state. - result := api.ResourceList{ + result := corev1.ResourceList{ podObjectCountName: *(resource.NewQuantity(1, resource.DecimalSI)), } // by convention, we do not quota compute resources that have reached end-of life // note: the "pods" resource is considered a compute resource since it is tied to life-cycle. - if !QuotaPod(pod, clock) { + if !QuotaV1Pod(pod, clock) { return result, nil } - requests := api.ResourceList{} - limits := api.ResourceList{} + requests := corev1.ResourceList{} + limits := corev1.ResourceList{} // TODO: ideally, we have pod level requests and limits in the future. 
for i := range pod.Spec.Containers { requests = quota.Add(requests, pod.Spec.Containers[i].Resources.Requests) @@ -364,25 +364,25 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (api.ResourceList, erro return result, nil } -func isBestEffort(pod *api.Pod) bool { - return qos.GetPodQOS(pod) == api.PodQOSBestEffort +func isBestEffort(pod *corev1.Pod) bool { + return qos.GetPodQOS(pod) == corev1.PodQOSBestEffort } -func isTerminating(pod *api.Pod) bool { +func isTerminating(pod *corev1.Pod) bool { if pod.Spec.ActiveDeadlineSeconds != nil && *pod.Spec.ActiveDeadlineSeconds >= int64(0) { return true } return false } -func podMatchesSelector(pod *api.Pod, selector api.ScopedResourceSelectorRequirement) (bool, error) { +func podMatchesSelector(pod *corev1.Pod, selector corev1.ScopedResourceSelectorRequirement) (bool, error) { labelSelector, err := helper.ScopedResourceSelectorRequirementsAsSelector(selector) if err != nil { return false, fmt.Errorf("failed to parse and convert selector: %v", err) } var m map[string]string if len(pod.Spec.PriorityClassName) != 0 { - m = map[string]string{string(api.ResourceQuotaScopePriorityClass): pod.Spec.PriorityClassName} + m = map[string]string{string(corev1.ResourceQuotaScopePriorityClass): pod.Spec.PriorityClassName} } if labelSelector.Matches(labels.Set(m)) { return true, nil @@ -390,36 +390,11 @@ func podMatchesSelector(pod *api.Pod, selector api.ScopedResourceSelectorRequire return false, nil } -// QuotaPod returns true if the pod is eligible to track against a quota -// A pod is eligible for quota, unless any of the following are true: -// - pod has a terminal phase (failed or succeeded) -// - pod has been marked for deletion and grace period has expired. 
-func QuotaPod(pod *api.Pod, clock clock.Clock) bool { - // if pod is terminal, ignore it for quota - if api.PodFailed == pod.Status.Phase || api.PodSucceeded == pod.Status.Phase { - return false - } - // deleted pods that should be gone should not be charged to user quota. - // this can happen if a node is lost, and the kubelet is never able to confirm deletion. - // even though the cluster may have drifting clocks, quota makes a reasonable effort - // to balance cluster needs against user needs. user's do not control clocks, - // but at worst a small drive in clocks will only slightly impact quota. - if pod.DeletionTimestamp != nil && pod.DeletionGracePeriodSeconds != nil { - now := clock.Now() - deletionTime := pod.DeletionTimestamp.Time - gracePeriod := time.Duration(*pod.DeletionGracePeriodSeconds) * time.Second - if now.After(deletionTime.Add(gracePeriod)) { - return false - } - } - return true -} - // QuotaV1Pod returns true if the pod is eligible to track against a quota // if it's not in a terminal state according to its phase. 
-func QuotaV1Pod(pod *v1.Pod, clock clock.Clock) bool { +func QuotaV1Pod(pod *corev1.Pod, clock clock.Clock) bool { // if pod is terminal, ignore it for quota - if v1.PodFailed == pod.Status.Phase || v1.PodSucceeded == pod.Status.Phase { + if corev1.PodFailed == pod.Status.Phase || corev1.PodSucceeded == pod.Status.Phase { return false } // if pods are stuck terminating (for example, a node is lost), we do not want diff --git a/pkg/quota/evaluator/core/pods_test.go b/pkg/quota/v1/evaluator/core/pods_test.go similarity index 74% rename from pkg/quota/evaluator/core/pods_test.go rename to pkg/quota/v1/evaluator/core/pods_test.go index 4d0744373fa..8d0b93c9f8f 100644 --- a/pkg/quota/evaluator/core/pods_test.go +++ b/pkg/quota/v1/evaluator/core/pods_test.go @@ -20,20 +20,21 @@ import ( "testing" "time" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/clock" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" "k8s.io/kubernetes/pkg/util/node" ) func TestPodConstraintsFunc(t *testing.T) { testCases := map[string]struct { pod *api.Pod - required []api.ResourceName + required []corev1.ResourceName err string }{ "init container resource missing": { @@ -47,7 +48,7 @@ func TestPodConstraintsFunc(t *testing.T) { }}, }, }, - required: []api.ResourceName{api.ResourceMemory}, + required: []corev1.ResourceName{corev1.ResourceMemory}, err: `must specify memory`, }, "container resource missing": { @@ -61,7 +62,7 @@ func TestPodConstraintsFunc(t *testing.T) { }}, }, }, - required: []api.ResourceName{api.ResourceMemory}, + required: []corev1.ResourceName{corev1.ResourceMemory}, err: `must specify memory`, }, } @@ -90,7 +91,7 @@ func TestPodEvaluatorUsage(t *testing.T) { testCases := map[string]struct { pod 
*api.Pod - usage api.ResourceList + usage corev1.ResourceList }{ "init container CPU": { pod: &api.Pod{ @@ -103,11 +104,11 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("1m"), - api.ResourceLimitsCPU: resource.MustParse("2m"), - api.ResourcePods: resource.MustParse("1"), - api.ResourceCPU: resource.MustParse("1m"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("1m"), + corev1.ResourceLimitsCPU: resource.MustParse("2m"), + corev1.ResourcePods: resource.MustParse("1"), + corev1.ResourceCPU: resource.MustParse("1m"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -122,11 +123,11 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceRequestsMemory: resource.MustParse("1m"), - api.ResourceLimitsMemory: resource.MustParse("2m"), - api.ResourcePods: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("1m"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsMemory: resource.MustParse("1m"), + corev1.ResourceLimitsMemory: resource.MustParse("2m"), + corev1.ResourcePods: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1m"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -141,11 +142,11 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceEphemeralStorage: resource.MustParse("32Mi"), - api.ResourceRequestsEphemeralStorage: resource.MustParse("32Mi"), - api.ResourceLimitsEphemeralStorage: resource.MustParse("64Mi"), - api.ResourcePods: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceEphemeralStorage: resource.MustParse("32Mi"), + corev1.ResourceRequestsEphemeralStorage: resource.MustParse("32Mi"), + corev1.ResourceLimitsEphemeralStorage: resource.MustParse("64Mi"), + 
corev1.ResourcePods: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -159,10 +160,10 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceName(api.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), - api.ResourceName(api.ResourceRequestsHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), - api.ResourcePods: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceName(corev1.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), + corev1.ResourceName(corev1.ResourceRequestsHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), + corev1.ResourcePods: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -177,9 +178,9 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceName("requests.example.com/dongle"): resource.MustParse("3"), - api.ResourcePods: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceName("requests.example.com/dongle"): resource.MustParse("3"), + corev1.ResourcePods: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -194,11 +195,11 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("1m"), - api.ResourceLimitsCPU: resource.MustParse("2m"), - api.ResourcePods: resource.MustParse("1"), - api.ResourceCPU: resource.MustParse("1m"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("1m"), + corev1.ResourceLimitsCPU: resource.MustParse("2m"), + corev1.ResourcePods: resource.MustParse("1"), + corev1.ResourceCPU: resource.MustParse("1m"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, 
}, @@ -213,11 +214,11 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceRequestsMemory: resource.MustParse("1m"), - api.ResourceLimitsMemory: resource.MustParse("2m"), - api.ResourcePods: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("1m"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsMemory: resource.MustParse("1m"), + corev1.ResourceLimitsMemory: resource.MustParse("2m"), + corev1.ResourcePods: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1m"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -232,11 +233,11 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceEphemeralStorage: resource.MustParse("32Mi"), - api.ResourceRequestsEphemeralStorage: resource.MustParse("32Mi"), - api.ResourceLimitsEphemeralStorage: resource.MustParse("64Mi"), - api.ResourcePods: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceEphemeralStorage: resource.MustParse("32Mi"), + corev1.ResourceRequestsEphemeralStorage: resource.MustParse("32Mi"), + corev1.ResourceLimitsEphemeralStorage: resource.MustParse("64Mi"), + corev1.ResourcePods: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -250,10 +251,10 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceName(api.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), - api.ResourceName(api.ResourceRequestsHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), - api.ResourcePods: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceName(api.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), + corev1.ResourceName(api.ResourceRequestsHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"), + corev1.ResourcePods: resource.MustParse("1"), 
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -268,9 +269,9 @@ func TestPodEvaluatorUsage(t *testing.T) { }}, }, }, - usage: api.ResourceList{ - api.ResourceName("requests.example.com/dongle"): resource.MustParse("3"), - api.ResourcePods: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceName("requests.example.com/dongle"): resource.MustParse("3"), + corev1.ResourcePods: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -339,15 +340,15 @@ func TestPodEvaluatorUsage(t *testing.T) { }, }, }, - usage: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("4"), - api.ResourceRequestsMemory: resource.MustParse("100M"), - api.ResourceLimitsCPU: resource.MustParse("8"), - api.ResourceLimitsMemory: resource.MustParse("200M"), - api.ResourcePods: resource.MustParse("1"), - api.ResourceCPU: resource.MustParse("4"), - api.ResourceMemory: resource.MustParse("100M"), - api.ResourceName("requests.example.com/dongle"): resource.MustParse("4"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("4"), + corev1.ResourceRequestsMemory: resource.MustParse("100M"), + corev1.ResourceLimitsCPU: resource.MustParse("8"), + corev1.ResourceLimitsMemory: resource.MustParse("200M"), + corev1.ResourcePods: resource.MustParse("1"), + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("100M"), + corev1.ResourceName("requests.example.com/dongle"): resource.MustParse("4"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -378,7 +379,7 @@ func TestPodEvaluatorUsage(t *testing.T) { }, }, }, - usage: api.ResourceList{ + usage: corev1.ResourceList{ generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, @@ -406,11 +407,11 @@ func 
TestPodEvaluatorUsage(t *testing.T) { }, }, }, - usage: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("1"), - api.ResourceLimitsCPU: resource.MustParse("2"), - api.ResourcePods: resource.MustParse("1"), - api.ResourceCPU: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("1"), + corev1.ResourceLimitsCPU: resource.MustParse("2"), + corev1.ResourcePods: resource.MustParse("1"), + corev1.ResourceCPU: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"), }, }, diff --git a/pkg/quota/evaluator/core/registry.go b/pkg/quota/v1/evaluator/core/registry.go similarity index 70% rename from pkg/quota/evaluator/core/registry.go rename to pkg/quota/v1/evaluator/core/registry.go index ba54143c32e..43a86d318ce 100644 --- a/pkg/quota/evaluator/core/registry.go +++ b/pkg/quota/v1/evaluator/core/registry.go @@ -17,20 +17,19 @@ limitations under the License. package core import ( - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/clock" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" ) // legacyObjectCountAliases are what we used to do simple object counting quota with mapped to alias -var legacyObjectCountAliases = map[schema.GroupVersionResource]api.ResourceName{ - v1.SchemeGroupVersion.WithResource("configmaps"): api.ResourceConfigMaps, - v1.SchemeGroupVersion.WithResource("resourcequotas"): api.ResourceQuotas, - v1.SchemeGroupVersion.WithResource("replicationcontrollers"): api.ResourceReplicationControllers, - v1.SchemeGroupVersion.WithResource("secrets"): api.ResourceSecrets, +var legacyObjectCountAliases = map[schema.GroupVersionResource]corev1.ResourceName{ + corev1.SchemeGroupVersion.WithResource("configmaps"): 
corev1.ResourceConfigMaps, + corev1.SchemeGroupVersion.WithResource("resourcequotas"): corev1.ResourceQuotas, + corev1.SchemeGroupVersion.WithResource("replicationcontrollers"): corev1.ResourceReplicationControllers, + corev1.SchemeGroupVersion.WithResource("secrets"): corev1.ResourceSecrets, } // NewEvaluators returns the list of static evaluators that manage more than counts diff --git a/pkg/quota/evaluator/core/services.go b/pkg/quota/v1/evaluator/core/services.go similarity index 64% rename from pkg/quota/evaluator/core/services.go rename to pkg/quota/v1/evaluator/core/services.go index 006c0e9b113..eaebcc698a2 100644 --- a/pkg/quota/evaluator/core/services.go +++ b/pkg/quota/v1/evaluator/core/services.go @@ -19,31 +19,31 @@ package core import ( "fmt" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" api "k8s.io/kubernetes/pkg/apis/core" k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" ) // the name used for object count quota -var serviceObjectCountName = generic.ObjectCountQuotaResourceNameFor(v1.SchemeGroupVersion.WithResource("services").GroupResource()) +var serviceObjectCountName = generic.ObjectCountQuotaResourceNameFor(corev1.SchemeGroupVersion.WithResource("services").GroupResource()) // serviceResources are the set of resources managed by quota associated with services. -var serviceResources = []api.ResourceName{ +var serviceResources = []corev1.ResourceName{ serviceObjectCountName, - api.ResourceServices, - api.ResourceServicesNodePorts, - api.ResourceServicesLoadBalancers, + corev1.ResourceServices, + corev1.ResourceServicesNodePorts, + corev1.ResourceServicesLoadBalancers, } // NewServiceEvaluator returns an evaluator that can evaluate services. 
func NewServiceEvaluator(f quota.ListerForResourceFunc) quota.Evaluator { - listFuncByNamespace := generic.ListResourceUsingListerFunc(f, v1.SchemeGroupVersion.WithResource("services")) + listFuncByNamespace := generic.ListResourceUsingListerFunc(f, corev1.SchemeGroupVersion.WithResource("services")) serviceEvaluator := &serviceEvaluator{listFuncByNamespace: listFuncByNamespace} return serviceEvaluator } @@ -55,14 +55,14 @@ type serviceEvaluator struct { } // Constraints verifies that all required resources are present on the item -func (p *serviceEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error { +func (p *serviceEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error { // this is a no-op for services return nil } // GroupResource that this evaluator tracks func (p *serviceEvaluator) GroupResource() schema.GroupResource { - return v1.SchemeGroupVersion.WithResource("services").GroupResource() + return corev1.SchemeGroupVersion.WithResource("services").GroupResource() } // Handles returns true of the evaluator should handle the specified operation. @@ -73,36 +73,36 @@ func (p *serviceEvaluator) Handles(a admission.Attributes) bool { } // Matches returns true if the evaluator matches the specified quota with the provided input item -func (p *serviceEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) { +func (p *serviceEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) { return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc) } // MatchingResources takes the input specified list of resources and returns the set of resources it matches. 
-func (p *serviceEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName { +func (p *serviceEvaluator) MatchingResources(input []corev1.ResourceName) []corev1.ResourceName { return quota.Intersection(input, serviceResources) } // MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches. -func (p *serviceEvaluator) MatchingScopes(item runtime.Object, scopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - return []api.ScopedResourceSelectorRequirement{}, nil +func (p *serviceEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil } // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. // It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope -func (p *serviceEvaluator) UncoveredQuotaScopes(limitedScopes []api.ScopedResourceSelectorRequirement, matchedQuotaScopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - return []api.ScopedResourceSelectorRequirement{}, nil +func (p *serviceEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil } // convert the input object to an internal service object or error. 
-func toInternalServiceOrError(obj runtime.Object) (*api.Service, error) { - svc := &api.Service{} +func toExternalServiceOrError(obj runtime.Object) (*corev1.Service, error) { + svc := &corev1.Service{} switch t := obj.(type) { - case *v1.Service: - if err := k8s_api_v1.Convert_v1_Service_To_core_Service(t, svc, nil); err != nil { + case *corev1.Service: + svc = t + case *api.Service: + if err := k8s_api_v1.Convert_core_Service_To_v1_Service(t, svc, nil); err != nil { return nil, err } - case *api.Service: - svc = t default: return nil, fmt.Errorf("expect *api.Service or *v1.Service, got %v", t) } @@ -110,28 +110,28 @@ func toInternalServiceOrError(obj runtime.Object) (*api.Service, error) { } // Usage knows how to measure usage associated with services -func (p *serviceEvaluator) Usage(item runtime.Object) (api.ResourceList, error) { - result := api.ResourceList{} - svc, err := toInternalServiceOrError(item) +func (p *serviceEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) { + result := corev1.ResourceList{} + svc, err := toExternalServiceOrError(item) if err != nil { return result, err } ports := len(svc.Spec.Ports) // default service usage result[serviceObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI)) - result[api.ResourceServices] = *(resource.NewQuantity(1, resource.DecimalSI)) - result[api.ResourceServicesLoadBalancers] = resource.Quantity{Format: resource.DecimalSI} - result[api.ResourceServicesNodePorts] = resource.Quantity{Format: resource.DecimalSI} + result[corev1.ResourceServices] = *(resource.NewQuantity(1, resource.DecimalSI)) + result[corev1.ResourceServicesLoadBalancers] = resource.Quantity{Format: resource.DecimalSI} + result[corev1.ResourceServicesNodePorts] = resource.Quantity{Format: resource.DecimalSI} switch svc.Spec.Type { - case api.ServiceTypeNodePort: + case corev1.ServiceTypeNodePort: // node port services need to count node ports value := resource.NewQuantity(int64(ports), resource.DecimalSI) - 
result[api.ResourceServicesNodePorts] = *value - case api.ServiceTypeLoadBalancer: + result[corev1.ResourceServicesNodePorts] = *value + case corev1.ServiceTypeLoadBalancer: // load balancer services need to count node ports and load balancers value := resource.NewQuantity(int64(ports), resource.DecimalSI) - result[api.ResourceServicesNodePorts] = *value - result[api.ResourceServicesLoadBalancers] = *(resource.NewQuantity(1, resource.DecimalSI)) + result[corev1.ResourceServicesNodePorts] = *value + result[corev1.ResourceServicesLoadBalancers] = *(resource.NewQuantity(1, resource.DecimalSI)) } return result, nil } @@ -144,12 +144,12 @@ func (p *serviceEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.Us var _ quota.Evaluator = &serviceEvaluator{} //GetQuotaServiceType returns ServiceType if the service type is eligible to track against a quota, nor return "" -func GetQuotaServiceType(service *v1.Service) v1.ServiceType { +func GetQuotaServiceType(service *corev1.Service) corev1.ServiceType { switch service.Spec.Type { - case v1.ServiceTypeNodePort: - return v1.ServiceTypeNodePort - case v1.ServiceTypeLoadBalancer: - return v1.ServiceTypeLoadBalancer + case corev1.ServiceTypeNodePort: + return corev1.ServiceTypeNodePort + case corev1.ServiceTypeLoadBalancer: + return corev1.ServiceTypeLoadBalancer } - return v1.ServiceType("") + return corev1.ServiceType("") } diff --git a/pkg/quota/evaluator/core/services_test.go b/pkg/quota/v1/evaluator/core/services_test.go similarity index 67% rename from pkg/quota/evaluator/core/services_test.go rename to pkg/quota/v1/evaluator/core/services_test.go index 601397ce193..b9f227a0c82 100644 --- a/pkg/quota/evaluator/core/services_test.go +++ b/pkg/quota/v1/evaluator/core/services_test.go @@ -19,28 +19,29 @@ package core import ( "testing" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime/schema" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/quota" - 
"k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" ) func TestServiceEvaluatorMatchesResources(t *testing.T) { evaluator := NewServiceEvaluator(nil) // we give a lot of resources - input := []api.ResourceName{ - api.ResourceConfigMaps, - api.ResourceCPU, - api.ResourceServices, - api.ResourceServicesNodePorts, - api.ResourceServicesLoadBalancers, + input := []corev1.ResourceName{ + corev1.ResourceConfigMaps, + corev1.ResourceCPU, + corev1.ResourceServices, + corev1.ResourceServicesNodePorts, + corev1.ResourceServicesLoadBalancers, } // but we only match these... - expected := quota.ToSet([]api.ResourceName{ - api.ResourceServices, - api.ResourceServicesNodePorts, - api.ResourceServicesLoadBalancers, + expected := quota.ToSet([]corev1.ResourceName{ + corev1.ResourceServices, + corev1.ResourceServicesNodePorts, + corev1.ResourceServicesLoadBalancers, }) actual := quota.ToSet(evaluator.MatchingResources(input)) if !expected.Equal(actual) { @@ -52,7 +53,7 @@ func TestServiceEvaluatorUsage(t *testing.T) { evaluator := NewServiceEvaluator(nil) testCases := map[string]struct { service *api.Service - usage api.ResourceList + usage corev1.ResourceList }{ "loadbalancer": { service: &api.Service{ @@ -60,10 +61,10 @@ func TestServiceEvaluatorUsage(t *testing.T) { Type: api.ServiceTypeLoadBalancer, }, }, - usage: api.ResourceList{ - api.ResourceServicesNodePorts: resource.MustParse("0"), - api.ResourceServicesLoadBalancers: resource.MustParse("1"), - api.ResourceServices: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceServicesNodePorts: resource.MustParse("0"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("1"), + corev1.ResourceServices: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "services"}): resource.MustParse("1"), }, }, @@ -78,10 +79,10 @@ func TestServiceEvaluatorUsage(t *testing.T) { }, }, }, - usage: 
api.ResourceList{ - api.ResourceServicesNodePorts: resource.MustParse("1"), - api.ResourceServicesLoadBalancers: resource.MustParse("1"), - api.ResourceServices: resource.MustParse("1"), + usage: corev1.ResourceList{ + corev1.ResourceServicesNodePorts: resource.MustParse("1"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("1"), + corev1.ResourceServices: resource.MustParse("1"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "services"}): resource.MustParse("1"), }, }, @@ -91,10 +92,10 @@ func TestServiceEvaluatorUsage(t *testing.T) { Type: api.ServiceTypeClusterIP, }, }, - usage: api.ResourceList{ - api.ResourceServices: resource.MustParse("1"), - api.ResourceServicesNodePorts: resource.MustParse("0"), - api.ResourceServicesLoadBalancers: resource.MustParse("0"), + usage: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("1"), + corev1.ResourceServicesNodePorts: resource.MustParse("0"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("0"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "services"}): resource.MustParse("1"), }, }, @@ -109,10 +110,10 @@ func TestServiceEvaluatorUsage(t *testing.T) { }, }, }, - usage: api.ResourceList{ - api.ResourceServices: resource.MustParse("1"), - api.ResourceServicesNodePorts: resource.MustParse("1"), - api.ResourceServicesLoadBalancers: resource.MustParse("0"), + usage: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("1"), + corev1.ResourceServicesNodePorts: resource.MustParse("1"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("0"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "services"}): resource.MustParse("1"), }, }, @@ -130,10 +131,10 @@ func TestServiceEvaluatorUsage(t *testing.T) { }, }, }, - usage: api.ResourceList{ - api.ResourceServices: resource.MustParse("1"), - api.ResourceServicesNodePorts: resource.MustParse("2"), - api.ResourceServicesLoadBalancers: 
resource.MustParse("0"), + usage: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("1"), + corev1.ResourceServicesNodePorts: resource.MustParse("2"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("0"), generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "services"}): resource.MustParse("1"), }, }, @@ -152,7 +153,7 @@ func TestServiceEvaluatorUsage(t *testing.T) { func TestServiceConstraintsFunc(t *testing.T) { testCases := map[string]struct { service *api.Service - required []api.ResourceName + required []corev1.ResourceName err string }{ "loadbalancer": { @@ -161,7 +162,7 @@ func TestServiceConstraintsFunc(t *testing.T) { Type: api.ServiceTypeLoadBalancer, }, }, - required: []api.ResourceName{api.ResourceServicesLoadBalancers}, + required: []corev1.ResourceName{corev1.ResourceServicesLoadBalancers}, }, "clusterip": { service: &api.Service{ @@ -169,7 +170,7 @@ func TestServiceConstraintsFunc(t *testing.T) { Type: api.ServiceTypeClusterIP, }, }, - required: []api.ResourceName{api.ResourceServicesLoadBalancers, api.ResourceServices}, + required: []corev1.ResourceName{corev1.ResourceServicesLoadBalancers, corev1.ResourceServices}, }, "nodeports": { service: &api.Service{ @@ -182,7 +183,7 @@ func TestServiceConstraintsFunc(t *testing.T) { }, }, }, - required: []api.ResourceName{api.ResourceServicesNodePorts}, + required: []corev1.ResourceName{corev1.ResourceServicesNodePorts}, }, "multi-nodeports": { service: &api.Service{ @@ -198,7 +199,7 @@ func TestServiceConstraintsFunc(t *testing.T) { }, }, }, - required: []api.ResourceName{api.ResourceServicesNodePorts}, + required: []corev1.ResourceName{corev1.ResourceServicesNodePorts}, }, } diff --git a/pkg/quota/generic/BUILD b/pkg/quota/v1/generic/BUILD similarity index 86% rename from pkg/quota/generic/BUILD rename to pkg/quota/v1/generic/BUILD index d7c63ceb69d..c99ade4e3f5 100644 --- a/pkg/quota/generic/BUILD +++ b/pkg/quota/v1/generic/BUILD @@ -12,10 +12,10 @@ go_library( 
"evaluator.go", "registry.go", ], - importpath = "k8s.io/kubernetes/pkg/quota/generic", + importpath = "k8s.io/kubernetes/pkg/quota/v1/generic", deps = [ - "//pkg/apis/core:go_default_library", - "//pkg/quota:go_default_library", + "//pkg/quota/v1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/quota/generic/OWNERS b/pkg/quota/v1/generic/OWNERS similarity index 100% rename from pkg/quota/generic/OWNERS rename to pkg/quota/v1/generic/OWNERS diff --git a/pkg/quota/generic/configuration.go b/pkg/quota/v1/generic/configuration.go similarity index 96% rename from pkg/quota/generic/configuration.go rename to pkg/quota/v1/generic/configuration.go index 59c009e13d3..1a1acc44185 100644 --- a/pkg/quota/generic/configuration.go +++ b/pkg/quota/v1/generic/configuration.go @@ -18,7 +18,7 @@ package generic import ( "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/quota" + quota "k8s.io/kubernetes/pkg/quota/v1" ) // implements a basic configuration diff --git a/pkg/quota/generic/evaluator.go b/pkg/quota/v1/generic/evaluator.go similarity index 78% rename from pkg/quota/generic/evaluator.go rename to pkg/quota/v1/generic/evaluator.go index 60da7d634bb..4e377175445 100644 --- a/pkg/quota/generic/evaluator.go +++ b/pkg/quota/v1/generic/evaluator.go @@ -19,6 +19,7 @@ package generic import ( "fmt" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -26,8 +27,7 @@ import ( "k8s.io/apiserver/pkg/admission" "k8s.io/client-go/informers" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/quota" + quota "k8s.io/kubernetes/pkg/quota/v1" ) // InformerForResourceFunc knows how to provision an 
informer @@ -56,33 +56,33 @@ func ListResourceUsingListerFunc(l quota.ListerForResourceFunc, resource schema. } // ObjectCountQuotaResourceNameFor returns the object count quota name for specified groupResource -func ObjectCountQuotaResourceNameFor(groupResource schema.GroupResource) api.ResourceName { +func ObjectCountQuotaResourceNameFor(groupResource schema.GroupResource) corev1.ResourceName { if len(groupResource.Group) == 0 { - return api.ResourceName("count/" + groupResource.Resource) + return corev1.ResourceName("count/" + groupResource.Resource) } - return api.ResourceName("count/" + groupResource.Resource + "." + groupResource.Group) + return corev1.ResourceName("count/" + groupResource.Resource + "." + groupResource.Group) } // ListFuncByNamespace knows how to list resources in a namespace type ListFuncByNamespace func(namespace string) ([]runtime.Object, error) // MatchesScopeFunc knows how to evaluate if an object matches a scope -type MatchesScopeFunc func(scope api.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) +type MatchesScopeFunc func(scope corev1.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) // UsageFunc knows how to measure usage associated with an object -type UsageFunc func(object runtime.Object) (api.ResourceList, error) +type UsageFunc func(object runtime.Object) (corev1.ResourceList, error) // MatchingResourceNamesFunc is a function that returns the list of resources matched -type MatchingResourceNamesFunc func(input []api.ResourceName) []api.ResourceName +type MatchingResourceNamesFunc func(input []corev1.ResourceName) []corev1.ResourceName // MatchesNoScopeFunc returns false on all match checks -func MatchesNoScopeFunc(scope api.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) { +func MatchesNoScopeFunc(scope corev1.ScopedResourceSelectorRequirement, object runtime.Object) (bool, error) { return false, nil } // Matches returns true if the quota matches the specified 
item. func Matches( - resourceQuota *api.ResourceQuota, item runtime.Object, + resourceQuota *corev1.ResourceQuota, item runtime.Object, matchFunc MatchingResourceNamesFunc, scopeFunc MatchesScopeFunc) (bool, error) { if resourceQuota == nil { return false, fmt.Errorf("expected non-nil quota") @@ -101,12 +101,12 @@ func Matches( return matchResource && matchScope, nil } -func getScopeSelectorsFromQuota(quota *api.ResourceQuota) []api.ScopedResourceSelectorRequirement { - selectors := []api.ScopedResourceSelectorRequirement{} +func getScopeSelectorsFromQuota(quota *corev1.ResourceQuota) []corev1.ScopedResourceSelectorRequirement { + selectors := []corev1.ScopedResourceSelectorRequirement{} for _, scope := range quota.Spec.Scopes { - selectors = append(selectors, api.ScopedResourceSelectorRequirement{ + selectors = append(selectors, corev1.ScopedResourceSelectorRequirement{ ScopeName: scope, - Operator: api.ScopeSelectorOpExists}) + Operator: corev1.ScopeSelectorOpExists}) } if quota.Spec.ScopeSelector != nil { for _, scopeSelector := range quota.Spec.ScopeSelector.MatchExpressions { @@ -122,7 +122,7 @@ func CalculateUsageStats(options quota.UsageStatsOptions, scopeFunc MatchesScopeFunc, usageFunc UsageFunc) (quota.UsageStats, error) { // default each tracked resource to zero - result := quota.UsageStats{Used: api.ResourceList{}} + result := quota.UsageStats{Used: corev1.ResourceList{}} for _, resourceName := range options.Resources { result.Used[resourceName] = resource.Quantity{Format: resource.DecimalSI} } @@ -134,7 +134,7 @@ func CalculateUsageStats(options quota.UsageStatsOptions, // need to verify that the item matches the set of scopes matchesScopes := true for _, scope := range options.Scopes { - innerMatch, err := scopeFunc(api.ScopedResourceSelectorRequirement{ScopeName: scope}, item) + innerMatch, err := scopeFunc(corev1.ScopedResourceSelectorRequirement{ScopeName: scope}, item) if err != nil { return result, nil } @@ -174,11 +174,11 @@ type 
objectCountEvaluator struct { // TODO move to dynamic client in future listFuncByNamespace ListFuncByNamespace // Names associated with this resource in the quota for generic counting. - resourceNames []api.ResourceName + resourceNames []corev1.ResourceName } // Constraints returns an error if the configured resource name is not in the required set. -func (o *objectCountEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error { +func (o *objectCountEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error { // no-op for object counting return nil } @@ -190,30 +190,30 @@ func (o *objectCountEvaluator) Handles(a admission.Attributes) bool { } // Matches returns true if the evaluator matches the specified quota with the provided input item -func (o *objectCountEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) { +func (o *objectCountEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) { return Matches(resourceQuota, item, o.MatchingResources, MatchesNoScopeFunc) } // MatchingResources takes the input specified list of resources and returns the set of resources it matches. -func (o *objectCountEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName { +func (o *objectCountEvaluator) MatchingResources(input []corev1.ResourceName) []corev1.ResourceName { return quota.Intersection(input, o.resourceNames) } // MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches. 
-func (o *objectCountEvaluator) MatchingScopes(item runtime.Object, scopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - return []api.ScopedResourceSelectorRequirement{}, nil +func (o *objectCountEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil } // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. // It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope -func (o *objectCountEvaluator) UncoveredQuotaScopes(limitedScopes []api.ScopedResourceSelectorRequirement, matchedQuotaScopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) { - return []api.ScopedResourceSelectorRequirement{}, nil +func (o *objectCountEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) { + return []corev1.ScopedResourceSelectorRequirement{}, nil } // Usage returns the resource usage for the specified object -func (o *objectCountEvaluator) Usage(object runtime.Object) (api.ResourceList, error) { +func (o *objectCountEvaluator) Usage(object runtime.Object) (corev1.ResourceList, error) { quantity := resource.NewQuantity(1, resource.DecimalSI) - resourceList := api.ResourceList{} + resourceList := corev1.ResourceList{} for _, resourceName := range o.resourceNames { resourceList[resourceName] = *quantity } @@ -239,9 +239,9 @@ var _ quota.Evaluator = &objectCountEvaluator{} // backward compatibility, alias should not be used. 
func NewObjectCountEvaluator( groupResource schema.GroupResource, listFuncByNamespace ListFuncByNamespace, - alias api.ResourceName) quota.Evaluator { + alias corev1.ResourceName) quota.Evaluator { - resourceNames := []api.ResourceName{ObjectCountQuotaResourceNameFor(groupResource)} + resourceNames := []corev1.ResourceName{ObjectCountQuotaResourceNameFor(groupResource)} if len(alias) > 0 { resourceNames = append(resourceNames, alias) } diff --git a/pkg/quota/generic/registry.go b/pkg/quota/v1/generic/registry.go similarity index 98% rename from pkg/quota/generic/registry.go rename to pkg/quota/v1/generic/registry.go index fdc38e02b1c..10404a3f288 100644 --- a/pkg/quota/generic/registry.go +++ b/pkg/quota/v1/generic/registry.go @@ -20,7 +20,7 @@ import ( "sync" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/quota" + quota "k8s.io/kubernetes/pkg/quota/v1" ) // implements a basic registry diff --git a/pkg/quota/install/BUILD b/pkg/quota/v1/install/BUILD similarity index 70% rename from pkg/quota/install/BUILD rename to pkg/quota/v1/install/BUILD index 31678067ea6..96425fa422c 100644 --- a/pkg/quota/install/BUILD +++ b/pkg/quota/v1/install/BUILD @@ -8,11 +8,11 @@ load( go_library( name = "go_default_library", srcs = ["registry.go"], - importpath = "k8s.io/kubernetes/pkg/quota/install", + importpath = "k8s.io/kubernetes/pkg/quota/v1/install", deps = [ - "//pkg/quota:go_default_library", - "//pkg/quota/evaluator/core:go_default_library", - "//pkg/quota/generic:go_default_library", + "//pkg/quota/v1:go_default_library", + "//pkg/quota/v1/evaluator/core:go_default_library", + "//pkg/quota/v1/generic:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", ], ) diff --git a/pkg/quota/install/OWNERS b/pkg/quota/v1/install/OWNERS similarity index 100% rename from pkg/quota/install/OWNERS rename to pkg/quota/v1/install/OWNERS diff --git a/pkg/quota/install/registry.go b/pkg/quota/v1/install/registry.go similarity index 
91% rename from pkg/quota/install/registry.go rename to pkg/quota/v1/install/registry.go index dd4596d310a..b870368530f 100644 --- a/pkg/quota/install/registry.go +++ b/pkg/quota/v1/install/registry.go @@ -18,9 +18,9 @@ package install import ( "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/evaluator/core" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + core "k8s.io/kubernetes/pkg/quota/v1/evaluator/core" + generic "k8s.io/kubernetes/pkg/quota/v1/generic" ) // NewQuotaConfigurationForAdmission returns a quota configuration for admission control. diff --git a/pkg/quota/interfaces.go b/pkg/quota/v1/interfaces.go similarity index 80% rename from pkg/quota/interfaces.go rename to pkg/quota/v1/interfaces.go index e6723b8aef7..d71b6641830 100644 --- a/pkg/quota/interfaces.go +++ b/pkg/quota/v1/interfaces.go @@ -17,11 +17,11 @@ limitations under the License. package quota import ( + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/admission" "k8s.io/client-go/tools/cache" - api "k8s.io/kubernetes/pkg/apis/core" ) // UsageStatsOptions is an options structs that describes how stats should be calculated @@ -29,37 +29,37 @@ type UsageStatsOptions struct { // Namespace where stats should be calculate Namespace string // Scopes that must match counted objects - Scopes []api.ResourceQuotaScope + Scopes []corev1.ResourceQuotaScope // Resources are the set of resources to include in the measurement - Resources []api.ResourceName - ScopeSelector *api.ScopeSelector + Resources []corev1.ResourceName + ScopeSelector *corev1.ScopeSelector } // UsageStats is result of measuring observed resource use in the system type UsageStats struct { // Used maps resource to quantity used - Used api.ResourceList + Used corev1.ResourceList } // Evaluator knows how to evaluate quota usage for a particular group resource type Evaluator 
interface { // Constraints ensures that each required resource is present on item - Constraints(required []api.ResourceName, item runtime.Object) error + Constraints(required []corev1.ResourceName, item runtime.Object) error // GroupResource returns the groupResource that this object knows how to evaluate GroupResource() schema.GroupResource // Handles determines if quota could be impacted by the specified attribute. // If true, admission control must perform quota processing for the operation, otherwise it is safe to ignore quota. Handles(operation admission.Attributes) bool // Matches returns true if the specified quota matches the input item - Matches(resourceQuota *api.ResourceQuota, item runtime.Object) (bool, error) + Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) // MatchingScopes takes the input specified list of scopes and input object and returns the set of scopes that matches input object. - MatchingScopes(item runtime.Object, scopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) + MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) // UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes. It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope - UncoveredQuotaScopes(limitedScopes []api.ScopedResourceSelectorRequirement, matchedQuotaScopes []api.ScopedResourceSelectorRequirement) ([]api.ScopedResourceSelectorRequirement, error) + UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) // MatchingResources takes the input specified list of resources and returns the set of resources evaluator matches. 
- MatchingResources(input []api.ResourceName) []api.ResourceName + MatchingResources(input []corev1.ResourceName) []corev1.ResourceName // Usage returns the resource usage for the specified object - Usage(item runtime.Object) (api.ResourceList, error) + Usage(item runtime.Object) (corev1.ResourceList, error) // UsageStats calculates latest observed usage stats for all objects UsageStats(options UsageStatsOptions) (UsageStats, error) } diff --git a/pkg/quota/resources.go b/pkg/quota/v1/resources.go similarity index 77% rename from pkg/quota/resources.go rename to pkg/quota/v1/resources.go index b261aedef5f..b6aa3210d4b 100644 --- a/pkg/quota/resources.go +++ b/pkg/quota/v1/resources.go @@ -19,14 +19,13 @@ package quota import ( "strings" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/sets" - api "k8s.io/kubernetes/pkg/apis/core" ) // Equals returns true if the two lists are equivalent -func Equals(a api.ResourceList, b api.ResourceList) bool { +func Equals(a corev1.ResourceList, b corev1.ResourceList) bool { if len(a) != len(b) { return false } @@ -45,7 +44,7 @@ func Equals(a api.ResourceList, b api.ResourceList) bool { } // V1Equals returns true if the two lists are equivalent -func V1Equals(a v1.ResourceList, b v1.ResourceList) bool { +func V1Equals(a corev1.ResourceList, b corev1.ResourceList) bool { if len(a) != len(b) { return false } @@ -65,9 +64,9 @@ func V1Equals(a v1.ResourceList, b v1.ResourceList) bool { // LessThanOrEqual returns true if a < b for each key in b // If false, it returns the keys in a that exceeded b -func LessThanOrEqual(a api.ResourceList, b api.ResourceList) (bool, []api.ResourceName) { +func LessThanOrEqual(a corev1.ResourceList, b corev1.ResourceList) (bool, []corev1.ResourceName) { result := true - resourceNames := []api.ResourceName{} + resourceNames := []corev1.ResourceName{} for key, value := range b { if other, found := a[key]; found { if other.Cmp(value) > 
0 { @@ -80,8 +79,8 @@ func LessThanOrEqual(a api.ResourceList, b api.ResourceList) (bool, []api.Resour } // Max returns the result of Max(a, b) for each named resource -func Max(a api.ResourceList, b api.ResourceList) api.ResourceList { - result := api.ResourceList{} +func Max(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} for key, value := range a { if other, found := b[key]; found { if value.Cmp(other) <= 0 { @@ -100,8 +99,8 @@ func Max(a api.ResourceList, b api.ResourceList) api.ResourceList { } // Add returns the result of a + b for each named resource -func Add(a api.ResourceList, b api.ResourceList) api.ResourceList { - result := api.ResourceList{} +func Add(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} for key, value := range a { quantity := *value.Copy() if other, found := b[key]; found { @@ -120,10 +119,10 @@ func Add(a api.ResourceList, b api.ResourceList) api.ResourceList { // SubtractWithNonNegativeResult - subtracts and returns result of a - b but // makes sure we don't return negative values to prevent negative resource usage. 
-func SubtractWithNonNegativeResult(a api.ResourceList, b api.ResourceList) api.ResourceList { +func SubtractWithNonNegativeResult(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { zero := resource.MustParse("0") - result := api.ResourceList{} + result := corev1.ResourceList{} for key, value := range a { quantity := *value.Copy() if other, found := b[key]; found { @@ -145,8 +144,8 @@ func SubtractWithNonNegativeResult(a api.ResourceList, b api.ResourceList) api.R } // Subtract returns the result of a - b for each named resource -func Subtract(a api.ResourceList, b api.ResourceList) api.ResourceList { - result := api.ResourceList{} +func Subtract(a corev1.ResourceList, b corev1.ResourceList) corev1.ResourceList { + result := corev1.ResourceList{} for key, value := range a { quantity := *value.Copy() if other, found := b[key]; found { @@ -165,9 +164,9 @@ func Subtract(a api.ResourceList, b api.ResourceList) api.ResourceList { } // Mask returns a new resource list that only has the values with the specified names -func Mask(resources api.ResourceList, names []api.ResourceName) api.ResourceList { +func Mask(resources corev1.ResourceList, names []corev1.ResourceName) corev1.ResourceList { nameSet := ToSet(names) - result := api.ResourceList{} + result := corev1.ResourceList{} for key, value := range resources { if nameSet.Has(string(key)) { result[key] = *value.Copy() @@ -177,8 +176,8 @@ func Mask(resources api.ResourceList, names []api.ResourceName) api.ResourceList } // ResourceNames returns a list of all resource names in the ResourceList -func ResourceNames(resources api.ResourceList) []api.ResourceName { - result := []api.ResourceName{} +func ResourceNames(resources corev1.ResourceList) []corev1.ResourceName { + result := []corev1.ResourceName{} for resourceName := range resources { result = append(result, resourceName) } @@ -186,12 +185,12 @@ func ResourceNames(resources api.ResourceList) []api.ResourceName { } // Contains returns true if the 
specified item is in the list of items -func Contains(items []api.ResourceName, item api.ResourceName) bool { +func Contains(items []corev1.ResourceName, item corev1.ResourceName) bool { return ToSet(items).Has(string(item)) } // ContainsPrefix returns true if the specified item has a prefix that contained in given prefix Set -func ContainsPrefix(prefixSet []string, item api.ResourceName) bool { +func ContainsPrefix(prefixSet []string, item corev1.ResourceName) bool { for _, prefix := range prefixSet { if strings.HasPrefix(string(item), prefix) { return true @@ -201,19 +200,19 @@ func ContainsPrefix(prefixSet []string, item api.ResourceName) bool { } // Intersection returns the intersection of both list of resources -func Intersection(a []api.ResourceName, b []api.ResourceName) []api.ResourceName { +func Intersection(a []corev1.ResourceName, b []corev1.ResourceName) []corev1.ResourceName { setA := ToSet(a) setB := ToSet(b) setC := setA.Intersection(setB) - result := []api.ResourceName{} + result := []corev1.ResourceName{} for _, resourceName := range setC.List() { - result = append(result, api.ResourceName(resourceName)) + result = append(result, corev1.ResourceName(resourceName)) } return result } // IsZero returns true if each key maps to the quantity value 0 -func IsZero(a api.ResourceList) bool { +func IsZero(a corev1.ResourceList) bool { zero := resource.MustParse("0") for _, v := range a { if v.Cmp(zero) != 0 { @@ -224,8 +223,8 @@ func IsZero(a api.ResourceList) bool { } // IsNegative returns the set of resource names that have a negative value. 
-func IsNegative(a api.ResourceList) []api.ResourceName { - results := []api.ResourceName{} +func IsNegative(a corev1.ResourceList) []corev1.ResourceName { + results := []corev1.ResourceName{} zero := resource.MustParse("0") for k, v := range a { if v.Cmp(zero) < 0 { @@ -236,7 +235,7 @@ func IsNegative(a api.ResourceList) []api.ResourceName { } // ToSet takes a list of resource names and converts to a string set -func ToSet(resourceNames []api.ResourceName) sets.String { +func ToSet(resourceNames []corev1.ResourceName) sets.String { result := sets.NewString() for _, resourceName := range resourceNames { result.Insert(string(resourceName)) @@ -245,12 +244,12 @@ func ToSet(resourceNames []api.ResourceName) sets.String { } // CalculateUsage calculates and returns the requested ResourceList usage -func CalculateUsage(namespaceName string, scopes []api.ResourceQuotaScope, hardLimits api.ResourceList, registry Registry, scopeSelector *api.ScopeSelector) (api.ResourceList, error) { +func CalculateUsage(namespaceName string, scopes []corev1.ResourceQuotaScope, hardLimits corev1.ResourceList, registry Registry, scopeSelector *corev1.ScopeSelector) (corev1.ResourceList, error) { // find the intersection between the hard resources on the quota // and the resources this controller can track to know what we can // look to measure updated usage stats for hardResources := ResourceNames(hardLimits) - potentialResources := []api.ResourceName{} + potentialResources := []corev1.ResourceName{} evaluators := registry.List() for _, evaluator := range evaluators { potentialResources = append(potentialResources, evaluator.MatchingResources(hardResources)...) 
@@ -259,7 +258,7 @@ func CalculateUsage(namespaceName string, scopes []api.ResourceQuotaScope, hardL matchedResources := Intersection(hardResources, potentialResources) // sum the observed usage from each evaluator - newUsage := api.ResourceList{} + newUsage := corev1.ResourceList{} for _, evaluator := range evaluators { // only trigger the evaluator if it matches a resource in the quota, otherwise, skip calculating anything intersection := evaluator.MatchingResources(matchedResources) diff --git a/pkg/quota/v1/resources_test.go b/pkg/quota/v1/resources_test.go new file mode 100644 index 00000000000..61175c706eb --- /dev/null +++ b/pkg/quota/v1/resources_test.go @@ -0,0 +1,321 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package quota + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +func TestEquals(t *testing.T) { + testCases := map[string]struct { + a corev1.ResourceList + b corev1.ResourceList + expected bool + }{ + "isEqual": { + a: corev1.ResourceList{}, + b: corev1.ResourceList{}, + expected: true, + }, + "isEqualWithKeys": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + b: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + expected: true, + }, + "isNotEqualSameKeys": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + b: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + expected: false, + }, + "isNotEqualDiffKeys": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + b: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + corev1.ResourcePods: resource.MustParse("1"), + }, + expected: false, + }, + } + for testName, testCase := range testCases { + if result := Equals(testCase.a, testCase.b); result != testCase.expected { + t.Errorf("%s expected: %v, actual: %v, a=%v, b=%v", testName, testCase.expected, result, testCase.a, testCase.b) + } + } +} + +func TestMax(t *testing.T) { + testCases := map[string]struct { + a corev1.ResourceList + b corev1.ResourceList + expected corev1.ResourceList + }{ + "noKeys": { + a: corev1.ResourceList{}, + b: corev1.ResourceList{}, + expected: corev1.ResourceList{}, + }, + "toEmpty": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + b: corev1.ResourceList{}, + expected: 
corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + }, + "matching": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + b: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("150m")}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("150m")}, + }, + "matching(reverse)": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("150m")}, + b: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("150m")}, + }, + "matching-equal": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + b: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + }, + } + for testName, testCase := range testCases { + sum := Max(testCase.a, testCase.b) + if result := Equals(testCase.expected, sum); !result { + t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sum) + } + } +} + +func TestAdd(t *testing.T) { + testCases := map[string]struct { + a corev1.ResourceList + b corev1.ResourceList + expected corev1.ResourceList + }{ + "noKeys": { + a: corev1.ResourceList{}, + b: corev1.ResourceList{}, + expected: corev1.ResourceList{}, + }, + "toEmpty": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + b: corev1.ResourceList{}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + }, + "matching": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + b: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("200m")}, + }, + } + for testName, testCase := range testCases { + sum := Add(testCase.a, testCase.b) + if result := Equals(testCase.expected, sum); !result { + t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, 
sum) + } + } +} + +func TestSubtract(t *testing.T) { + testCases := map[string]struct { + a corev1.ResourceList + b corev1.ResourceList + expected corev1.ResourceList + }{ + "noKeys": { + a: corev1.ResourceList{}, + b: corev1.ResourceList{}, + expected: corev1.ResourceList{}, + }, + "value-empty": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + b: corev1.ResourceList{}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + }, + "empty-value": { + a: corev1.ResourceList{}, + b: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("-100m")}, + }, + "value-value": { + a: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("200m")}, + b: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + expected: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")}, + }, + } + for testName, testCase := range testCases { + sub := Subtract(testCase.a, testCase.b) + if result := Equals(testCase.expected, sub); !result { + t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, sub) + } + } +} + +func TestResourceNames(t *testing.T) { + testCases := map[string]struct { + a corev1.ResourceList + expected []corev1.ResourceName + }{ + "empty": { + a: corev1.ResourceList{}, + expected: []corev1.ResourceName{}, + }, + "values": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + expected: []corev1.ResourceName{corev1.ResourceMemory, corev1.ResourceCPU}, + }, + } + for testName, testCase := range testCases { + actualSet := ToSet(ResourceNames(testCase.a)) + expectedSet := ToSet(testCase.expected) + if !actualSet.Equal(expectedSet) { + t.Errorf("%s expected: %v, actual: %v", testName, expectedSet, actualSet) + } + } +} + +func TestContains(t *testing.T) { + testCases := map[string]struct { + a []corev1.ResourceName 
+ b corev1.ResourceName + expected bool + }{ + "does-not-contain": { + a: []corev1.ResourceName{corev1.ResourceMemory}, + b: corev1.ResourceCPU, + expected: false, + }, + "does-contain": { + a: []corev1.ResourceName{corev1.ResourceMemory, corev1.ResourceCPU}, + b: corev1.ResourceCPU, + expected: true, + }, + } + for testName, testCase := range testCases { + if actual := Contains(testCase.a, testCase.b); actual != testCase.expected { + t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, actual) + } + } +} + +func TestContainsPrefix(t *testing.T) { + testCases := map[string]struct { + a []string + b corev1.ResourceName + expected bool + }{ + "does-not-contain": { + a: []string{corev1.ResourceHugePagesPrefix}, + b: corev1.ResourceCPU, + expected: false, + }, + "does-contain": { + a: []string{corev1.ResourceHugePagesPrefix}, + b: corev1.ResourceName(corev1.ResourceHugePagesPrefix + "2Mi"), + expected: true, + }, + } + for testName, testCase := range testCases { + if actual := ContainsPrefix(testCase.a, testCase.b); actual != testCase.expected { + t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, actual) + } + } +} + +func TestIsZero(t *testing.T) { + testCases := map[string]struct { + a corev1.ResourceList + expected bool + }{ + "empty": { + a: corev1.ResourceList{}, + expected: true, + }, + "zero": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("0"), + corev1.ResourceMemory: resource.MustParse("0"), + }, + expected: true, + }, + "non-zero": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + expected: false, + }, + } + for testName, testCase := range testCases { + if result := IsZero(testCase.a); result != testCase.expected { + t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, result) + } + } +} + +func TestIsNegative(t *testing.T) { + testCases := map[string]struct { + a corev1.ResourceList + expected 
[]corev1.ResourceName + }{ + "empty": { + a: corev1.ResourceList{}, + expected: []corev1.ResourceName{}, + }, + "some-negative": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("-10"), + corev1.ResourceMemory: resource.MustParse("0"), + }, + expected: []corev1.ResourceName{corev1.ResourceCPU}, + }, + "all-negative": { + a: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("-200m"), + corev1.ResourceMemory: resource.MustParse("-1Gi"), + }, + expected: []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory}, + }, + } + for testName, testCase := range testCases { + actual := IsNegative(testCase.a) + actualSet := ToSet(actual) + expectedSet := ToSet(testCase.expected) + if !actualSet.Equal(expectedSet) { + t.Errorf("%s expected: %v, actual: %v", testName, expectedSet, actualSet) + } + } +} diff --git a/pkg/registry/apps/deployment/storage/BUILD b/pkg/registry/apps/deployment/storage/BUILD index a61386978d7..bc9b896959d 100644 --- a/pkg/registry/apps/deployment/storage/BUILD +++ b/pkg/registry/apps/deployment/storage/BUILD @@ -49,7 +49,6 @@ go_library( "//pkg/printers/internalversion:go_default_library", "//pkg/printers/storage:go_default_library", "//pkg/registry/apps/deployment:go_default_library", - "//staging/src/k8s.io/api/apps/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/registry/apps/deployment/storage/storage.go b/pkg/registry/apps/deployment/storage/storage.go index 902419b62f2..a1ccfb5d78f 100644 --- a/pkg/registry/apps/deployment/storage/storage.go +++ b/pkg/registry/apps/deployment/storage/storage.go @@ -21,7 +21,6 @@ import ( "fmt" "net/http" - externalappsv1beta1 "k8s.io/api/apps/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ 
-149,7 +148,7 @@ func (r *RollbackREST) ProducesMIMETypes(verb string) []string { // ProducesObject returns an object the specified HTTP verb respond with. It will overwrite storage object if // it is not nil. Only the type of the return object matters, the value will be ignored. func (r *RollbackREST) ProducesObject(verb string) interface{} { - return externalappsv1beta1.DeploymentStatus{} + return metav1.Status{} } var _ = rest.StorageMetadata(&RollbackREST{}) diff --git a/pkg/registry/core/pod/rest/log.go b/pkg/registry/core/pod/rest/log.go index ea74dff5c02..55a3c9a1941 100644 --- a/pkg/registry/core/pod/rest/log.go +++ b/pkg/registry/core/pod/rest/log.go @@ -80,6 +80,7 @@ func (r *LogREST) Get(ctx context.Context, name string, opts runtime.Object) (ru ContentType: "text/plain", Flush: logOpts.Follow, ResponseChecker: genericrest.NewGenericHttpResponseChecker(api.Resource("pods/log"), name), + RedirectChecker: genericrest.PreventRedirects, }, nil } diff --git a/pkg/registry/core/pod/rest/subresources.go b/pkg/registry/core/pod/rest/subresources.go index c914450ee6c..5ca99b8980f 100644 --- a/pkg/registry/core/pod/rest/subresources.go +++ b/pkg/registry/core/pod/rest/subresources.go @@ -194,6 +194,7 @@ func (r *PortForwardREST) Connect(ctx context.Context, name string, opts runtime func newThrottledUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired, interceptRedirects bool, responder rest.Responder) *proxy.UpgradeAwareHandler { handler := proxy.NewUpgradeAwareHandler(location, transport, wrapTransport, upgradeRequired, proxy.NewErrorResponder(responder)) handler.InterceptRedirects = interceptRedirects && utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StreamingProxyRedirects) + handler.RequireSameHostRedirects = utilfeature.DefaultFeatureGate.Enabled(genericfeatures.ValidateProxyRedirects) handler.MaxBytesPerSec = capabilities.Get().PerConnectionBandwidthLimitBytesPerSec return handler } diff --git 
a/pkg/registry/core/pod/storage/eviction.go b/pkg/registry/core/pod/storage/eviction.go index f0f3e375672..3e3827a079c 100644 --- a/pkg/registry/core/pod/storage/eviction.go +++ b/pkg/registry/core/pod/storage/eviction.go @@ -86,6 +86,16 @@ func (r *EvictionREST) Create(ctx context.Context, obj runtime.Object, createVal return nil, err } pod := obj.(*api.Pod) + // Evicting a terminal pod should result in direct deletion of pod as it already caused disruption by the time we are evicting. + // There is no need to check for pdb. + if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed { + _, _, err = r.store.Delete(ctx, eviction.Name, eviction.DeleteOptions) + if err != nil { + return nil, err + } + return &metav1.Status{ + Status: metav1.StatusSuccess}, nil + } var rtStatus *metav1.Status var pdbName string err = retry.RetryOnConflict(EvictionsRetry, func() error { diff --git a/pkg/registry/core/pod/storage/storage_test.go b/pkg/registry/core/pod/storage/storage_test.go index 9a2a8774bf2..c806fe6441c 100644 --- a/pkg/registry/core/pod/storage/storage_test.go +++ b/pkg/registry/core/pod/storage/storage_test.go @@ -61,6 +61,7 @@ func newStorage(t *testing.T) (*REST, *BindingREST, *StatusREST, *etcdtesting.Et func validNewPod() *api.Pod { grace := int64(30) + enableServiceLinks := v1.DefaultEnableServiceLinks return &api.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", @@ -82,8 +83,9 @@ func validNewPod() *api.Pod { SecurityContext: securitycontext.ValidInternalSecurityContextWithContainerDefaults(), }, }, - SecurityContext: &api.PodSecurityContext{}, - SchedulerName: api.DefaultSchedulerName, + SecurityContext: &api.PodSecurityContext{}, + SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, } } @@ -832,6 +834,7 @@ func TestEtcdUpdateScheduled(t *testing.T) { } grace := int64(30) + enableServiceLinks := v1.DefaultEnableServiceLinks podIn := api.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", @@ -855,6 +858,7 @@ 
func TestEtcdUpdateScheduled(t *testing.T) { TerminationGracePeriodSeconds: &grace, SecurityContext: &api.PodSecurityContext{}, SchedulerName: api.DefaultSchedulerName, + EnableServiceLinks: &enableServiceLinks, }, } _, _, err = storage.Update(ctx, podIn.Name, rest.DefaultUpdatedObjectInfo(&podIn), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}) @@ -932,9 +936,11 @@ func TestEtcdUpdateStatus(t *testing.T) { expected := podStart expected.ResourceVersion = "2" grace := int64(30) + enableServiceLinks := v1.DefaultEnableServiceLinks expected.Spec.TerminationGracePeriodSeconds = &grace expected.Spec.RestartPolicy = api.RestartPolicyAlways expected.Spec.DNSPolicy = api.DNSClusterFirst + expected.Spec.EnableServiceLinks = &enableServiceLinks expected.Spec.Containers[0].ImagePullPolicy = api.PullIfNotPresent expected.Spec.Containers[0].TerminationMessagePath = api.TerminationMessagePathDefault expected.Spec.Containers[0].TerminationMessagePolicy = api.TerminationMessageReadFile diff --git a/pkg/registry/core/service/storage/BUILD b/pkg/registry/core/service/storage/BUILD index e3c3d527450..779dd91710e 100644 --- a/pkg/registry/core/service/storage/BUILD +++ b/pkg/registry/core/service/storage/BUILD @@ -55,7 +55,6 @@ go_library( "//pkg/apis/core:go_default_library", "//pkg/apis/core/helper:go_default_library", "//pkg/apis/core/validation:go_default_library", - "//pkg/features:go_default_library", "//pkg/printers:go_default_library", "//pkg/printers/internalversion:go_default_library", "//pkg/printers/storage:go_default_library", @@ -76,7 +75,6 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", "//staging/src/k8s.io/apiserver/pkg/registry/rest:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/dryrun:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//vendor/github.com/golang/glog:go_default_library", ], ) diff --git 
a/pkg/registry/core/service/storage/rest.go b/pkg/registry/core/service/storage/rest.go index 3a11a3d5f78..15862cdb99f 100644 --- a/pkg/registry/core/service/storage/rest.go +++ b/pkg/registry/core/service/storage/rest.go @@ -38,13 +38,11 @@ import ( genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/apiserver/pkg/util/dryrun" - utilfeature "k8s.io/apiserver/pkg/util/feature" apiservice "k8s.io/kubernetes/pkg/api/service" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/apis/core/validation" - "k8s.io/kubernetes/pkg/features" registry "k8s.io/kubernetes/pkg/registry/core/service" "k8s.io/kubernetes/pkg/registry/core/service/ipallocator" "k8s.io/kubernetes/pkg/registry/core/service/portallocator" @@ -483,11 +481,9 @@ func (rs *REST) ResourceLocation(ctx context.Context, id string) (*url.URL, http // but in the expected case we'll only make one. for try := 0; try < len(ss.Addresses); try++ { addr := ss.Addresses[(addrSeed+try)%len(ss.Addresses)] - if !utilfeature.DefaultFeatureGate.Enabled(features.ServiceProxyAllowExternalIPs) { - if err := isValidAddress(ctx, &addr, rs.pods); err != nil { - utilruntime.HandleError(fmt.Errorf("Address %v isn't valid (%v)", addr, err)) - continue - } + if err := isValidAddress(ctx, &addr, rs.pods); err != nil { + utilruntime.HandleError(fmt.Errorf("Address %v isn't valid (%v)", addr, err)) + continue } ip := addr.IP port := int(ss.Ports[i].Port) diff --git a/pkg/registry/extensions/rest/storage_extensions.go b/pkg/registry/extensions/rest/storage_extensions.go index 0d7e7ebb299..2b21e502f07 100644 --- a/pkg/registry/extensions/rest/storage_extensions.go +++ b/pkg/registry/extensions/rest/storage_extensions.go @@ -47,17 +47,6 @@ func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorag return apiGroupInfo, true } -type RollbackREST struct { - *deploymentstore.RollbackREST -} - -// override 
RollbackREST.ProducesObject -func (r *RollbackREST) ProducesObject(verb string) interface{} { - return extensionsapiv1beta1.DeploymentStatus{} -} - -var _ = rest.StorageMetadata(&RollbackREST{}) - func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) map[string]rest.Storage { storage := map[string]rest.Storage{} @@ -76,7 +65,7 @@ func (p RESTStorageProvider) v1beta1Storage(apiResourceConfigSource serverstorag deploymentStorage := deploymentstore.NewStorage(restOptionsGetter) storage["deployments"] = deploymentStorage.Deployment.WithCategories(nil) storage["deployments/status"] = deploymentStorage.Status - storage["deployments/rollback"] = &RollbackREST{deploymentStorage.Rollback} + storage["deployments/rollback"] = deploymentStorage.Rollback storage["deployments/scale"] = deploymentStorage.Scale // ingresses ingressStorage, ingressStatusStorage := ingressstore.NewREST(restOptionsGetter) diff --git a/pkg/scheduler/algorithm/BUILD b/pkg/scheduler/algorithm/BUILD index 254199dd6ff..3af3c59a4da 100644 --- a/pkg/scheduler/algorithm/BUILD +++ b/pkg/scheduler/algorithm/BUILD @@ -21,6 +21,7 @@ go_library( "//pkg/scheduler/cache:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", ], ) diff --git a/pkg/scheduler/algorithm/predicates/metadata.go b/pkg/scheduler/algorithm/predicates/metadata.go index f3dd8c895d1..4695bef0ade 100644 --- a/pkg/scheduler/algorithm/predicates/metadata.go +++ b/pkg/scheduler/algorithm/predicates/metadata.go @@ -72,10 +72,10 @@ type predicateMetadata struct { topologyPairsAntiAffinityPodsMap *topologyPairsMaps // A map of topology pairs to a list of Pods that can potentially match - // the affinity rules of the "pod" and its inverse. 
+ // the affinity terms of the "pod" and its inverse. topologyPairsPotentialAffinityPods *topologyPairsMaps // A map of topology pairs to a list of Pods that can potentially match - // the anti-affinity rules of the "pod" and its inverse. + // the anti-affinity terms of the "pod" and its inverse. topologyPairsPotentialAntiAffinityPods *topologyPairsMaps serviceAffinityInUse bool serviceAffinityMatchingPodList []*v1.Pod @@ -130,11 +130,14 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf if pod == nil { return nil } - topologyPairsMaps, err := getMatchingTopologyPairs(pod, nodeNameToInfoMap) + // existingPodAntiAffinityMap will be used later for efficient check on existing pods' anti-affinity + existingPodAntiAffinityMap, err := getTPMapMatchingExistingAntiAffinity(pod, nodeNameToInfoMap) if err != nil { return nil } - topologyPairsAffinityPodsMaps, topologyPairsAntiAffinityPodsMaps, err := getPodsMatchingAffinity(pod, nodeNameToInfoMap) + // incomingPodAffinityMap will be used later for efficient check on incoming pod's affinity + // incomingPodAntiAffinityMap will be used later for efficient check on incoming pod's anti-affinity + incomingPodAffinityMap, incomingPodAntiAffinityMap, err := getTPMapMatchingIncomingAffinityAntiAffinity(pod, nodeNameToInfoMap) if err != nil { glog.Errorf("[predicate meta data generation] error finding pods that match affinity terms: %v", err) return nil @@ -144,9 +147,9 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf podBestEffort: isPodBestEffort(pod), podRequest: GetResourceRequest(pod), podPorts: schedutil.GetContainerPorts(pod), - topologyPairsPotentialAffinityPods: topologyPairsAffinityPodsMaps, - topologyPairsPotentialAntiAffinityPods: topologyPairsAntiAffinityPodsMaps, - topologyPairsAntiAffinityPodsMap: topologyPairsMaps, + topologyPairsPotentialAffinityPods: incomingPodAffinityMap, + topologyPairsPotentialAntiAffinityPods: incomingPodAntiAffinityMap, + 
topologyPairsAntiAffinityPodsMap: existingPodAntiAffinityMap, } for predicateName, precomputeFunc := range predicateMetadataProducers { glog.V(10).Infof("Precompute: %v", predicateName) @@ -185,6 +188,9 @@ func (topologyPairsMaps *topologyPairsMaps) removePod(deletedPod *v1.Pod) { } func (topologyPairsMaps *topologyPairsMaps) appendMaps(toAppend *topologyPairsMaps) { + if toAppend == nil { + return + } for pair := range toAppend.topologyPairToPods { for pod := range toAppend.topologyPairToPods[pair] { topologyPairsMaps.addTopologyPair(pair, pod) @@ -232,13 +238,11 @@ func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulercache return fmt.Errorf("invalid node in nodeInfo") } // Add matching anti-affinity terms of the addedPod to the map. - topologyPairsMaps, err := getMatchingTopologyPairsOfExistingPod(meta.pod, addedPod, nodeInfo.Node()) + topologyPairsMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(meta.pod, addedPod, nodeInfo.Node()) if err != nil { return err } - if len(topologyPairsMaps.podToTopologyPairs) > 0 { - meta.topologyPairsAntiAffinityPodsMap.appendMaps(topologyPairsMaps) - } + meta.topologyPairsAntiAffinityPodsMap.appendMaps(topologyPairsMaps) // Add the pod to nodeNameToMatchingAffinityPods and nodeNameToMatchingAntiAffinityPods if needed. affinity := meta.pod.Spec.Affinity podNodeName := addedPod.Spec.NodeName @@ -325,8 +329,8 @@ func getAffinityTermProperties(pod *v1.Pod, terms []v1.PodAffinityTerm) (propert return properties, nil } -// podMatchesAffinityTermProperties return true IFF the given pod matches all the given properties. -func podMatchesAffinityTermProperties(pod *v1.Pod, properties []*affinityTermProperties) bool { +// podMatchesAllAffinityTermProperties returns true IFF the given pod matches all the given properties. 
+func podMatchesAllAffinityTermProperties(pod *v1.Pod, properties []*affinityTermProperties) bool { if len(properties) == 0 { return false } @@ -338,11 +342,71 @@ func podMatchesAffinityTermProperties(pod *v1.Pod, properties []*affinityTermPro return true } -// getPodsMatchingAffinity finds existing Pods that match affinity terms of the given "pod". -// It ignores topology. It returns a set of Pods that are checked later by the affinity -// predicate. With this set of pods available, the affinity predicate does not +// podMatchesAnyAffinityTermProperties returns true if the given pod matches any given property. +func podMatchesAnyAffinityTermProperties(pod *v1.Pod, properties []*affinityTermProperties) bool { + if len(properties) == 0 { + return false + } + for _, property := range properties { + if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, property.namespaces, property.selector) { + return true + } + } + return false +} + +// getTPMapMatchingExistingAntiAffinity calculates the following for each existing pod on each node: +// (1) Whether it has PodAntiAffinity +// (2) Whether any AffinityTerm matches the incoming pod +func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (*topologyPairsMaps, error) { + allNodeNames := make([]string, 0, len(nodeInfoMap)) + for name := range nodeInfoMap { + allNodeNames = append(allNodeNames, name) + } + + var lock sync.Mutex + var firstError error + + topologyMaps := newTopologyPairsMaps() + + appendTopologyPairsMaps := func(toAppend *topologyPairsMaps) { + lock.Lock() + defer lock.Unlock() + topologyMaps.appendMaps(toAppend) + } + catchError := func(err error) { + lock.Lock() + defer lock.Unlock() + if firstError == nil { + firstError = err + } + } + + processNode := func(i int) { + nodeInfo := nodeInfoMap[allNodeNames[i]] + node := nodeInfo.Node() + if node == nil { + catchError(fmt.Errorf("node not found")) + return + } + for _, existingPod := range 
nodeInfo.PodsWithAffinity() { + existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, node) + if err != nil { + catchError(err) + return + } + appendTopologyPairsMaps(existingPodTopologyMaps) + } + } + workqueue.Parallelize(16, len(allNodeNames), processNode) + return topologyMaps, firstError +} + +// getTPMapMatchingIncomingAffinityAntiAffinity finds existing Pods that match affinity terms of the given "pod". +// It returns a topologyPairsMaps that are checked later by the affinity +// predicate. With this topologyPairsMaps available, the affinity predicate does not // need to check all the pods in the cluster. -func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (topologyPairsAffinityPodsMaps *topologyPairsMaps, topologyPairsAntiAffinityPodsMaps *topologyPairsMaps, err error) { +func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (topologyPairsAffinityPodsMaps *topologyPairsMaps, topologyPairsAntiAffinityPodsMaps *topologyPairsMaps, err error) { allNodeNames := make([]string, 0, len(nodeInfoMap)) affinity := pod.Spec.Affinity @@ -377,17 +441,13 @@ func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache } } - affinityProperties, err := getAffinityTermProperties(pod, GetPodAffinityTerms(affinity.PodAffinity)) - if err != nil { - return nil, nil, err - } - antiAffinityProperties, err := getAffinityTermProperties(pod, GetPodAntiAffinityTerms(affinity.PodAntiAffinity)) - if err != nil { - return nil, nil, err - } - affinityTerms := GetPodAffinityTerms(affinity.PodAffinity) + affinityProperties, err := getAffinityTermProperties(pod, affinityTerms) + if err != nil { + return nil, nil, err + } antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity) + processNode := func(i int) { nodeInfo := nodeInfoMap[allNodeNames[i]] node := nodeInfo.Node() @@ -399,7 +459,7 @@ func getPodsMatchingAffinity(pod 
*v1.Pod, nodeInfoMap map[string]*schedulercache nodeTopologyPairsAntiAffinityPodsMaps := newTopologyPairsMaps() for _, existingPod := range nodeInfo.Pods() { // Check affinity properties. - if podMatchesAffinityTermProperties(existingPod, affinityProperties) { + if podMatchesAllAffinityTermProperties(existingPod, affinityProperties) { for _, term := range affinityTerms { if topologyValue, ok := node.Labels[term.TopologyKey]; ok { pair := topologyPair{key: term.TopologyKey, value: topologyValue} @@ -408,8 +468,14 @@ func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache } } // Check anti-affinity properties. - if podMatchesAffinityTermProperties(existingPod, antiAffinityProperties) { - for _, term := range antiAffinityTerms { + for _, term := range antiAffinityTerms { + namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, &term) + selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) + if err != nil { + catchError(err) + return + } + if priorityutil.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) { if topologyValue, ok := node.Labels[term.TopologyKey]; ok { pair := topologyPair{key: term.TopologyKey, value: topologyValue} nodeTopologyPairsAntiAffinityPodsMaps.addTopologyPair(pair, existingPod) @@ -425,7 +491,7 @@ func getPodsMatchingAffinity(pod *v1.Pod, nodeInfoMap map[string]*schedulercache return topologyPairsAffinityPodsMaps, topologyPairsAntiAffinityPodsMaps, firstError } -// podMatchesAffinity returns true if "targetPod" matches any affinity rule of +// targetPodMatchesAffinityOfPod returns true if "targetPod" matches ALL affinity terms of // "pod". Similar to getPodsMatchingAffinity, this function does not check topology. // So, whether the targetPod actually matches or not needs further checks for a specific // node. 
@@ -439,11 +505,11 @@ func targetPodMatchesAffinityOfPod(pod, targetPod *v1.Pod) bool { glog.Errorf("error in getting affinity properties of Pod %v", pod.Name) return false } - return podMatchesAffinityTermProperties(targetPod, affinityProperties) + return podMatchesAllAffinityTermProperties(targetPod, affinityProperties) } -// targetPodMatchesAntiAffinityOfPod returns true if "targetPod" matches any anti-affinity -// rule of "pod". Similar to getPodsMatchingAffinity, this function does not check topology. +// targetPodMatchesAntiAffinityOfPod returns true if "targetPod" matches ANY anti-affinity +// term of "pod". Similar to getPodsMatchingAffinity, this function does not check topology. // So, whether the targetPod actually matches or not needs further checks for a specific // node. func targetPodMatchesAntiAffinityOfPod(pod, targetPod *v1.Pod) bool { @@ -456,5 +522,5 @@ func targetPodMatchesAntiAffinityOfPod(pod, targetPod *v1.Pod) bool { glog.Errorf("error in getting anti-affinity properties of Pod %v", pod.Name) return false } - return podMatchesAffinityTermProperties(targetPod, properties) + return podMatchesAnyAffinityTermProperties(targetPod, properties) } diff --git a/pkg/scheduler/algorithm/predicates/metadata_test.go b/pkg/scheduler/algorithm/predicates/metadata_test.go index 0d724e5ae84..7fb1e1f3115 100644 --- a/pkg/scheduler/algorithm/predicates/metadata_test.go +++ b/pkg/scheduler/algorithm/predicates/metadata_test.go @@ -525,3 +525,269 @@ func TestPredicateMetadata_ShallowCopy(t *testing.T) { t.Errorf("Copy is not equal to source!") } } + +// TestGetTPMapMatchingIncomingAffinityAntiAffinity tests against method getTPMapMatchingIncomingAffinityAntiAffinity +// on Anti Affinity cases +func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) { + newPodAffinityTerms := func(keys ...string) []v1.PodAffinityTerm { + var terms []v1.PodAffinityTerm + for _, key := range keys { + terms = append(terms, v1.PodAffinityTerm{ + LabelSelector: 
&metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: key, + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "hostname", + }) + } + return terms + } + newPod := func(labels ...string) *v1.Pod { + labelMap := make(map[string]string) + for _, l := range labels { + labelMap[l] = "" + } + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "normal", Labels: labelMap}, + Spec: v1.PodSpec{NodeName: "nodeA"}, + } + } + normalPodA := newPod("aaa") + normalPodB := newPod("bbb") + normalPodAB := newPod("aaa", "bbb") + nodeA := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"hostname": "nodeA"}}} + + tests := []struct { + name string + existingPods []*v1.Pod + nodes []*v1.Node + pod *v1.Pod + wantAffinityPodsMaps *topologyPairsMaps + wantAntiAffinityPodsMaps *topologyPairsMaps + wantErr bool + }{ + { + name: "nil test", + nodes: []*v1.Node{nodeA}, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "aaa-normal"}, + }, + wantAffinityPodsMaps: newTopologyPairsMaps(), + wantAntiAffinityPodsMaps: newTopologyPairsMaps(), + }, + { + name: "incoming pod without affinity/anti-affinity causes a no-op", + existingPods: []*v1.Pod{normalPodA}, + nodes: []*v1.Node{nodeA}, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "aaa-normal"}, + }, + wantAffinityPodsMaps: newTopologyPairsMaps(), + wantAntiAffinityPodsMaps: newTopologyPairsMaps(), + }, + { + name: "no pod has label that violates incoming pod's affinity and anti-affinity", + existingPods: []*v1.Pod{normalPodB}, + nodes: []*v1.Node{nodeA}, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "aaa-anti"}, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAffinity: &v1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"), + }, + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"), + }, + }, + }, + }, + wantAffinityPodsMaps: 
newTopologyPairsMaps(), + wantAntiAffinityPodsMaps: newTopologyPairsMaps(), + }, + { + name: "existing pod matches incoming pod's affinity and anti-affinity - single term case", + existingPods: []*v1.Pod{normalPodA}, + nodes: []*v1.Node{nodeA}, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "affi-antiaffi"}, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAffinity: &v1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"), + }, + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"), + }, + }, + }, + }, + wantAffinityPodsMaps: &topologyPairsMaps{ + topologyPairToPods: map[topologyPair]podSet{ + {key: "hostname", value: "nodeA"}: {normalPodA: struct{}{}}, + }, + podToTopologyPairs: map[string]topologyPairSet{ + "normal_": { + topologyPair{key: "hostname", value: "nodeA"}: struct{}{}, + }, + }, + }, + wantAntiAffinityPodsMaps: &topologyPairsMaps{ + topologyPairToPods: map[topologyPair]podSet{ + {key: "hostname", value: "nodeA"}: {normalPodA: struct{}{}}, + }, + podToTopologyPairs: map[string]topologyPairSet{ + "normal_": { + topologyPair{key: "hostname", value: "nodeA"}: struct{}{}, + }, + }, + }, + }, + { + name: "existing pod matches incoming pod's affinity and anti-affinity - mutiple terms case", + existingPods: []*v1.Pod{normalPodAB}, + nodes: []*v1.Node{nodeA}, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "affi-antiaffi"}, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAffinity: &v1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"), + }, + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa"), + }, + }, + }, + }, + wantAffinityPodsMaps: &topologyPairsMaps{ + topologyPairToPods: map[topologyPair]podSet{ + {key: "hostname", value: "nodeA"}: {normalPodAB: struct{}{}}, + }, + podToTopologyPairs: map[string]topologyPairSet{ + "normal_": { + 
topologyPair{key: "hostname", value: "nodeA"}: struct{}{}, + }, + }, + }, + wantAntiAffinityPodsMaps: &topologyPairsMaps{ + topologyPairToPods: map[topologyPair]podSet{ + {key: "hostname", value: "nodeA"}: {normalPodAB: struct{}{}}, + }, + podToTopologyPairs: map[string]topologyPairSet{ + "normal_": { + topologyPair{key: "hostname", value: "nodeA"}: struct{}{}, + }, + }, + }, + }, + { + name: "existing pod not match incoming pod's affinity but matches anti-affinity", + existingPods: []*v1.Pod{normalPodA}, + nodes: []*v1.Node{nodeA}, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "affi-antiaffi"}, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAffinity: &v1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"), + }, + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"), + }, + }, + }, + }, + wantAffinityPodsMaps: newTopologyPairsMaps(), + wantAntiAffinityPodsMaps: &topologyPairsMaps{ + topologyPairToPods: map[topologyPair]podSet{ + {key: "hostname", value: "nodeA"}: {normalPodA: struct{}{}}, + }, + podToTopologyPairs: map[string]topologyPairSet{ + "normal_": { + topologyPair{key: "hostname", value: "nodeA"}: struct{}{}, + }, + }, + }, + }, + { + name: "incoming pod's anti-affinity has more than one term - existing pod violates partial term - case 1", + existingPods: []*v1.Pod{normalPodAB}, + nodes: []*v1.Node{nodeA}, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "anaffi-antiaffiti"}, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAffinity: &v1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "ccc"), + }, + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "ccc"), + }, + }, + }, + }, + wantAffinityPodsMaps: newTopologyPairsMaps(), + wantAntiAffinityPodsMaps: &topologyPairsMaps{ + topologyPairToPods: map[topologyPair]podSet{ + 
{key: "hostname", value: "nodeA"}: {normalPodAB: struct{}{}}, + }, + podToTopologyPairs: map[string]topologyPairSet{ + "normal_": { + topologyPair{key: "hostname", value: "nodeA"}: struct{}{}, + }, + }, + }, + }, + { + name: "incoming pod's anti-affinity has more than one term - existing pod violates partial term - case 2", + existingPods: []*v1.Pod{normalPodB}, + nodes: []*v1.Node{nodeA}, + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "affi-antiaffi"}, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAffinity: &v1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"), + }, + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: newPodAffinityTerms("aaa", "bbb"), + }, + }, + }, + }, + wantAffinityPodsMaps: newTopologyPairsMaps(), + wantAntiAffinityPodsMaps: &topologyPairsMaps{ + topologyPairToPods: map[topologyPair]podSet{ + {key: "hostname", value: "nodeA"}: {normalPodB: struct{}{}}, + }, + podToTopologyPairs: map[string]topologyPairSet{ + "normal_": { + topologyPair{key: "hostname", value: "nodeA"}: struct{}{}, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + nodeInfoMap := schedulercache.CreateNodeNameToInfoMap(tt.existingPods, tt.nodes) + + gotAffinityPodsMaps, gotAntiAffinityPodsMaps, err := getTPMapMatchingIncomingAffinityAntiAffinity(tt.pod, nodeInfoMap) + if (err != nil) != tt.wantErr { + t.Errorf("getTPMapMatchingIncomingAffinityAntiAffinity() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotAffinityPodsMaps, tt.wantAffinityPodsMaps) { + t.Errorf("getTPMapMatchingIncomingAffinityAntiAffinity() gotAffinityPodsMaps = %#v, want %#v", gotAffinityPodsMaps, tt.wantAffinityPodsMaps) + } + if !reflect.DeepEqual(gotAntiAffinityPodsMaps, tt.wantAntiAffinityPodsMaps) { + t.Errorf("getTPMapMatchingIncomingAffinityAntiAffinity() gotAntiAffinityPodsMaps = %#v, want %#v", gotAntiAffinityPodsMaps, 
tt.wantAntiAffinityPodsMaps) + } + }) + } +} diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go index 4ba810bd4f8..b1bace2fe30 100644 --- a/pkg/scheduler/algorithm/predicates/predicates.go +++ b/pkg/scheduler/algorithm/predicates/predicates.go @@ -22,7 +22,6 @@ import ( "os" "regexp" "strconv" - "sync" "github.com/golang/glog" @@ -37,7 +36,6 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" corelisters "k8s.io/client-go/listers/core/v1" storagelisters "k8s.io/client-go/listers/storage/v1" - "k8s.io/client-go/util/workqueue" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/features" @@ -1187,7 +1185,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta algorithm // targetPod matches all the terms and their topologies, 2) whether targetPod // matches all the terms label selector and namespaces (AKA term properties), // 3) any error. -func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod *v1.Pod, targetPod *v1.Pod, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) { +func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) (bool, bool, error) { if len(terms) == 0 { return false, false, fmt.Errorf("terms array is empty") } @@ -1195,7 +1193,7 @@ func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod *v1.Pod, targetPod * if err != nil { return false, false, err } - if !podMatchesAffinityTermProperties(targetPod, props) { + if !podMatchesAllAffinityTermProperties(targetPod, props) { return false, false, nil } // Namespace and selector of the terms have matched. Now we check topology of the terms. 
@@ -1242,112 +1240,55 @@ func GetPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.Po return terms } -func getMatchingTopologyPairs(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) (*topologyPairsMaps, error) { - allNodeNames := make([]string, 0, len(nodeInfoMap)) - for name := range nodeInfoMap { - allNodeNames = append(allNodeNames, name) - } - - var lock sync.Mutex - var firstError error - - topologyMaps := newTopologyPairsMaps() - - appendTopologyPairsMaps := func(toAppend *topologyPairsMaps) { - lock.Lock() - defer lock.Unlock() - topologyMaps.appendMaps(toAppend) - } - catchError := func(err error) { - lock.Lock() - defer lock.Unlock() - if firstError == nil { - firstError = err - } - } - - processNode := func(i int) { - nodeInfo := nodeInfoMap[allNodeNames[i]] - node := nodeInfo.Node() - if node == nil { - catchError(fmt.Errorf("node not found")) - return - } - nodeTopologyMaps := newTopologyPairsMaps() - for _, existingPod := range nodeInfo.PodsWithAffinity() { - affinity := existingPod.Spec.Affinity - if affinity == nil { - continue - } - for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) { - namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term) - selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) - if err != nil { - catchError(err) - return - } - if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) { - if topologyValue, ok := node.Labels[term.TopologyKey]; ok { - pair := topologyPair{key: term.TopologyKey, value: topologyValue} - nodeTopologyMaps.addTopologyPair(pair, existingPod) - } - } - } - if len(nodeTopologyMaps.podToTopologyPairs) > 0 { - appendTopologyPairsMaps(nodeTopologyMaps) - } - } - } - workqueue.Parallelize(16, len(allNodeNames), processNode) - return topologyMaps, firstError -} - -func getMatchingTopologyPairsOfExistingPod(newPod *v1.Pod, existingPod *v1.Pod, node *v1.Node) (*topologyPairsMaps, error) { - topologyMaps 
:= newTopologyPairsMaps() +// getMatchingAntiAffinityTopologyPairs calculates the following for "existingPod" on given node: +// (1) Whether it has PodAntiAffinity +// (2) Whether ANY AffinityTerm matches the incoming pod +func getMatchingAntiAffinityTopologyPairsOfPod(newPod *v1.Pod, existingPod *v1.Pod, node *v1.Node) (*topologyPairsMaps, error) { affinity := existingPod.Spec.Affinity - if affinity != nil && affinity.PodAntiAffinity != nil { - for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) { - namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term) - selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) - if err != nil { - return nil, err - } - if priorityutil.PodMatchesTermsNamespaceAndSelector(newPod, namespaces, selector) { - if topologyValue, ok := node.Labels[term.TopologyKey]; ok { - pair := topologyPair{key: term.TopologyKey, value: topologyValue} - topologyMaps.addTopologyPair(pair, existingPod) - } + if affinity == nil || affinity.PodAntiAffinity == nil { + return nil, nil + } + + topologyMaps := newTopologyPairsMaps() + for _, term := range GetPodAntiAffinityTerms(affinity.PodAntiAffinity) { + namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term) + selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) + if err != nil { + return nil, err + } + if priorityutil.PodMatchesTermsNamespaceAndSelector(newPod, namespaces, selector) { + if topologyValue, ok := node.Labels[term.TopologyKey]; ok { + pair := topologyPair{key: term.TopologyKey, value: topologyValue} + topologyMaps.addTopologyPair(pair, existingPod) } } } return topologyMaps, nil } -func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairs(pod *v1.Pod, allPods []*v1.Pod) (*topologyPairsMaps, error) { + +func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.Pod, existingPods []*v1.Pod) (*topologyPairsMaps, error) { topologyMaps := newTopologyPairsMaps() - for _, 
existingPod := range allPods { - affinity := existingPod.Spec.Affinity - if affinity != nil && affinity.PodAntiAffinity != nil { - existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName) - if err != nil { - if apierrors.IsNotFound(err) { - glog.Errorf("Node not found, %v", existingPod.Spec.NodeName) - continue - } - return nil, err + for _, existingPod := range existingPods { + existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName) + if err != nil { + if apierrors.IsNotFound(err) { + glog.Errorf("Node not found, %v", existingPod.Spec.NodeName) + continue } - existingPodsTopologyMaps, err := getMatchingTopologyPairsOfExistingPod(pod, existingPod, existingPodNode) - if err != nil { - return nil, err - } - topologyMaps.appendMaps(existingPodsTopologyMaps) + return nil, err } + existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, existingPodNode) + if err != nil { + return nil, err + } + topologyMaps.appendMaps(existingPodTopologyMaps) } return topologyMaps, nil } // Checks if scheduling the pod onto this node would break any anti-affinity -// rules indicated by the existing pods. +// terms indicated by the existing pods. 
func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { @@ -1365,7 +1306,7 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta glog.Error(errMessage) return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage) } - if topologyMaps, err = c.getMatchingAntiAffinityTopologyPairs(pod, filteredPods); err != nil { + if topologyMaps, err = c.getMatchingAntiAffinityTopologyPairsOfPods(pod, filteredPods); err != nil { errMessage := fmt.Sprintf("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err) glog.Error(errMessage) return ErrExistingPodsAntiAffinityRulesNotMatch, errors.New(errMessage) @@ -1373,7 +1314,7 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta } // Iterate over topology pairs to get any of the pods being affected by - // the scheduled pod anti-affinity rules + // the scheduled pod anti-affinity terms for topologyKey, topologyValue := range node.Labels { if topologyMaps.topologyPairToPods[topologyPair{key: topologyKey, value: topologyValue}] != nil { glog.V(10).Infof("Cannot schedule pod %+v onto node %v", podName(pod), node.Name) @@ -1383,15 +1324,15 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta if glog.V(10) { // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is // not logged. There is visible performance gain from it. 
- glog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity rules satisfied.", + glog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity terms satisfied.", podName(pod), node.Name) } return nil, nil } -// nodeMatchesTopologyTerms checks whether "nodeInfo" matches +// nodeMatchesAllTopologyTerms checks whether "nodeInfo" matches // topology of all the "terms" for the given "pod". -func (c *PodAffinityChecker) nodeMatchesTopologyTerms(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) bool { +func (c *PodAffinityChecker) nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) bool { node := nodeInfo.Node() for _, term := range terms { if topologyValue, ok := node.Labels[term.TopologyKey]; ok { @@ -1406,7 +1347,22 @@ func (c *PodAffinityChecker) nodeMatchesTopologyTerms(pod *v1.Pod, topologyPairs return true } -// Checks if scheduling the pod onto this node would break any rules of this pod. +// nodeMatchesAnyTopologyTerm checks whether "nodeInfo" matches +// topology of any "term" for the given "pod". +func (c *PodAffinityChecker) nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs *topologyPairsMaps, nodeInfo *schedulercache.NodeInfo, terms []v1.PodAffinityTerm) bool { + node := nodeInfo.Node() + for _, term := range terms { + if topologyValue, ok := node.Labels[term.TopologyKey]; ok { + pair := topologyPair{key: term.TopologyKey, value: topologyValue} + if _, ok := topologyPairs.topologyPairToPods[pair]; ok { + return true + } + } + } + return false +} + +// Checks if scheduling the pod onto this node would break any term of this pod. 
func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo, affinity *v1.Affinity) (algorithm.PredicateFailureReason, error) { @@ -1418,7 +1374,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, // Check all affinity terms. topologyPairsPotentialAffinityPods := predicateMeta.topologyPairsPotentialAffinityPods if affinityTerms := GetPodAffinityTerms(affinity.PodAffinity); len(affinityTerms) > 0 { - matchExists := c.nodeMatchesTopologyTerms(pod, topologyPairsPotentialAffinityPods, nodeInfo, affinityTerms) + matchExists := c.nodeMatchesAllTopologyTerms(pod, topologyPairsPotentialAffinityPods, nodeInfo, affinityTerms) if !matchExists { // This pod may the first pod in a series that have affinity to themselves. In order // to not leave such pods in pending state forever, we check that if no other pod @@ -1435,14 +1391,14 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, // Check all anti-affinity terms. topologyPairsPotentialAntiAffinityPods := predicateMeta.topologyPairsPotentialAntiAffinityPods if antiAffinityTerms := GetPodAntiAffinityTerms(affinity.PodAntiAffinity); len(antiAffinityTerms) > 0 { - matchExists := c.nodeMatchesTopologyTerms(pod, topologyPairsPotentialAntiAffinityPods, nodeInfo, antiAffinityTerms) + matchExists := c.nodeMatchesAnyTopologyTerm(pod, topologyPairsPotentialAntiAffinityPods, nodeInfo, antiAffinityTerms) if matchExists { glog.V(10).Infof("Cannot schedule pod %+v onto node %v, because of PodAntiAffinity", podName(pod), node.Name) return ErrPodAntiAffinityRulesNotMatch, nil } } - } else { // We don't have precomputed metadata. We have to follow a slow path to check affinity rules. + } else { // We don't have precomputed metadata. We have to follow a slow path to check affinity terms. 
filteredPods, err := c.podLister.FilteredList(nodeInfo.Filter, labels.Everything()) if err != nil { return ErrPodAffinityRulesNotMatch, err @@ -1480,7 +1436,7 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, } if !matchFound && len(affinityTerms) > 0 { - // We have not been able to find any matches for the pod's affinity rules. + // We have not been able to find any matches for the pod's affinity terms. // This pod may be the first pod in a series that have affinity to themselves. In order // to not leave such pods in pending state forever, we check that if no other pod // in the cluster matches the namespace and selector of this pod and the pod matches @@ -1514,7 +1470,14 @@ func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetada return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil } - if nodeInfo.Node().Spec.Unschedulable { + // If pod tolerate unschedulable taint, it's also tolerate `node.Spec.Unschedulable`. + podToleratesUnschedulable := v1helper.TolerationsTolerateTaint(pod.Spec.Tolerations, &v1.Taint{ + Key: algorithm.TaintNodeUnschedulable, + Effect: v1.TaintEffectNoSchedule, + }) + + // TODO (k82cn): deprecates `node.Spec.Unschedulable` in 1.13. + if nodeInfo.Node().Spec.Unschedulable && !podToleratesUnschedulable { return false, []algorithm.PredicateFailureReason{ErrNodeUnschedulable}, nil } diff --git a/pkg/scheduler/algorithm/predicates/predicates_test.go b/pkg/scheduler/algorithm/predicates/predicates_test.go index 00c60ad54f5..42d795819e4 100644 --- a/pkg/scheduler/algorithm/predicates/predicates_test.go +++ b/pkg/scheduler/algorithm/predicates/predicates_test.go @@ -2546,6 +2546,349 @@ func TestInterPodAffinity(t *testing.T) { fits: true, name: "verify that PodAntiAffinity from existing pod is respected when pod has no AntiAffinity constraints. 
satisfy PodAntiAffinity symmetry with the existing pod", }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabel, + }, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "service", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "region", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "security", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "region", + }, + }, + }, + }, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Labels: podLabel2}, + Spec: v1.PodSpec{NodeName: "machine1", + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "security", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + }, + node: &node1, + fits: false, + name: "satisfies the PodAntiAffinity with existing pod but doesn't satisfy PodAntiAffinity symmetry with incoming pod", + expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Labels: podLabel}, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "service", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + { + LabelSelector: &metav1.LabelSelector{ + 
MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "security", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Labels: podLabel2}, + Spec: v1.PodSpec{ + NodeName: "machine1", + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "security", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + }, + node: &node1, + fits: false, + expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, + name: "PodAntiAffinity symmetry check a1: incoming pod and existing pod partially match each other on AffinityTerms", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Labels: podLabel2}, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "security", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Labels: podLabel}, + Spec: v1.PodSpec{ + NodeName: "machine1", + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "service", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "security", 
+ Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + }, + node: &node1, + fits: false, + expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, + name: "PodAntiAffinity symmetry check a2: incoming pod and existing pod partially match each other on AffinityTerms", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"abc": "", "xyz": ""}}, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "abc", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "def", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"def": "", "xyz": ""}}, + Spec: v1.PodSpec{ + NodeName: "machine1", + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "abc", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "def", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + }, + node: &node1, + fits: false, + expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, + name: "PodAntiAffinity symmetry 
check b1: incoming pod and existing pod partially match each other on AffinityTerms", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"def": "", "xyz": ""}}, + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "abc", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "def", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"abc": "", "xyz": ""}}, + Spec: v1.PodSpec{ + NodeName: "machine1", + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "abc", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "def", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + }, + node: &node1, + fits: false, + expectFailureReasons: []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, + name: "PodAntiAffinity symmetry check b2: incoming pod and existing pod partially match each other on AffinityTerms", + }, } for _, test := range tests { @@ -2828,12 +3171,15 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", 
"hostname": "nodeA"}}}, {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, }, - nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{{ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}}, + nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ + {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, + {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, + }, fits: map[string]bool{ "nodeA": false, - "nodeB": true, + "nodeB": false, }, - name: "This test ensures that anti-affinity matches a pod when all terms of the anti-affinity rule matches a pod.", + name: "This test ensures that anti-affinity matches a pod when any term of the anti-affinity rule matches a pod.", }, { pod: &v1.Pod{ @@ -3022,6 +3368,631 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { }, name: "NodeA and nodeB have same topologyKey and label value. NodeA has an existing pod that match the inter pod affinity rule. 
The pod can not be scheduled onto nodeA, nodeB, but can be scheduled onto nodeC (NodeC has an existing pod that match the inter pod affinity rule but in different namespace)", }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": ""}}, + }, + pods: []*v1.Pod{ + { + Spec: v1.PodSpec{ + NodeName: "nodeA", + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "foo", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "invalid-node-label", + }, + }, + }, + }, + }, + }, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}}, + }, + nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{}, + fits: map[string]bool{ + "nodeA": true, + "nodeB": true, + }, + name: "Test existing pod's anti-affinity: if an existing pod has a term with invalid topologyKey, labelSelector of the term is firstly checked, and then topologyKey of the term is also checked", + }, + { + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "foo", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "invalid-node-label", + }, + }, + }, + }, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": ""}}, + Spec: v1.PodSpec{ + NodeName: "nodeA", + }, + }, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: 
map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}}, + }, + nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{}, + fits: map[string]bool{ + "nodeA": true, + "nodeB": true, + }, + name: "Test incoming pod's anti-affinity: even if lableSelector matches, we still check if topologyKey matches", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "", "bar": ""}}, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "pod1"}, + Spec: v1.PodSpec{ + NodeName: "nodeA", + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "foo", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "pod2"}, + Spec: v1.PodSpec{ + NodeName: "nodeA", + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "bar", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "region", + }, + }, + }, + }, + }, + }, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, + }, + nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ + {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, + {ErrPodAffinityNotMatch, 
ErrExistingPodsAntiAffinityRulesNotMatch}, + }, + fits: map[string]bool{ + "nodeA": false, + "nodeB": false, + }, + name: "Test existing pod's anti-affinity: incoming pod wouldn't considered as a fit as it violates each existingPod's terms on all nodes", + }, + { + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "foo", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "bar", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "region", + }, + }, + }, + }, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": ""}}, + Spec: v1.PodSpec{ + NodeName: "nodeA", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"bar": ""}}, + Spec: v1.PodSpec{ + NodeName: "nodeB", + }, + }, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, + }, + nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ + {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, + {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, + }, + fits: map[string]bool{ + "nodeA": false, + "nodeB": false, + }, + name: "Test incoming pod's anti-affinity: incoming pod wouldn't considered as a fit as it at least violates one anti-affinity rule of existingPod", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "", "bar": ""}}, + }, + pods: []*v1.Pod{ + { + Spec: v1.PodSpec{ 
+ NodeName: "nodeA", + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "foo", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "invalid-node-label", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "bar", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, + }, + nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ + {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, + }, + fits: map[string]bool{ + "nodeA": false, + "nodeB": true, + }, + name: "Test existing pod's anti-affinity: only when labelSelector and topologyKey both matches, it's counted as a single term match - case when one term has invalid topologyKey", + }, + { + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "foo", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "invalid-node-label", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "bar", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "podA", Labels: 
map[string]string{"foo": "", "bar": ""}}, + Spec: v1.PodSpec{ + NodeName: "nodeA", + }, + }, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, + }, + nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ + {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, + }, + fits: map[string]bool{ + "nodeA": false, + "nodeB": true, + }, + name: "Test incoming pod's anti-affinity: only when labelSelector and topologyKey both matches, it's counted as a single term match - case when one term has invalid topologyKey", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "", "bar": ""}}, + }, + pods: []*v1.Pod{ + { + Spec: v1.PodSpec{ + NodeName: "nodeA", + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "foo", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "region", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "bar", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, + }, + nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ + {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, + {ErrPodAffinityNotMatch, 
ErrExistingPodsAntiAffinityRulesNotMatch}, + }, + fits: map[string]bool{ + "nodeA": false, + "nodeB": false, + }, + name: "Test existing pod's anti-affinity: only when labelSelector and topologyKey both matches, it's counted as a single term match - case when all terms have valid topologyKey", + }, + { + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "foo", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "region", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "bar", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "", "bar": ""}}, + Spec: v1.PodSpec{ + NodeName: "nodeA", + }, + }, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, + }, + nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ + {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, + {ErrPodAffinityNotMatch, ErrPodAntiAffinityRulesNotMatch}, + }, + fits: map[string]bool{ + "nodeA": false, + "nodeB": false, + }, + name: "Test incoming pod's anti-affinity: only when labelSelector and topologyKey both matches, it's counted as a single term match - case when all terms have valid topologyKey", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "", "bar": ""}}, + }, + pods: []*v1.Pod{ + { + Spec: v1.PodSpec{ + NodeName: "nodeA", + Affinity: 
&v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "foo", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "labelA", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + { + Spec: v1.PodSpec{ + NodeName: "nodeB", + Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "bar", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "labelB", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: map[string]string{"region": "r1", "zone": "z3", "hostname": "nodeC"}}}, + }, + nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ + {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, + {ErrPodAffinityNotMatch, ErrExistingPodsAntiAffinityRulesNotMatch}, + }, + fits: map[string]bool{ + "nodeA": false, + "nodeB": false, + "nodeC": true, + }, + name: "Test existing pod's anti-affinity: existingPod on nodeA and nodeB has at least one anti-affinity 
term matches incoming pod, so incoming pod can only be scheduled to nodeC", + }, + { + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAffinity: &v1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "foo", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "region", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "bar", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "pod1", Labels: map[string]string{"foo": "", "bar": ""}}, + Spec: v1.PodSpec{ + NodeName: "nodeA", + }, + }, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}}, + }, + nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ + {}, + {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}, + }, + fits: map[string]bool{ + "nodeA": true, + "nodeB": true, + }, + name: "Test incoming pod's affinity: firstly check if all affinityTerms matches, and then check if all topologyKeys match", + }, + { + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAffinity: &v1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "foo", + Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "region", + }, + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "bar", + Operator: 
metav1.LabelSelectorOpExists, + }, + }, + }, + TopologyKey: "zone", + }, + }, + }, + }, + }, + }, + pods: []*v1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{Name: "pod1", Labels: map[string]string{"foo": ""}}, + Spec: v1.PodSpec{ + NodeName: "nodeA", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "pod2", Labels: map[string]string{"bar": ""}}, + Spec: v1.PodSpec{ + NodeName: "nodeB", + }, + }, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}}, + }, + nodesExpectAffinityFailureReasons: [][]algorithm.PredicateFailureReason{ + {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}, + {ErrPodAffinityNotMatch, ErrPodAffinityRulesNotMatch}, + }, + fits: map[string]bool{ + "nodeA": false, + "nodeB": false, + }, + name: "Test incoming pod's affinity: firstly check if all affinityTerms matches, and then check if all topologyKeys match, and the match logic should be satified on the same pod", + }, } selectorExpectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch} @@ -4011,3 +4982,65 @@ func TestGetMaxVols(t *testing.T) { os.Setenv(KubeMaxPDVols, previousValue) } } + +func TestCheckNodeUnschedulablePredicate(t *testing.T) { + testCases := []struct { + name string + pod *v1.Pod + node *v1.Node + fit bool + }{ + { + name: "Does not schedule pod to unschedulable node (node.Spec.Unschedulable==true)", + pod: &v1.Pod{}, + node: &v1.Node{ + Spec: v1.NodeSpec{ + Unschedulable: true, + }, + }, + fit: false, + }, + { + name: "Schedule pod to normal node", + pod: &v1.Pod{}, + node: &v1.Node{ + Spec: v1.NodeSpec{ + Unschedulable: false, + }, + }, + fit: true, + }, + { + name: "Schedule pod with toleration to unschedulable node (node.Spec.Unschedulable==true)", + pod: &v1.Pod{ + Spec: v1.PodSpec{ + Tolerations: []v1.Toleration{ + { + 
Key: algorithm.TaintNodeUnschedulable, + Effect: v1.TaintEffectNoSchedule, + }, + }, + }, + }, + node: &v1.Node{ + Spec: v1.NodeSpec{ + Unschedulable: true, + }, + }, + fit: true, + }, + } + + for _, test := range testCases { + nodeInfo := schedulercache.NewNodeInfo() + nodeInfo.SetNode(test.node) + fit, _, err := CheckNodeUnschedulablePredicate(test.pod, nil, nodeInfo) + if err != nil { + t.Fatalf("Failed to check node unschedulable: %v", err) + } + + if fit != test.fit { + t.Errorf("Unexpected fit: expected %v, got %v", test.fit, fit) + } + } +} diff --git a/pkg/scheduler/algorithm/types.go b/pkg/scheduler/algorithm/types.go index cd1535cb558..0ad777e9164 100644 --- a/pkg/scheduler/algorithm/types.go +++ b/pkg/scheduler/algorithm/types.go @@ -19,6 +19,7 @@ package algorithm import ( apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/labels" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" @@ -122,6 +123,12 @@ type ReplicaSetLister interface { GetPodReplicaSets(*v1.Pod) ([]*apps.ReplicaSet, error) } +// PDBLister interface represents anything that can list PodDisruptionBudget objects. +type PDBLister interface { + // List() returns a list of PodDisruptionBudgets matching the selector. 
+ List(labels.Selector) ([]*policyv1beta1.PodDisruptionBudget, error) +} + var _ ControllerLister = &EmptyControllerLister{} // EmptyControllerLister implements ControllerLister on []v1.ReplicationController returning empty data diff --git a/pkg/scheduler/algorithmprovider/defaults/defaults.go b/pkg/scheduler/algorithmprovider/defaults/defaults.go index 6b18c4fa9fd..ce1093cb5a9 100644 --- a/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -199,10 +199,13 @@ func ApplyFeatureGates() { // Fit is determined based on whether a pod can tolerate all of the node's taints factory.RegisterMandatoryFitPredicate(predicates.PodToleratesNodeTaintsPred, predicates.PodToleratesNodeTaints) + // Fit is determined based on whether a pod can tolerate unschedulable of node + factory.RegisterMandatoryFitPredicate(predicates.CheckNodeUnschedulablePred, predicates.CheckNodeUnschedulablePredicate) // Insert Key "PodToleratesNodeTaints" and "CheckNodeUnschedulable" To All Algorithm Provider // The key will insert to all providers which in algorithmProviderMap[] // if you just want insert to specific provider, call func InsertPredicateKeyToAlgoProvider() factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.PodToleratesNodeTaintsPred) + factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.CheckNodeUnschedulablePred) glog.Warningf("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory") } diff --git a/pkg/scheduler/cache/BUILD b/pkg/scheduler/cache/BUILD index 98d474beba4..68f5f39c7dd 100644 --- a/pkg/scheduler/cache/BUILD +++ b/pkg/scheduler/cache/BUILD @@ -18,7 +18,6 @@ go_library( "//pkg/scheduler/util:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", @@ -43,14 +42,11 @@ go_test( "//pkg/scheduler/algorithm/priorities/util:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", ], ) diff --git a/pkg/scheduler/cache/cache.go b/pkg/scheduler/cache/cache.go index 5fc39804922..439b93596c6 100644 --- a/pkg/scheduler/cache/cache.go +++ b/pkg/scheduler/cache/cache.go @@ -29,7 +29,6 @@ import ( "k8s.io/kubernetes/pkg/features" "github.com/golang/glog" - policy "k8s.io/api/policy/v1beta1" ) var ( @@ -60,7 +59,6 @@ type schedulerCache struct { podStates map[string]*podState nodes map[string]*NodeInfo nodeTree *NodeTree - pdbs map[string]*policy.PodDisruptionBudget // A map from image name to its imageState. 
imageStates map[string]*imageState } @@ -106,7 +104,6 @@ func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedul nodeTree: newNodeTree(nil), assumedPods: make(map[string]bool), podStates: make(map[string]*podState), - pdbs: make(map[string]*policy.PodDisruptionBudget), imageStates: make(map[string]*imageState), } } @@ -127,15 +124,9 @@ func (cache *schedulerCache) Snapshot() *Snapshot { assumedPods[k] = v } - pdbs := make(map[string]*policy.PodDisruptionBudget) - for k, v := range cache.pdbs { - pdbs[k] = v.DeepCopy() - } - return &Snapshot{ Nodes: nodes, AssumedPods: assumedPods, - Pdbs: pdbs, } } @@ -522,46 +513,6 @@ func (cache *schedulerCache) removeNodeImageStates(node *v1.Node) { } } -func (cache *schedulerCache) AddPDB(pdb *policy.PodDisruptionBudget) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - // Unconditionally update cache. - cache.pdbs[string(pdb.UID)] = pdb - return nil -} - -func (cache *schedulerCache) UpdatePDB(oldPDB, newPDB *policy.PodDisruptionBudget) error { - return cache.AddPDB(newPDB) -} - -func (cache *schedulerCache) RemovePDB(pdb *policy.PodDisruptionBudget) error { - cache.mu.Lock() - defer cache.mu.Unlock() - - delete(cache.pdbs, string(pdb.UID)) - return nil -} - -func (cache *schedulerCache) ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error) { - cache.mu.RLock() - defer cache.mu.RUnlock() - var pdbs []*policy.PodDisruptionBudget - for _, pdb := range cache.pdbs { - if selector.Matches(labels.Set(pdb.Labels)) { - pdbs = append(pdbs, pdb) - } - } - return pdbs, nil -} - -func (cache *schedulerCache) IsUpToDate(n *NodeInfo) bool { - cache.mu.RLock() - defer cache.mu.RUnlock() - node, ok := cache.nodes[n.Node().Name] - return ok && n.generation == node.generation -} - func (cache *schedulerCache) run() { go wait.Until(cache.cleanupExpiredAssumedPods, cache.period, cache.stop) } diff --git a/pkg/scheduler/cache/cache_test.go b/pkg/scheduler/cache/cache_test.go index 
45b79c0e6e5..f78aa915de4 100644 --- a/pkg/scheduler/cache/cache_test.go +++ b/pkg/scheduler/cache/cache_test.go @@ -24,13 +24,10 @@ import ( "time" "k8s.io/api/core/v1" - "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" @@ -1231,131 +1228,3 @@ func setupCacheWithAssumedPods(b *testing.B, podNum int, assumedTime time.Time) } return cache } - -func makePDB(name, namespace string, uid types.UID, labels map[string]string, minAvailable int) *v1beta1.PodDisruptionBudget { - intstrMin := intstr.FromInt(minAvailable) - pdb := &v1beta1.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - Labels: labels, - UID: uid, - }, - Spec: v1beta1.PodDisruptionBudgetSpec{ - MinAvailable: &intstrMin, - Selector: &metav1.LabelSelector{MatchLabels: labels}, - }, - } - - return pdb -} - -// TestPDBOperations tests that a PDB will be add/updated/deleted correctly. 
-func TestPDBOperations(t *testing.T) { - ttl := 10 * time.Second - testPDBs := []*v1beta1.PodDisruptionBudget{ - makePDB("pdb0", "ns1", "uid0", map[string]string{"tkey1": "tval1"}, 3), - makePDB("pdb1", "ns1", "uid1", map[string]string{"tkey1": "tval1", "tkey2": "tval2"}, 1), - makePDB("pdb2", "ns3", "uid2", map[string]string{"tkey3": "tval3", "tkey2": "tval2"}, 10), - } - updatedPDBs := []*v1beta1.PodDisruptionBudget{ - makePDB("pdb0", "ns1", "uid0", map[string]string{"tkey4": "tval4"}, 8), - makePDB("pdb1", "ns1", "uid1", map[string]string{"tkey1": "tval1"}, 1), - makePDB("pdb2", "ns3", "uid2", map[string]string{"tkey3": "tval3", "tkey1": "tval1", "tkey2": "tval2"}, 10), - } - tests := []struct { - pdbsToAdd []*v1beta1.PodDisruptionBudget - pdbsToUpdate []*v1beta1.PodDisruptionBudget - pdbsToDelete []*v1beta1.PodDisruptionBudget - expectedPDBs []*v1beta1.PodDisruptionBudget // Expected PDBs after all operations - }{ - { - pdbsToAdd: []*v1beta1.PodDisruptionBudget{testPDBs[0]}, - pdbsToUpdate: []*v1beta1.PodDisruptionBudget{testPDBs[0], testPDBs[1], testPDBs[0]}, - expectedPDBs: []*v1beta1.PodDisruptionBudget{testPDBs[0], testPDBs[1]}, // both will be in the cache as they have different names - }, - { - pdbsToAdd: []*v1beta1.PodDisruptionBudget{testPDBs[0]}, - pdbsToUpdate: []*v1beta1.PodDisruptionBudget{testPDBs[0], updatedPDBs[0]}, - expectedPDBs: []*v1beta1.PodDisruptionBudget{updatedPDBs[0]}, - }, - { - pdbsToAdd: []*v1beta1.PodDisruptionBudget{testPDBs[0], testPDBs[2]}, - pdbsToUpdate: []*v1beta1.PodDisruptionBudget{testPDBs[0], updatedPDBs[0]}, - pdbsToDelete: []*v1beta1.PodDisruptionBudget{testPDBs[0]}, - expectedPDBs: []*v1beta1.PodDisruptionBudget{testPDBs[2]}, - }, - } - - for _, test := range tests { - cache := newSchedulerCache(ttl, time.Second, nil) - for _, pdbToAdd := range test.pdbsToAdd { - if err := cache.AddPDB(pdbToAdd); err != nil { - t.Fatalf("AddPDB failed: %v", err) - } - } - - for i := range test.pdbsToUpdate { - if i == 0 { - continue - 
} - if err := cache.UpdatePDB(test.pdbsToUpdate[i-1], test.pdbsToUpdate[i]); err != nil { - t.Fatalf("UpdatePDB failed: %v", err) - } - } - - for _, pdb := range test.pdbsToDelete { - if err := cache.RemovePDB(pdb); err != nil { - t.Fatalf("RemovePDB failed: %v", err) - } - } - - cachedPDBs, err := cache.ListPDBs(labels.Everything()) - if err != nil { - t.Fatalf("ListPDBs failed: %v", err) - } - if len(cachedPDBs) != len(test.expectedPDBs) { - t.Errorf("Expected %d PDBs, got %d", len(test.expectedPDBs), len(cachedPDBs)) - } - for _, pdb := range test.expectedPDBs { - found := false - // find it among the cached ones - for _, cpdb := range cachedPDBs { - if pdb.UID == cpdb.UID { - found = true - if !reflect.DeepEqual(pdb, cpdb) { - t.Errorf("%v is not equal to %v", pdb, cpdb) - } - break - } - } - if !found { - t.Errorf("PDB with uid '%v' was not found in the cache.", pdb.UID) - } - - } - } -} - -func TestIsUpToDate(t *testing.T) { - cache := New(time.Duration(0), wait.NeverStop) - if err := cache.AddNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n1"}}); err != nil { - t.Errorf("Could not add node: %v", err) - } - s := cache.Snapshot() - node := s.Nodes["n1"] - if !cache.IsUpToDate(node) { - t.Errorf("Node incorrectly marked as stale") - } - pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", UID: "p1"}, Spec: v1.PodSpec{NodeName: "n1"}} - if err := cache.AddPod(pod); err != nil { - t.Errorf("Could not add pod: %v", err) - } - if cache.IsUpToDate(node) { - t.Errorf("Node incorrectly marked as up to date") - } - badNode := &NodeInfo{node: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "n2"}}} - if cache.IsUpToDate(badNode) { - t.Errorf("Nonexistant node incorrectly marked as up to date") - } -} diff --git a/pkg/scheduler/cache/interface.go b/pkg/scheduler/cache/interface.go index 21eba905ef1..14aa485f89b 100644 --- a/pkg/scheduler/cache/interface.go +++ b/pkg/scheduler/cache/interface.go @@ -18,7 +18,6 @@ package cache import ( "k8s.io/api/core/v1" - policy 
"k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/labels" ) @@ -97,18 +96,6 @@ type Cache interface { // RemoveNode removes overall information about node. RemoveNode(node *v1.Node) error - // AddPDB adds a PodDisruptionBudget object to the cache. - AddPDB(pdb *policy.PodDisruptionBudget) error - - // UpdatePDB updates a PodDisruptionBudget object in the cache. - UpdatePDB(oldPDB, newPDB *policy.PodDisruptionBudget) error - - // RemovePDB removes a PodDisruptionBudget object from the cache. - RemovePDB(pdb *policy.PodDisruptionBudget) error - - // List lists all cached PDBs matching the selector. - ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error) - // UpdateNodeNameToInfoMap updates the passed infoMap to the current contents of Cache. // The node info contains aggregated information of pods scheduled (including assumed to be) // on this node. @@ -123,9 +110,6 @@ type Cache interface { // Snapshot takes a snapshot on current cache Snapshot() *Snapshot - // IsUpToDate returns true if the given NodeInfo matches the current data in the cache. 
- IsUpToDate(n *NodeInfo) bool - // NodeTree returns a node tree structure NodeTree() *NodeTree } @@ -134,5 +118,4 @@ type Cache interface { type Snapshot struct { AssumedPods map[string]bool Nodes map[string]*NodeInfo - Pdbs map[string]*policy.PodDisruptionBudget } diff --git a/pkg/scheduler/core/BUILD b/pkg/scheduler/core/BUILD index 873c0150cf7..fc0636ef992 100644 --- a/pkg/scheduler/core/BUILD +++ b/pkg/scheduler/core/BUILD @@ -29,6 +29,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", ], diff --git a/pkg/scheduler/core/equivalence/BUILD b/pkg/scheduler/core/equivalence/BUILD index 2c56de7ebca..b4595519b80 100644 --- a/pkg/scheduler/core/equivalence/BUILD +++ b/pkg/scheduler/core/equivalence/BUILD @@ -27,7 +27,6 @@ go_test( "//pkg/scheduler/algorithm:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", "//pkg/scheduler/cache:go_default_library", - "//pkg/scheduler/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/scheduler/core/equivalence/eqivalence.go b/pkg/scheduler/core/equivalence/eqivalence.go index 8101809ad21..db94c738580 100644 --- a/pkg/scheduler/core/equivalence/eqivalence.go +++ b/pkg/scheduler/core/equivalence/eqivalence.go @@ -35,6 +35,9 @@ import ( hashutil "k8s.io/kubernetes/pkg/util/hash" ) +// nodeMap stores a *NodeCache for each node. 
+type nodeMap map[string]*NodeCache + // Cache is a thread safe map saves and reuses the output of predicate functions, // it uses node name as key to access those cached results. // @@ -42,13 +45,23 @@ import ( // class". (Equivalence class is defined in the `Class` type.) Saved results // will be reused until an appropriate invalidation function is called. type Cache struct { - // i.e. map[string]*NodeCache - sync.Map + // NOTE(harry): Theoretically sync.Map has better performance in machine with 8+ CPUs, while + // the reality is lock contention in first level cache is rare. + mu sync.RWMutex + nodeToCache nodeMap + predicateIDMap map[string]int } // NewCache create an empty equiv class cache. -func NewCache() *Cache { - return new(Cache) +func NewCache(predicates []string) *Cache { + predicateIDMap := make(map[string]int, len(predicates)) + for id, predicate := range predicates { + predicateIDMap[predicate] = id + } + return &Cache{ + nodeToCache: make(nodeMap), + predicateIDMap: predicateIDMap, + } } // NodeCache saves and reuses the output of predicate functions. Use RunPredicate to @@ -63,34 +76,90 @@ func NewCache() *Cache { type NodeCache struct { mu sync.RWMutex cache predicateMap + // generation is current generation of node cache, incremented on node + // invalidation. + generation uint64 + // snapshotGeneration saves snapshot of generation of node cache. + snapshotGeneration uint64 + // predicateGenerations stores generation numbers for predicates, incremented on + // predicate invalidation. Created on first update. Use 0 if does not + // exist. + predicateGenerations []uint64 + // snapshotPredicateGenerations saves snapshot of generation numbers for predicates. + snapshotPredicateGenerations []uint64 } // newNodeCache returns an empty NodeCache. 
-func newNodeCache() *NodeCache { +func newNodeCache(n int) *NodeCache { return &NodeCache{ - cache: make(predicateMap), + cache: make(predicateMap, n), + predicateGenerations: make([]uint64, n), + snapshotPredicateGenerations: make([]uint64, n), } } +// Snapshot snapshots current generations of cache. +// NOTE: We snapshot generations of all node caches before using it and these +// operations are serialized, we can save snapshot as member of node cache +// itself. +func (c *Cache) Snapshot() { + c.mu.RLock() + defer c.mu.RUnlock() + for _, n := range c.nodeToCache { + n.mu.Lock() + // snapshot predicate generations + copy(n.snapshotPredicateGenerations, n.predicateGenerations) + // snapshot node generation + n.snapshotGeneration = n.generation + n.mu.Unlock() + } + return +} + // GetNodeCache returns the existing NodeCache for given node if present. Otherwise, // it creates the NodeCache and returns it. // The boolean flag is true if the value was loaded, false if created. func (c *Cache) GetNodeCache(name string) (nodeCache *NodeCache, exists bool) { - v, exists := c.LoadOrStore(name, newNodeCache()) - nodeCache = v.(*NodeCache) + c.mu.Lock() + defer c.mu.Unlock() + if nodeCache, exists = c.nodeToCache[name]; !exists { + nodeCache = newNodeCache(len(c.predicateIDMap)) + c.nodeToCache[name] = nodeCache + } return } +// LoadNodeCache returns the existing NodeCache for given node, nil if not +// present. +func (c *Cache) LoadNodeCache(node string) *NodeCache { + c.mu.RLock() + defer c.mu.RUnlock() + return c.nodeToCache[node] +} + +func (c *Cache) predicateKeysToIDs(predicateKeys sets.String) []int { + predicateIDs := make([]int, 0, len(predicateKeys)) + for predicateKey := range predicateKeys { + if id, ok := c.predicateIDMap[predicateKey]; ok { + predicateIDs = append(predicateIDs, id) + } else { + glog.Errorf("predicate key %q not found", predicateKey) + } + } + return predicateIDs +} + // InvalidatePredicates clears all cached results for the given predicates. 
func (c *Cache) InvalidatePredicates(predicateKeys sets.String) { if len(predicateKeys) == 0 { return } - c.Range(func(k, v interface{}) bool { - n := v.(*NodeCache) - n.invalidatePreds(predicateKeys) - return true - }) + c.mu.RLock() + defer c.mu.RUnlock() + predicateIDs := c.predicateKeysToIDs(predicateKeys) + for _, n := range c.nodeToCache { + n.invalidatePreds(predicateIDs) + } glog.V(5).Infof("Cache invalidation: node=*,predicates=%v", predicateKeys) } @@ -100,16 +169,22 @@ func (c *Cache) InvalidatePredicatesOnNode(nodeName string, predicateKeys sets.S if len(predicateKeys) == 0 { return } - if v, ok := c.Load(nodeName); ok { - n := v.(*NodeCache) - n.invalidatePreds(predicateKeys) + c.mu.RLock() + defer c.mu.RUnlock() + predicateIDs := c.predicateKeysToIDs(predicateKeys) + if n, ok := c.nodeToCache[nodeName]; ok { + n.invalidatePreds(predicateIDs) } glog.V(5).Infof("Cache invalidation: node=%s,predicates=%v", nodeName, predicateKeys) } // InvalidateAllPredicatesOnNode clears all cached results for one node. func (c *Cache) InvalidateAllPredicatesOnNode(nodeName string) { - c.Delete(nodeName) + c.mu.RLock() + defer c.mu.RUnlock() + if node, ok := c.nodeToCache[nodeName]; ok { + node.invalidate() + } glog.V(5).Infof("Cache invalidation: node=%s,predicates=*", nodeName) } @@ -186,8 +261,8 @@ func NewClass(pod *v1.Pod) *Class { return nil } -// predicateMap stores resultMaps with predicate name as the key. -type predicateMap map[string]resultMap +// predicateMap stores resultMaps with predicate ID as the key. +type predicateMap []resultMap // resultMap stores PredicateResult with pod equivalence hash as the key. type resultMap map[uint64]predicateResult @@ -201,22 +276,22 @@ type predicateResult struct { // RunPredicate returns a cached predicate result. In case of a cache miss, the predicate will be // run and its results cached for the next call. // -// NOTE: RunPredicate will not update the equivalence cache if the given NodeInfo is stale. 
+// NOTE: RunPredicate will not update the equivalence cache if generation does not match live version. func (n *NodeCache) RunPredicate( pred algorithm.FitPredicate, predicateKey string, + predicateID int, pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo, equivClass *Class, - cache schedulercache.Cache, ) (bool, []algorithm.PredicateFailureReason, error) { if nodeInfo == nil || nodeInfo.Node() == nil { // This may happen during tests. return false, []algorithm.PredicateFailureReason{}, fmt.Errorf("nodeInfo is nil or node is invalid") } - result, ok := n.lookupResult(pod.GetName(), nodeInfo.Node().GetName(), predicateKey, equivClass.hash) + result, ok := n.lookupResult(pod.GetName(), nodeInfo.Node().GetName(), predicateKey, predicateID, equivClass.hash) if ok { return result.Fit, result.FailReasons, nil } @@ -224,19 +299,17 @@ func (n *NodeCache) RunPredicate( if err != nil { return fit, reasons, err } - if cache != nil { - n.updateResult(pod.GetName(), predicateKey, fit, reasons, equivClass.hash, cache, nodeInfo) - } + n.updateResult(pod.GetName(), predicateKey, predicateID, fit, reasons, equivClass.hash, nodeInfo) return fit, reasons, nil } // updateResult updates the cached result of a predicate. func (n *NodeCache) updateResult( podName, predicateKey string, + predicateID int, fit bool, reasons []algorithm.PredicateFailureReason, equivalenceHash uint64, - cache schedulercache.Cache, nodeInfo *schedulercache.NodeInfo, ) { if nodeInfo == nil || nodeInfo.Node() == nil { @@ -244,11 +317,6 @@ func (n *NodeCache) updateResult( metrics.EquivalenceCacheWrites.WithLabelValues("discarded_bad_node").Inc() return } - // Skip update if NodeInfo is stale. 
- if !cache.IsUpToDate(nodeInfo) { - metrics.EquivalenceCacheWrites.WithLabelValues("discarded_stale").Inc() - return - } predicateItem := predicateResult{ Fit: fit, @@ -257,16 +325,24 @@ func (n *NodeCache) updateResult( n.mu.Lock() defer n.mu.Unlock() + if (n.snapshotGeneration != n.generation) || (n.snapshotPredicateGenerations[predicateID] != n.predicateGenerations[predicateID]) { + // Generation of node or predicate has been updated since we last took + // a snapshot, this indicates that we received a invalidation request + // during this time. Cache may be stale, skip update. + metrics.EquivalenceCacheWrites.WithLabelValues("discarded_stale").Inc() + return + } // If cached predicate map already exists, just update the predicate by key - if predicates, ok := n.cache[predicateKey]; ok { + if predicates := n.cache[predicateID]; predicates != nil { // maps in golang are references, no need to add them back predicates[equivalenceHash] = predicateItem } else { - n.cache[predicateKey] = + n.cache[predicateID] = resultMap{ equivalenceHash: predicateItem, } } + n.predicateGenerations[predicateID]++ glog.V(5).Infof("Cache update: node=%s, predicate=%s,pod=%s,value=%v", nodeInfo.Node().Name, predicateKey, podName, predicateItem) @@ -276,11 +352,12 @@ func (n *NodeCache) updateResult( // cache entry was found. func (n *NodeCache) lookupResult( podName, nodeName, predicateKey string, + predicateID int, equivalenceHash uint64, ) (value predicateResult, ok bool) { n.mu.RLock() defer n.mu.RUnlock() - value, ok = n.cache[predicateKey][equivalenceHash] + value, ok = n.cache[predicateID][equivalenceHash] if ok { metrics.EquivalenceCacheHits.Inc() } else { @@ -289,15 +366,24 @@ func (n *NodeCache) lookupResult( return value, ok } -// invalidatePreds deletes cached predicates by given keys. -func (n *NodeCache) invalidatePreds(predicateKeys sets.String) { +// invalidatePreds deletes cached predicates by given IDs. 
+func (n *NodeCache) invalidatePreds(predicateIDs []int) { n.mu.Lock() defer n.mu.Unlock() - for predicateKey := range predicateKeys { - delete(n.cache, predicateKey) + for _, predicateID := range predicateIDs { + n.cache[predicateID] = nil + n.predicateGenerations[predicateID]++ } } +// invalidate invalidates node cache. +func (n *NodeCache) invalidate() { + n.mu.Lock() + defer n.mu.Unlock() + n.cache = make(predicateMap, len(n.cache)) + n.generation++ +} + // equivalencePod is the set of pod attributes which must match for two pods to // be considered equivalent for scheduling purposes. For correctness, this must // include any Pod field which is used by a FitPredicate. diff --git a/pkg/scheduler/core/equivalence/eqivalence_test.go b/pkg/scheduler/core/equivalence/eqivalence_test.go index e807958cc4d..35111918bdf 100644 --- a/pkg/scheduler/core/equivalence/eqivalence_test.go +++ b/pkg/scheduler/core/equivalence/eqivalence_test.go @@ -28,7 +28,6 @@ import ( "k8s.io/kubernetes/pkg/scheduler/algorithm" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" - schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing" ) // makeBasicPod returns a Pod object with many of the fields populated. @@ -155,16 +154,6 @@ type predicateItemType struct { reasons []algorithm.PredicateFailureReason } -// upToDateCache is a fake Cache where IsUpToDate always returns true. -type upToDateCache = schedulertesting.FakeCache - -// staleNodeCache is a fake Cache where IsUpToDate always returns false. -type staleNodeCache struct { - schedulertesting.FakeCache -} - -func (c *staleNodeCache) IsUpToDate(*schedulercache.NodeInfo) bool { return false } - // mockPredicate provides an algorithm.FitPredicate with pre-set return values. 
type mockPredicate struct { fit bool @@ -182,7 +171,6 @@ func TestRunPredicate(t *testing.T) { tests := []struct { name string pred mockPredicate - cache schedulercache.Cache expectFit, expectCacheHit, expectCacheWrite bool expectedReasons []algorithm.PredicateFailureReason expectedError string @@ -190,7 +178,6 @@ func TestRunPredicate(t *testing.T) { { name: "pod fits/cache hit", pred: mockPredicate{}, - cache: &upToDateCache{}, expectFit: true, expectCacheHit: true, expectCacheWrite: false, @@ -198,23 +185,13 @@ func TestRunPredicate(t *testing.T) { { name: "pod fits/cache miss", pred: mockPredicate{fit: true}, - cache: &upToDateCache{}, expectFit: true, expectCacheHit: false, expectCacheWrite: true, }, - { - name: "pod fits/cache miss/no write", - pred: mockPredicate{fit: true}, - cache: &staleNodeCache{}, - expectFit: true, - expectCacheHit: false, - expectCacheWrite: false, - }, { name: "pod doesn't fit/cache miss", pred: mockPredicate{reasons: []algorithm.PredicateFailureReason{predicates.ErrFakePredicate}}, - cache: &upToDateCache{}, expectFit: false, expectCacheHit: false, expectCacheWrite: true, @@ -223,7 +200,6 @@ func TestRunPredicate(t *testing.T) { { name: "pod doesn't fit/cache hit", pred: mockPredicate{}, - cache: &upToDateCache{}, expectFit: false, expectCacheHit: true, expectCacheWrite: false, @@ -232,7 +208,6 @@ func TestRunPredicate(t *testing.T) { { name: "predicate error", pred: mockPredicate{err: errors.New("This is expected")}, - cache: &upToDateCache{}, expectFit: false, expectCacheHit: false, expectCacheWrite: false, @@ -240,6 +215,8 @@ func TestRunPredicate(t *testing.T) { }, } + predicatesOrdering := []string{"testPredicate"} + predicateID := 0 for _, test := range tests { t.Run(test.name, func(t *testing.T) { node := schedulercache.NewNodeInfo() @@ -249,15 +226,16 @@ func TestRunPredicate(t *testing.T) { meta := algorithm.EmptyPredicateMetadataProducer(nil, nil) // Initialize and populate equivalence class cache. 
- ecache := NewCache() + ecache := NewCache(predicatesOrdering) + ecache.Snapshot() nodeCache, _ := ecache.GetNodeCache(testNode.Name) equivClass := NewClass(pod) if test.expectCacheHit { - nodeCache.updateResult(pod.Name, "testPredicate", test.expectFit, test.expectedReasons, equivClass.hash, test.cache, node) + nodeCache.updateResult(pod.Name, "testPredicate", predicateID, test.expectFit, test.expectedReasons, equivClass.hash, node) } - fit, reasons, err := nodeCache.RunPredicate(test.pred.predicate, "testPredicate", pod, meta, node, equivClass, test.cache) + fit, reasons, err := nodeCache.RunPredicate(test.pred.predicate, "testPredicate", predicateID, pod, meta, node, equivClass) if err != nil { if err.Error() != test.expectedError { @@ -288,7 +266,7 @@ func TestRunPredicate(t *testing.T) { if !test.expectCacheHit && test.pred.callCount == 0 { t.Errorf("Predicate should be called") } - _, ok := nodeCache.lookupResult(pod.Name, node.Node().Name, "testPredicate", equivClass.hash) + _, ok := nodeCache.lookupResult(pod.Name, node.Node().Name, "testPredicate", predicateID, equivClass.hash) if !ok && test.expectCacheWrite { t.Errorf("Cache write should happen") } @@ -303,22 +281,24 @@ func TestRunPredicate(t *testing.T) { } func TestUpdateResult(t *testing.T) { + predicatesOrdering := []string{"GeneralPredicates"} tests := []struct { name string pod string predicateKey string + predicateID int nodeName string fit bool reasons []algorithm.PredicateFailureReason equivalenceHash uint64 expectPredicateMap bool expectCacheItem predicateResult - cache schedulercache.Cache }{ { name: "test 1", pod: "testPod", predicateKey: "GeneralPredicates", + predicateID: 0, nodeName: "node1", fit: true, equivalenceHash: 123, @@ -326,12 +306,12 @@ func TestUpdateResult(t *testing.T) { expectCacheItem: predicateResult{ Fit: true, }, - cache: &upToDateCache{}, }, { name: "test 2", pod: "testPod", predicateKey: "GeneralPredicates", + predicateID: 0, nodeName: "node2", fit: false, 
equivalenceHash: 123, @@ -339,7 +319,6 @@ func TestUpdateResult(t *testing.T) { expectCacheItem: predicateResult{ Fit: false, }, - cache: &upToDateCache{}, }, } for _, test := range tests { @@ -349,14 +328,14 @@ func TestUpdateResult(t *testing.T) { node.SetNode(testNode) // Initialize and populate equivalence class cache. - ecache := NewCache() + ecache := NewCache(predicatesOrdering) nodeCache, _ := ecache.GetNodeCache(testNode.Name) if test.expectPredicateMap { predicateItem := predicateResult{ Fit: true, } - nodeCache.cache[test.predicateKey] = + nodeCache.cache[test.predicateID] = resultMap{ test.equivalenceHash: predicateItem, } @@ -365,15 +344,15 @@ func TestUpdateResult(t *testing.T) { nodeCache.updateResult( test.pod, test.predicateKey, + test.predicateID, test.fit, test.reasons, test.equivalenceHash, - test.cache, node, ) - cachedMapItem, ok := nodeCache.cache[test.predicateKey] - if !ok { + cachedMapItem := nodeCache.cache[test.predicateID] + if cachedMapItem == nil { t.Errorf("can't find expected cache item: %v", test.expectCacheItem) } else { if !reflect.DeepEqual(cachedMapItem[test.equivalenceHash], test.expectCacheItem) { @@ -394,18 +373,19 @@ func slicesEqual(a, b []algorithm.PredicateFailureReason) bool { } func TestLookupResult(t *testing.T) { + predicatesOrdering := []string{"GeneralPredicates"} tests := []struct { name string podName string nodeName string predicateKey string + predicateID int equivalenceHashForUpdatePredicate uint64 equivalenceHashForCalPredicate uint64 cachedItem predicateItemType expectedPredicateKeyMiss bool expectedEquivalenceHashMiss bool expectedPredicateItem predicateItemType - cache schedulercache.Cache }{ { name: "test 1", @@ -414,6 +394,7 @@ func TestLookupResult(t *testing.T) { equivalenceHashForUpdatePredicate: 123, equivalenceHashForCalPredicate: 123, predicateKey: "GeneralPredicates", + predicateID: 0, cachedItem: predicateItemType{ fit: false, reasons: 
[]algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}, @@ -423,7 +404,6 @@ func TestLookupResult(t *testing.T) { fit: false, reasons: []algorithm.PredicateFailureReason{}, }, - cache: &upToDateCache{}, }, { name: "test 2", @@ -432,6 +412,7 @@ func TestLookupResult(t *testing.T) { equivalenceHashForUpdatePredicate: 123, equivalenceHashForCalPredicate: 123, predicateKey: "GeneralPredicates", + predicateID: 0, cachedItem: predicateItemType{ fit: true, }, @@ -440,7 +421,6 @@ func TestLookupResult(t *testing.T) { fit: true, reasons: []algorithm.PredicateFailureReason{}, }, - cache: &upToDateCache{}, }, { name: "test 3", @@ -449,6 +429,7 @@ func TestLookupResult(t *testing.T) { equivalenceHashForUpdatePredicate: 123, equivalenceHashForCalPredicate: 123, predicateKey: "GeneralPredicates", + predicateID: 0, cachedItem: predicateItemType{ fit: false, reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}, @@ -458,7 +439,6 @@ func TestLookupResult(t *testing.T) { fit: false, reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}, }, - cache: &upToDateCache{}, }, { name: "test 4", @@ -467,6 +447,7 @@ func TestLookupResult(t *testing.T) { equivalenceHashForUpdatePredicate: 123, equivalenceHashForCalPredicate: 456, predicateKey: "GeneralPredicates", + predicateID: 0, cachedItem: predicateItemType{ fit: false, reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}, @@ -477,7 +458,6 @@ func TestLookupResult(t *testing.T) { fit: false, reasons: []algorithm.PredicateFailureReason{}, }, - cache: &upToDateCache{}, }, } @@ -486,7 +466,7 @@ func TestLookupResult(t *testing.T) { testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: test.nodeName}} // Initialize and populate equivalence class cache. 
- ecache := NewCache() + ecache := NewCache(predicatesOrdering) nodeCache, _ := ecache.GetNodeCache(testNode.Name) node := schedulercache.NewNodeInfo() @@ -495,10 +475,10 @@ func TestLookupResult(t *testing.T) { nodeCache.updateResult( test.podName, test.predicateKey, + test.predicateID, test.cachedItem.fit, test.cachedItem.reasons, test.equivalenceHashForUpdatePredicate, - test.cache, node, ) // if we want to do invalid, invalid the cached item @@ -511,6 +491,7 @@ func TestLookupResult(t *testing.T) { result, ok := nodeCache.lookupResult(test.podName, test.nodeName, test.predicateKey, + test.predicateID, test.equivalenceHashForCalPredicate, ) fit, reasons := result.Fit, result.FailReasons @@ -659,6 +640,8 @@ func TestGetEquivalenceHash(t *testing.T) { func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { testPredicate := "GeneralPredicates" + testPredicateID := 0 + predicatesOrdering := []string{testPredicate} // tests is used to initialize all nodes tests := []struct { name string @@ -666,7 +649,6 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { nodeName string equivalenceHashForUpdatePredicate uint64 cachedItem predicateItemType - cache schedulercache.Cache }{ { name: "hash predicate 123 not fits host ports", @@ -679,7 +661,6 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { predicates.ErrPodNotFitsHostPorts, }, }, - cache: &upToDateCache{}, }, { name: "hash predicate 456 not fits host ports", @@ -692,7 +673,6 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { predicates.ErrPodNotFitsHostPorts, }, }, - cache: &upToDateCache{}, }, { name: "hash predicate 123 fits", @@ -702,10 +682,9 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { cachedItem: predicateItemType{ fit: true, }, - cache: &upToDateCache{}, }, } - ecache := NewCache() + ecache := NewCache(predicatesOrdering) for _, test := range tests { node := schedulercache.NewNodeInfo() @@ -717,10 +696,10 @@ func 
TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { nodeCache.updateResult( test.podName, testPredicate, + testPredicateID, test.cachedItem.fit, test.cachedItem.reasons, test.equivalenceHashForUpdatePredicate, - test.cache, node, ) } @@ -731,8 +710,8 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { // there should be no cached predicate any more for _, test := range tests { t.Run(test.name, func(t *testing.T) { - if nodeCache, exist := ecache.GetNodeCache(test.nodeName); exist { - if _, exist := nodeCache.cache[testPredicate]; exist { + if nodeCache, exist := ecache.nodeToCache[test.nodeName]; exist { + if cache := nodeCache.cache[testPredicateID]; cache != nil { t.Errorf("Failed: cached item for predicate key: %v on node: %v should be invalidated", testPredicate, test.nodeName) } @@ -743,6 +722,8 @@ func TestInvalidateCachedPredicateItemOfAllNodes(t *testing.T) { func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) { testPredicate := "GeneralPredicates" + testPredicateID := 0 + predicatesOrdering := []string{testPredicate} // tests is used to initialize all nodes tests := []struct { name string @@ -750,7 +731,6 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) { nodeName string equivalenceHashForUpdatePredicate uint64 cachedItem predicateItemType - cache schedulercache.Cache }{ { name: "hash predicate 123 not fits host ports", @@ -761,7 +741,6 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) { fit: false, reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}, }, - cache: &upToDateCache{}, }, { name: "hash predicate 456 not fits host ports", @@ -772,7 +751,6 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) { fit: false, reasons: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}, }, - cache: &upToDateCache{}, }, { name: "hash predicate 123 fits host ports", @@ -782,10 +760,9 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) { 
cachedItem: predicateItemType{ fit: true, }, - cache: &upToDateCache{}, }, } - ecache := NewCache() + ecache := NewCache(predicatesOrdering) for _, test := range tests { node := schedulercache.NewNodeInfo() @@ -797,19 +774,21 @@ func TestInvalidateAllCachedPredicateItemOfNode(t *testing.T) { nodeCache.updateResult( test.podName, testPredicate, + testPredicateID, test.cachedItem.fit, test.cachedItem.reasons, test.equivalenceHashForUpdatePredicate, - test.cache, node, ) } for _, test := range tests { t.Run(test.name, func(t *testing.T) { + oldNodeCache, _ := ecache.GetNodeCache(test.nodeName) + oldGeneration := oldNodeCache.generation // invalidate cached predicate for all nodes ecache.InvalidateAllPredicatesOnNode(test.nodeName) - if _, ok := ecache.GetNodeCache(test.nodeName); ok { + if n, _ := ecache.GetNodeCache(test.nodeName); oldGeneration == n.generation { t.Errorf("Failed: cached item for node: %v should be invalidated", test.nodeName) } }) diff --git a/pkg/scheduler/core/extender_test.go b/pkg/scheduler/core/extender_test.go index 1bbcf7cf306..5398b023607 100644 --- a/pkg/scheduler/core/extender_test.go +++ b/pkg/scheduler/core/extender_test.go @@ -513,6 +513,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) { extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, + schedulertesting.FakePDBLister{}, false, false, schedulerapi.DefaultPercentageOfNodesToScore) diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go index 1ab9e576a32..21e093ca6af 100644 --- a/pkg/scheduler/core/generic_scheduler.go +++ b/pkg/scheduler/core/generic_scheduler.go @@ -107,10 +107,30 @@ type genericScheduler struct { cachedNodeInfoMap map[string]*schedulercache.NodeInfo volumeBinder *volumebinder.VolumeBinder pvcLister corelisters.PersistentVolumeClaimLister + pdbLister algorithm.PDBLister disablePreemption bool percentageOfNodesToScore int32 } +// snapshot snapshots equivalane cache and node infos for all fit and priority 
+// functions. +func (g *genericScheduler) snapshot() error { + // IMPORTANT NOTE: We must snapshot equivalence cache before snapshotting + // scheduler cache, otherwise stale data may be written into equivalence + // cache, e.g. + // 1. snapshot cache + // 2. event arrives, updating cache and invalidating predicates or whole node cache + // 3. snapshot ecache + // 4. evaludate predicates + // 5. stale result will be written to ecache + if g.equivalenceCache != nil { + g.equivalenceCache.Snapshot() + } + + // Used for all fit and priority funcs. + return g.cache.UpdateNodeNameToInfoMap(g.cachedNodeInfoMap) +} + // Schedule tries to schedule the given pod to one of the nodes in the node list. // If it succeeds, it will return the name of the node. // If it fails, it will return a FitError error with reasons. @@ -130,8 +150,7 @@ func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister return "", ErrNoNodesAvailable } - // Used for all fit and priority funcs. - err = g.cache.UpdateNodeNameToInfoMap(g.cachedNodeInfoMap) + err = g.snapshot() if err != nil { return "", err } @@ -227,7 +246,7 @@ func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister, if !ok || fitError == nil { return nil, nil, nil, nil } - err := g.cache.UpdateNodeNameToInfoMap(g.cachedNodeInfoMap) + err := g.snapshot() if err != nil { return nil, nil, nil, err } @@ -248,7 +267,7 @@ func (g *genericScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister, // In this case, we should clean-up any existing nominated node name of the pod. 
return nil, nil, []*v1.Pod{pod}, nil } - pdbs, err := g.cache.ListPDBs(labels.Everything()) + pdbs, err := g.pdbLister.List(labels.Everything()) if err != nil { return nil, nil, nil, err } @@ -396,14 +415,13 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v var nodeCache *equivalence.NodeCache nodeName := g.cache.NodeTree().Next() if g.equivalenceCache != nil { - nodeCache, _ = g.equivalenceCache.GetNodeCache(nodeName) + nodeCache = g.equivalenceCache.LoadNodeCache(nodeName) } fits, failedPredicates, err := podFitsOnNode( pod, meta, g.cachedNodeInfoMap[nodeName], g.predicates, - g.cache, nodeCache, g.schedulingQueue, g.alwaysCheckAllPredicates, @@ -516,7 +534,6 @@ func podFitsOnNode( meta algorithm.PredicateMetadata, info *schedulercache.NodeInfo, predicateFuncs map[string]algorithm.FitPredicate, - cache schedulercache.Cache, nodeCache *equivalence.NodeCache, queue SchedulingQueue, alwaysCheckAllPredicates bool, @@ -558,7 +575,7 @@ func podFitsOnNode( // TODO(bsalamat): consider using eCache and adding proper eCache invalidations // when pods are nominated or their nominations change. 
eCacheAvailable = equivClass != nil && nodeCache != nil && !podsAdded - for _, predicateKey := range predicates.Ordering() { + for predicateID, predicateKey := range predicates.Ordering() { var ( fit bool reasons []algorithm.PredicateFailureReason @@ -567,7 +584,7 @@ func podFitsOnNode( //TODO (yastij) : compute average predicate restrictiveness to export it as Prometheus metric if predicate, exist := predicateFuncs[predicateKey]; exist { if eCacheAvailable { - fit, reasons, err = nodeCache.RunPredicate(predicate, predicateKey, pod, metaToUse, nodeInfoToUse, equivClass, cache) + fit, reasons, err = nodeCache.RunPredicate(predicate, predicateKey, predicateID, pod, metaToUse, nodeInfoToUse, equivClass) } else { fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse) } @@ -650,6 +667,7 @@ func PrioritizeNodes( results[i] = make(schedulerapi.HostPriorityList, len(nodes)) } } + processNode := func(index int) { nodeInfo := nodeNameToInfo[nodes[index].Name] var err error @@ -660,7 +678,7 @@ func PrioritizeNodes( results[i][index], err = priorityConfigs[i].Map(pod, meta, nodeInfo) if err != nil { appendError(err) - return + results[i][index].Host = nodes[index].Name } } } @@ -991,7 +1009,7 @@ func selectVictimsOnNode( // that we should check is if the "pod" is failing to schedule due to pod affinity // failure. // TODO(bsalamat): Consider checking affinity to lower priority pods if feasible with reasonable performance. 
- if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, nil, queue, false, nil); !fits { + if fits, _, err := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue, false, nil); !fits { if err != nil { glog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err) } @@ -1005,7 +1023,7 @@ func selectVictimsOnNode( violatingVictims, nonViolatingVictims := filterPodsWithPDBViolation(potentialVictims.Items, pdbs) reprievePod := func(p *v1.Pod) bool { addPod(p) - fits, _, _ := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, nil, queue, false, nil) + fits, _, _ := podFitsOnNode(pod, meta, nodeInfoCopy, fitPredicates, nil, queue, false, nil) if !fits { removePod(p) victims = append(victims, p) @@ -1130,6 +1148,7 @@ func NewGenericScheduler( extenders []algorithm.SchedulerExtender, volumeBinder *volumebinder.VolumeBinder, pvcLister corelisters.PersistentVolumeClaimLister, + pdbLister algorithm.PDBLister, alwaysCheckAllPredicates bool, disablePreemption bool, percentageOfNodesToScore int32, @@ -1146,6 +1165,7 @@ func NewGenericScheduler( cachedNodeInfoMap: make(map[string]*schedulercache.NodeInfo), volumeBinder: volumeBinder, pvcLister: pvcLister, + pdbLister: pdbLister, alwaysCheckAllPredicates: alwaysCheckAllPredicates, disablePreemption: disablePreemption, percentageOfNodesToScore: percentageOfNodesToScore, diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go index cd7827b487e..670f2611fc8 100644 --- a/pkg/scheduler/core/generic_scheduler_test.go +++ b/pkg/scheduler/core/generic_scheduler_test.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/scheduler/algorithm" @@ -44,7 +45,8 @@ import ( ) var ( - order = 
[]string{"false", "true", "matches", "nopods", algorithmpredicates.MatchInterPodAffinityPred} + errPrioritize = fmt.Errorf("priority map encounters an error") + order = []string{"false", "true", "matches", "nopods", algorithmpredicates.MatchInterPodAffinityPred} ) func falsePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { @@ -111,6 +113,26 @@ func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercac return reverseResult, nil } +func trueMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { + return schedulerapi.HostPriority{ + Host: nodeInfo.Node().Name, + Score: 1, + }, nil +} + +func falseMapPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { + return schedulerapi.HostPriority{}, errPrioritize +} + +func getNodeReducePriority(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error { + for _, host := range result { + if host.Host == "" { + return fmt.Errorf("unexpected empty host name") + } + } + return nil +} + func makeNodeList(nodeNames []string) []*v1.Node { result := make([]*v1.Node, 0, len(nodeNames)) for _, nodeName := range nodeNames { @@ -399,6 +421,14 @@ func TestGenericScheduler(t *testing.T) { }, }, }, + { + predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, + prioritizers: []algorithm.PriorityConfig{{Map: falseMapPriority, Weight: 1}, {Map: trueMapPriority, Reduce: getNodeReducePriority, Weight: 2}}, + nodes: []string{"2", "1"}, + pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2"}}, + name: "test error with priority map", + wErr: errors.NewAggregate([]error{errPrioritize, errPrioritize}), + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -425,6 +455,7 @@ func TestGenericScheduler(t *testing.T) { 
[]algorithm.SchedulerExtender{}, nil, pvcLister, + schedulertesting.FakePDBLister{}, test.alwaysCheckAllPredicates, false, schedulerapi.DefaultPercentageOfNodesToScore) @@ -457,7 +488,7 @@ func makeScheduler(predicates map[string]algorithm.FitPredicate, nodes []*v1.Nod algorithm.EmptyPredicateMetadataProducer, prioritizers, algorithm.EmptyPriorityMetadataProducer, - nil, nil, nil, false, false, + nil, nil, nil, nil, false, false, schedulerapi.DefaultPercentageOfNodesToScore) cache.UpdateNodeNameToInfoMap(s.(*genericScheduler).cachedNodeInfoMap) return s.(*genericScheduler) @@ -1381,6 +1412,7 @@ func TestPreempt(t *testing.T) { extenders, nil, schedulertesting.FakePersistentVolumeClaimLister{}, + schedulertesting.FakePDBLister{}, false, false, schedulerapi.DefaultPercentageOfNodesToScore) @@ -1474,7 +1506,10 @@ func TestCacheInvalidationRace(t *testing.T) { cacheInvalidated: make(chan struct{}), } - eCache := equivalence.NewCache() + ps := map[string]algorithm.FitPredicate{"testPredicate": testPredicate} + algorithmpredicates.SetPredicatesOrdering([]string{"testPredicate"}) + eCache := equivalence.NewCache(algorithmpredicates.Ordering()) + eCache.GetNodeCache(testNode.Name) // Ensure that equivalence cache invalidation happens after the scheduling cycle starts, but before // the equivalence cache would be updated. go func() { @@ -1490,10 +1525,9 @@ func TestCacheInvalidationRace(t *testing.T) { }() // Set up the scheduler. 
- ps := map[string]algorithm.FitPredicate{"testPredicate": testPredicate} - algorithmpredicates.SetPredicatesOrdering([]string{"testPredicate"}) prioritizers := []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}} pvcLister := schedulertesting.FakePersistentVolumeClaimLister([]*v1.PersistentVolumeClaim{}) + pdbLister := schedulertesting.FakePDBLister{} scheduler := NewGenericScheduler( mockCache, eCache, @@ -1502,7 +1536,90 @@ func TestCacheInvalidationRace(t *testing.T) { algorithm.EmptyPredicateMetadataProducer, prioritizers, algorithm.EmptyPriorityMetadataProducer, - nil, nil, pvcLister, true, false, + nil, nil, pvcLister, pdbLister, + true, false, + schedulerapi.DefaultPercentageOfNodesToScore) + + // First scheduling attempt should fail. + nodeLister := schedulertesting.FakeNodeLister(makeNodeList([]string{"machine1"})) + pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}} + machine, err := scheduler.Schedule(pod, nodeLister) + if machine != "" || err == nil { + t.Error("First scheduling attempt did not fail") + } + + // Second scheduling attempt should succeed because cache was invalidated. + _, err = scheduler.Schedule(pod, nodeLister) + if err != nil { + t.Errorf("Second scheduling attempt failed: %v", err) + } + if callCount != 2 { + t.Errorf("Predicate should have been called twice. Was called %d times.", callCount) + } +} + +// TestCacheInvalidationRace2 tests that cache invalidation is correctly handled +// when an invalidation event happens while a predicate is running. +func TestCacheInvalidationRace2(t *testing.T) { + // Create a predicate that returns false the first time and true on subsequent calls. 
+ var ( + podWillFit = false + callCount int + cycleStart = make(chan struct{}) + cacheInvalidated = make(chan struct{}) + once sync.Once + ) + testPredicate := func(pod *v1.Pod, + meta algorithm.PredicateMetadata, + nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { + callCount++ + once.Do(func() { + cycleStart <- struct{}{} + <-cacheInvalidated + }) + if !podWillFit { + podWillFit = true + return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil + } + return true, nil, nil + } + + // Set up the mock cache. + cache := schedulercache.New(time.Duration(0), wait.NeverStop) + testNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1"}} + cache.AddNode(testNode) + + ps := map[string]algorithm.FitPredicate{"testPredicate": testPredicate} + algorithmpredicates.SetPredicatesOrdering([]string{"testPredicate"}) + eCache := equivalence.NewCache(algorithmpredicates.Ordering()) + eCache.GetNodeCache(testNode.Name) + // Ensure that equivalence cache invalidation happens after the scheduling cycle starts, but before + // the equivalence cache would be updated. + go func() { + <-cycleStart + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "new-pod", UID: "new-pod"}, + Spec: v1.PodSpec{NodeName: "machine1"}} + if err := cache.AddPod(pod); err != nil { + t.Errorf("Could not add pod to cache: %v", err) + } + eCache.InvalidateAllPredicatesOnNode("machine1") + cacheInvalidated <- struct{}{} + }() + + // Set up the scheduler. 
+ prioritizers := []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}} + pvcLister := schedulertesting.FakePersistentVolumeClaimLister([]*v1.PersistentVolumeClaim{}) + pdbLister := schedulertesting.FakePDBLister{} + scheduler := NewGenericScheduler( + cache, + eCache, + NewSchedulingQueue(), + ps, + algorithm.EmptyPredicateMetadataProducer, + prioritizers, + algorithm.EmptyPriorityMetadataProducer, + nil, nil, pvcLister, pdbLister, true, false, schedulerapi.DefaultPercentageOfNodesToScore) // First scheduling attempt should fail. diff --git a/pkg/scheduler/core/scheduling_queue.go b/pkg/scheduler/core/scheduling_queue.go index 3c87aa246e0..bd91a5381b9 100644 --- a/pkg/scheduler/core/scheduling_queue.go +++ b/pkg/scheduler/core/scheduling_queue.go @@ -43,6 +43,10 @@ import ( "k8s.io/kubernetes/pkg/scheduler/util" ) +var ( + queueClosed = "scheduling queue is closed" +) + // SchedulingQueue is an interface for a queue to store pods waiting to be scheduled. // The interface follows a pattern similar to cache.FIFO and cache.Heap and // makes it easy to use those data structures as a SchedulingQueue. @@ -50,6 +54,8 @@ type SchedulingQueue interface { Add(pod *v1.Pod) error AddIfNotPresent(pod *v1.Pod) error AddUnschedulableIfNotPresent(pod *v1.Pod) error + // Pop removes the head of the queue and returns it. It blocks if the + // queue is empty and waits until a new item is added to the queue. Pop() (*v1.Pod, error) Update(oldPod, newPod *v1.Pod) error Delete(pod *v1.Pod) error @@ -58,6 +64,9 @@ type SchedulingQueue interface { AssignedPodUpdated(pod *v1.Pod) WaitingPodsForNode(nodeName string) []*v1.Pod WaitingPods() []*v1.Pod + // Close closes the SchedulingQueue so that the goroutine which is + // waiting to pop items can exit gracefully. + Close() } // NewSchedulingQueue initializes a new scheduling queue. 
If pod priority is @@ -109,12 +118,11 @@ func (f *FIFO) Delete(pod *v1.Pod) error { // shouldn't be used in production code, but scheduler has always been using it. // This function does minimal error checking. func (f *FIFO) Pop() (*v1.Pod, error) { - var result interface{} - f.FIFO.Pop(func(obj interface{}) error { - result = obj - return nil - }) - return result.(*v1.Pod), nil + result, err := f.FIFO.Pop(func(obj interface{}) error { return nil }) + if err == cache.FIFOClosedError { + return nil, fmt.Errorf(queueClosed) + } + return result.(*v1.Pod), err } // WaitingPods returns all the waiting pods in the queue. @@ -144,6 +152,11 @@ func (f *FIFO) WaitingPodsForNode(nodeName string) []*v1.Pod { return nil } +// Close closes the FIFO queue. +func (f *FIFO) Close() { + f.FIFO.Close() +} + // NewFIFO creates a FIFO object. func NewFIFO() *FIFO { return &FIFO{FIFO: cache.NewFIFO(cache.MetaNamespaceKeyFunc)} @@ -179,6 +192,10 @@ type PriorityQueue struct { // pod was in flight (we were trying to schedule it). In such a case, we put // the pod back into the activeQ if it is determined unschedulable. receivedMoveRequest bool + + // closed indicates that the queue is closed. + // It is mainly used to let Pop() exit its control loop while waiting for an item. + closed bool } // Making sure that PriorityQueue implements SchedulingQueue. @@ -312,6 +329,12 @@ func (p *PriorityQueue) Pop() (*v1.Pod, error) { p.lock.Lock() defer p.lock.Unlock() for len(p.activeQ.data.queue) == 0 { + // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. + // When Close() is called, the p.closed is set and the condition is broadcast, + // which causes this loop to continue and return from the Pop(). + if p.closed { + return nil, fmt.Errorf(queueClosed) + } p.cond.Wait() } obj, err := p.activeQ.Pop() @@ -485,6 +508,14 @@ func (p *PriorityQueue) WaitingPods() []*v1.Pod { return result } +// Close closes the priority queue. 
+func (p *PriorityQueue) Close() { + p.lock.Lock() + defer p.lock.Unlock() + p.closed = true + p.cond.Broadcast() +} + // UnschedulablePodsMap holds pods that cannot be scheduled. This data structure // is used to implement unschedulableQ. type UnschedulablePodsMap struct { diff --git a/pkg/scheduler/core/scheduling_queue_test.go b/pkg/scheduler/core/scheduling_queue_test.go index 38aa087419a..9233c9969e7 100644 --- a/pkg/scheduler/core/scheduling_queue_test.go +++ b/pkg/scheduler/core/scheduling_queue_test.go @@ -17,6 +17,7 @@ limitations under the License. package core import ( + "fmt" "reflect" "sync" "testing" @@ -473,3 +474,40 @@ func TestUnschedulablePodsMap(t *testing.T) { }) } } + +func TestSchedulingQueue_Close(t *testing.T) { + tests := []struct { + name string + q SchedulingQueue + expectedErr error + }{ + { + name: "FIFO close", + q: NewFIFO(), + expectedErr: fmt.Errorf(queueClosed), + }, + { + name: "PriorityQueue close", + q: NewPriorityQueue(), + expectedErr: fmt.Errorf(queueClosed), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + pod, err := test.q.Pop() + if err.Error() != test.expectedErr.Error() { + t.Errorf("Expected err %q from Pop() if queue is closed, but got %q", test.expectedErr.Error(), err.Error()) + } + if pod != nil { + t.Errorf("Expected pod nil from Pop() if queue is closed, but got: %v", pod) + } + }() + test.q.Close() + wg.Wait() + }) + } +} diff --git a/pkg/scheduler/factory/BUILD b/pkg/scheduler/factory/BUILD index 6565076c9b9..c0a1cab3892 100644 --- a/pkg/scheduler/factory/BUILD +++ b/pkg/scheduler/factory/BUILD @@ -33,7 +33,6 @@ go_library( "//pkg/scheduler/util:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -78,7 +77,6 @@ go_test( "//pkg/scheduler/testing:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/pkg/scheduler/factory/cache_comparer.go b/pkg/scheduler/factory/cache_comparer.go index fadd7be2c99..4768b79ef95 100644 --- a/pkg/scheduler/factory/cache_comparer.go +++ b/pkg/scheduler/factory/cache_comparer.go @@ -22,10 +22,8 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/labels" corelisters "k8s.io/client-go/listers/core/v1" - v1beta1 "k8s.io/client-go/listers/policy/v1beta1" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" "k8s.io/kubernetes/pkg/scheduler/core" ) @@ -33,7 +31,6 @@ import ( type cacheComparer struct { nodeLister corelisters.NodeLister podLister corelisters.PodLister - pdbLister v1beta1.PodDisruptionBudgetLister cache schedulercache.Cache podQueue core.SchedulingQueue @@ -54,11 +51,6 @@ func (c *cacheComparer) Compare() error { return err } - pdbs, err := c.pdbLister.List(labels.Everything()) - if err != nil { - return err - } - snapshot := c.cache.Snapshot() waitingPods := c.podQueue.WaitingPods() @@ -71,10 +63,6 @@ func (c *cacheComparer) Compare() error { glog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant) } - if missed, redundant := c.ComparePdbs(pdbs, snapshot.Pdbs); len(missed)+len(redundant) != 0 { - glog.Warningf("cache mismatch: missed pdbs: %s; redundant pdbs: %s", missed, redundant) - } - return nil } @@ -114,20 +102,6 @@ func 
(c compareStrategy) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[ return compareStrings(actual, cached) } -func (c compareStrategy) ComparePdbs(pdbs []*policy.PodDisruptionBudget, pdbCache map[string]*policy.PodDisruptionBudget) (missed, redundant []string) { - actual := []string{} - for _, pdb := range pdbs { - actual = append(actual, string(pdb.UID)) - } - - cached := []string{} - for pdbUID := range pdbCache { - cached = append(cached, pdbUID) - } - - return compareStrings(actual, cached) -} - func compareStrings(actual, cached []string) (missed, redundant []string) { missed, redundant = []string{}, []string{} diff --git a/pkg/scheduler/factory/cache_comparer_test.go b/pkg/scheduler/factory/cache_comparer_test.go index 2a4ed53df4e..859c53b391c 100644 --- a/pkg/scheduler/factory/cache_comparer_test.go +++ b/pkg/scheduler/factory/cache_comparer_test.go @@ -21,7 +21,6 @@ import ( "testing" "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/types" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" ) @@ -191,68 +190,3 @@ func testComparePods(actual, cached, queued, missing, redundant []string, t *tes t.Errorf("redundant expected to be %s; got %s", redundant, r) } } - -func TestComparePdbs(t *testing.T) { - tests := []struct { - name string - actual []string - cached []string - missing []string - redundant []string - }{ - { - name: "redundant cache value", - actual: []string{"foo", "bar"}, - cached: []string{"bar", "foo", "foobar"}, - missing: []string{}, - redundant: []string{"foobar"}, - }, - { - name: "missing cache value", - actual: []string{"foo", "bar", "foobar"}, - cached: []string{"bar", "foo"}, - missing: []string{"foobar"}, - redundant: []string{}, - }, - { - name: "correct cache", - actual: []string{"foo", "bar", "foobar"}, - cached: []string{"bar", "foobar", "foo"}, - missing: []string{}, - redundant: []string{}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - 
testComparePdbs(test.actual, test.cached, test.missing, test.redundant, t) - }) - } -} - -func testComparePdbs(actual, cached, missing, redundant []string, t *testing.T) { - compare := compareStrategy{} - pdbs := []*policy.PodDisruptionBudget{} - for _, uid := range actual { - pdb := &policy.PodDisruptionBudget{} - pdb.UID = types.UID(uid) - pdbs = append(pdbs, pdb) - } - - cache := make(map[string]*policy.PodDisruptionBudget) - for _, uid := range cached { - pdb := &policy.PodDisruptionBudget{} - pdb.UID = types.UID(uid) - cache[uid] = pdb - } - - m, r := compare.ComparePdbs(pdbs, cache) - - if !reflect.DeepEqual(m, missing) { - t.Errorf("missing expected to be %s; got %s", missing, m) - } - - if !reflect.DeepEqual(r, redundant) { - t.Errorf("redundant expected to be %s; got %s", redundant, r) - } -} diff --git a/pkg/scheduler/factory/factory.go b/pkg/scheduler/factory/factory.go index dfffc75e055..a3d62dbfada 100644 --- a/pkg/scheduler/factory/factory.go +++ b/pkg/scheduler/factory/factory.go @@ -28,7 +28,6 @@ import ( "github.com/golang/glog" "k8s.io/api/core/v1" - "k8s.io/api/policy/v1beta1" storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -131,7 +130,7 @@ type configFactory struct { // Handles volume binding decisions volumeBinder *volumebinder.VolumeBinder - // always check all predicates even if the middle of one predicate fails. + // Always check all predicates even if the middle of one predicate fails. alwaysCheckAllPredicates bool // Disable pod preemption or not. @@ -162,7 +161,7 @@ type ConfigFactoryArgs struct { BindTimeoutSeconds int64 } -// NewConfigFactory initializes the default implementation of a Configurator To encourage eventual privatization of the struct type, we only +// NewConfigFactory initializes the default implementation of a Configurator. To encourage eventual privatization of the struct type, we only // return the interface. 
func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { stopEverything := make(chan struct{}) @@ -177,6 +176,7 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { client: args.Client, podLister: schedulerCache, podQueue: core.NewSchedulingQueue(), + nodeLister: args.NodeInformer.Lister(), pVLister: args.PvInformer.Lister(), pVCLister: args.PvcInformer.Lister(), serviceLister: args.ServiceInformer.Lister(), @@ -256,16 +256,6 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { DeleteFunc: c.deleteNodeFromCache, }, ) - c.nodeLister = args.NodeInformer.Lister() - - args.PdbInformer.Informer().AddEventHandler( - cache.ResourceEventHandlerFuncs{ - AddFunc: c.addPDBToCache, - UpdateFunc: c.updatePDBInCache, - DeleteFunc: c.deletePDBFromCache, - }, - ) - c.pdbLister = args.PdbInformer.Lister() // On add and delete of PVs, it will affect equivalence cache items // related to persistent volume @@ -277,7 +267,6 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { DeleteFunc: c.onPvDelete, }, ) - c.pVLister = args.PvInformer.Lister() // This is for MaxPDVolumeCountPredicate: add/delete PVC will affect counts of PV when it is bound. args.PvcInformer.Informer().AddEventHandler( @@ -287,7 +276,6 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { DeleteFunc: c.onPvcDelete, }, ) - c.pVCLister = args.PvcInformer.Lister() // This is for ServiceAffinity: affected by the selector of the service is updated. 
// Also, if new service is added, equivalence cache will also become invalid since @@ -299,7 +287,6 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { DeleteFunc: c.onServiceDelete, }, ) - c.serviceLister = args.ServiceInformer.Lister() // Existing equivalence cache should not be affected by add/delete RC/Deployment etc, // it only make sense when pod is scheduled or deleted @@ -320,7 +307,6 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { comparer := &cacheComparer{ podLister: args.PodInformer.Lister(), nodeLister: args.NodeInformer.Lister(), - pdbLister: args.PdbInformer.Lister(), cache: c.schedulerCache, podQueue: c.podQueue, } @@ -332,6 +318,7 @@ func NewConfigFactory(args *ConfigFactoryArgs) scheduler.Configurator { for { select { case <-c.StopEverything: + c.podQueue.Close() return case <-ch: comparer.Compare() @@ -733,9 +720,11 @@ func (c *configFactory) updatePodInCache(oldObj, newObj interface{}) { return } - // NOTE: Because the scheduler uses snapshots of schedulerCache and the live - // version of equivalencePodCache, updates must be written to schedulerCache - // before invalidating equivalencePodCache. + // NOTE: Updates must be written to scheduler cache before invalidating + // equivalence cache, because we could snapshot equivalence cache after the + // invalidation and then snapshot the cache itself. If the cache is + // snapshotted before updates are written, we would update equivalence + // cache with stale information which is based on snapshot of old cache. 
if err := c.schedulerCache.UpdatePod(oldPod, newPod); err != nil { glog.Errorf("scheduler cache UpdatePod failed: %v", err) } @@ -822,9 +811,11 @@ func (c *configFactory) deletePodFromCache(obj interface{}) { glog.Errorf("cannot convert to *v1.Pod: %v", t) return } - // NOTE: Because the scheduler uses snapshots of schedulerCache and the live - // version of equivalencePodCache, updates must be written to schedulerCache - // before invalidating equivalencePodCache. + // NOTE: Updates must be written to scheduler cache before invalidating + // equivalence cache, because we could snapshot equivalence cache after the + // invalidation and then snapshot the cache itself. If the cache is + // snapshotted before updates are written, we would update equivalence + // cache with stale information which is based on snapshot of old cache. if err := c.schedulerCache.RemovePod(pod); err != nil { glog.Errorf("scheduler cache RemovePod failed: %v", err) } @@ -861,15 +852,17 @@ func (c *configFactory) addNodeToCache(obj interface{}) { return } - if err := c.schedulerCache.AddNode(node); err != nil { - glog.Errorf("scheduler cache AddNode failed: %v", err) - } - + // NOTE: Because the scheduler uses equivalence cache for nodes, we need + // to create it before adding node into scheduler cache. if c.enableEquivalenceClassCache { // GetNodeCache() will lazily create NodeCache for given node if it does not exist. 
c.equivalencePodCache.GetNodeCache(node.GetName()) } + if err := c.schedulerCache.AddNode(node); err != nil { + glog.Errorf("scheduler cache AddNode failed: %v", err) + } + c.podQueue.MoveAllToActiveQueue() // NOTE: add a new node does not affect existing predicates in equivalence cache } @@ -886,9 +879,11 @@ func (c *configFactory) updateNodeInCache(oldObj, newObj interface{}) { return } - // NOTE: Because the scheduler uses snapshots of schedulerCache and the live - // version of equivalencePodCache, updates must be written to schedulerCache - // before invalidating equivalencePodCache. + // NOTE: Updates must be written to scheduler cache before invalidating + // equivalence cache, because we could snapshot equivalence cache after the + // invalidation and then snapshot the cache itself. If the cache is + // snapshotted before updates are written, we would update equivalence + // cache with stale information which is based on snapshot of old cache. if err := c.schedulerCache.UpdateNode(oldNode, newNode); err != nil { glog.Errorf("scheduler cache UpdateNode failed: %v", err) } @@ -982,9 +977,11 @@ func (c *configFactory) deleteNodeFromCache(obj interface{}) { glog.Errorf("cannot convert to *v1.Node: %v", t) return } - // NOTE: Because the scheduler uses snapshots of schedulerCache and the live - // version of equivalencePodCache, updates must be written to schedulerCache - // before invalidating equivalencePodCache. + // NOTE: Updates must be written to scheduler cache before invalidating + // equivalence cache, because we could snapshot equivalence cache after the + // invalidation and then snapshot the cache itself. If the cache is + // snapshotted before updates are written, we would update equivalence + // cache with stale information which is based on snapshot of old cache. 
if err := c.schedulerCache.RemoveNode(node); err != nil { glog.Errorf("scheduler cache RemoveNode failed: %v", err) } @@ -993,56 +990,6 @@ func (c *configFactory) deleteNodeFromCache(obj interface{}) { } } -func (c *configFactory) addPDBToCache(obj interface{}) { - pdb, ok := obj.(*v1beta1.PodDisruptionBudget) - if !ok { - glog.Errorf("cannot convert to *v1beta1.PodDisruptionBudget: %v", obj) - return - } - - if err := c.schedulerCache.AddPDB(pdb); err != nil { - glog.Errorf("scheduler cache AddPDB failed: %v", err) - } -} - -func (c *configFactory) updatePDBInCache(oldObj, newObj interface{}) { - oldPDB, ok := oldObj.(*v1beta1.PodDisruptionBudget) - if !ok { - glog.Errorf("cannot convert oldObj to *v1beta1.PodDisruptionBudget: %v", oldObj) - return - } - newPDB, ok := newObj.(*v1beta1.PodDisruptionBudget) - if !ok { - glog.Errorf("cannot convert newObj to *v1beta1.PodDisruptionBudget: %v", newObj) - return - } - - if err := c.schedulerCache.UpdatePDB(oldPDB, newPDB); err != nil { - glog.Errorf("scheduler cache UpdatePDB failed: %v", err) - } -} - -func (c *configFactory) deletePDBFromCache(obj interface{}) { - var pdb *v1beta1.PodDisruptionBudget - switch t := obj.(type) { - case *v1beta1.PodDisruptionBudget: - pdb = t - case cache.DeletedFinalStateUnknown: - var ok bool - pdb, ok = t.Obj.(*v1beta1.PodDisruptionBudget) - if !ok { - glog.Errorf("cannot convert to *v1beta1.PodDisruptionBudget: %v", t.Obj) - return - } - default: - glog.Errorf("cannot convert to *v1beta1.PodDisruptionBudget: %v", t) - return - } - if err := c.schedulerCache.RemovePDB(pdb); err != nil { - glog.Errorf("scheduler cache RemovePDB failed: %v", err) - } -} - // Create creates a scheduler with the default algorithm provider. 
func (c *configFactory) Create() (*scheduler.Config, error) { return c.CreateFromProvider(DefaultProvider) @@ -1178,7 +1125,7 @@ func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, // Init equivalence class cache if c.enableEquivalenceClassCache { - c.equivalencePodCache = equivalence.NewCache() + c.equivalencePodCache = equivalence.NewCache(predicates.Ordering()) glog.Info("Created equivalence class cache") } @@ -1193,6 +1140,7 @@ func (c *configFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders, c.volumeBinder, c.pVCLister, + c.pdbLister, c.alwaysCheckAllPredicates, c.disablePreemption, c.percentageOfNodesToScore, @@ -1271,6 +1219,7 @@ func (c *configFactory) getPluginArgs() (*PluginFactoryArgs, error) { ReplicaSetLister: c.replicaSetLister, StatefulSetLister: c.statefulSetLister, NodeLister: &nodeLister{c.nodeLister}, + PDBLister: c.pdbLister, NodeInfo: &predicates.CachedNodeInfo{NodeLister: c.nodeLister}, PVInfo: &predicates.CachedPersistentVolumeInfo{PersistentVolumeLister: c.pVLister}, PVCInfo: &predicates.CachedPersistentVolumeClaimInfo{PersistentVolumeClaimLister: c.pVCLister}, @@ -1414,9 +1363,11 @@ func (c *configFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue _, err := c.client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) if err != nil && errors.IsNotFound(err) { node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}} - // NOTE: Because the scheduler uses snapshots of schedulerCache and the live - // version of equivalencePodCache, updates must be written to schedulerCache - // before invalidating equivalencePodCache. + // NOTE: Updates must be written to scheduler cache before invalidating + // equivalence cache, because we could snapshot equivalence cache after the + // invalidation and then snapshot the cache itself. 
If the cache is + // snapshotted before updates are written, we would update equivalence + // cache with stale information which is based on snapshot of old cache. c.schedulerCache.RemoveNode(&node) // invalidate cached predicate for the node if c.enableEquivalenceClassCache { diff --git a/pkg/scheduler/factory/plugins.go b/pkg/scheduler/factory/plugins.go index f9a668b4a9b..8c586bb978c 100644 --- a/pkg/scheduler/factory/plugins.go +++ b/pkg/scheduler/factory/plugins.go @@ -41,6 +41,7 @@ type PluginFactoryArgs struct { ReplicaSetLister algorithm.ReplicaSetLister StatefulSetLister algorithm.StatefulSetLister NodeLister algorithm.NodeLister + PDBLister algorithm.PDBLister NodeInfo predicates.NodeInfo PVInfo predicates.PersistentVolumeInfo PVCInfo predicates.PersistentVolumeClaimInfo diff --git a/pkg/scheduler/metrics/metrics.go b/pkg/scheduler/metrics/metrics.go index bd02b23fb4a..81e047e0c84 100644 --- a/pkg/scheduler/metrics/metrics.go +++ b/pkg/scheduler/metrics/metrics.go @@ -46,6 +46,18 @@ const ( // All the histogram based metrics have 1ms as size for the smallest bucket. var ( + scheduleAttempts = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: SchedulerSubsystem, + Name: "schedule_attempts_total", + Help: "Number of attempts to schedule pods, by the result. 'unschedulable' means a pod could not be scheduled, while 'error' means an internal scheduler problem.", + }, []string{"result"}) + // PodScheduleSuccesses counts how many pods were scheduled. + PodScheduleSuccesses = scheduleAttempts.With(prometheus.Labels{"result": "scheduled"}) + // PodScheduleFailures counts how many pods could not be scheduled. + PodScheduleFailures = scheduleAttempts.With(prometheus.Labels{"result": "unschedulable"}) + // PodScheduleErrors counts how many pods could not be scheduled due to a scheduler error. 
+ PodScheduleErrors = scheduleAttempts.With(prometheus.Labels{"result": "error"}) SchedulingLatency = prometheus.NewSummaryVec( prometheus.SummaryOpts{ Subsystem: SchedulerSubsystem, @@ -135,6 +147,7 @@ var ( }, []string{"result"}) metricsList = []prometheus.Collector{ + scheduleAttempts, SchedulingLatency, E2eSchedulingLatency, SchedulingAlgorithmLatency, diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 3d836b6ee57..be4b49f266d 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -186,7 +186,7 @@ func (sched *Scheduler) Run() { go wait.Until(sched.scheduleOne, 0, sched.config.StopEverything) } -// Config return scheduler's config pointer. It is exposed for testing purposes. +// Config returns scheduler's config pointer. It is exposed for testing purposes. func (sched *Scheduler) Config() *Config { return sched.config } @@ -329,9 +329,11 @@ func (sched *Scheduler) assume(assumed *v1.Pod, host string) error { // If the binding fails, scheduler will release resources allocated to assumed pod // immediately. assumed.Spec.NodeName = host - // NOTE: Because the scheduler uses snapshots of SchedulerCache and the live - // version of Ecache, updates must be written to SchedulerCache before - // invalidating Ecache. + // NOTE: Updates must be written to scheduler cache before invalidating + // equivalence cache, because we could snapshot equivalence cache after the + // invalidation and then snapshot the cache itself. If the cache is + // snapshotted before updates are written, we would update equivalence + // cache with stale information which is based on snapshot of old cache. if err := sched.config.SchedulerCache.AssumePod(assumed); err != nil { glog.Errorf("scheduler cache AssumePod failed: %v", err) @@ -394,6 +396,10 @@ func (sched *Scheduler) bind(assumed *v1.Pod, b *v1.Binding) error { // scheduleOne does the entire scheduling workflow for a single pod. It is serialized on the scheduling algorithm's host fitting. 
func (sched *Scheduler) scheduleOne() { pod := sched.config.NextPod() + // pod could be nil when schedulerQueue is closed + if pod == nil { + return + } if pod.DeletionTimestamp != nil { sched.config.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name) glog.V(3).Infof("Skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name) @@ -416,6 +422,13 @@ func (sched *Scheduler) scheduleOne() { metrics.PreemptionAttempts.Inc() metrics.SchedulingAlgorithmPremptionEvaluationDuration.Observe(metrics.SinceInMicroseconds(preemptionStartTime)) metrics.SchedulingLatency.WithLabelValues(metrics.PreemptionEvaluation).Observe(metrics.SinceInSeconds(preemptionStartTime)) + // Pod did not fit anywhere, so it is counted as a failure. If preemption + // succeeds, the pod should get counted as a success the next time we try to + // schedule it. (hopefully) + metrics.PodScheduleFailures.Inc() + } else { + glog.Errorf("error selecting node for pod: %v", err) + metrics.PodScheduleErrors.Inc() } return } @@ -433,20 +446,26 @@ func (sched *Scheduler) scheduleOne() { // This function modifies 'assumedPod' if volume binding is required. allBound, err := sched.assumeVolumes(assumedPod, suggestedHost) if err != nil { + glog.Errorf("error assuming volumes: %v", err) + metrics.PodScheduleErrors.Inc() return } // assume modifies `assumedPod` by setting NodeName=suggestedHost err = sched.assume(assumedPod, suggestedHost) if err != nil { + glog.Errorf("error assuming pod: %v", err) + metrics.PodScheduleErrors.Inc() return } // bind the pod to its host asynchronously (we can do this b/c of the assumption step above). 
go func() { // Bind volumes first before Pod if !allBound { - err = sched.bindVolumes(assumedPod) + err := sched.bindVolumes(assumedPod) if err != nil { + glog.Errorf("error binding volumes: %v", err) + metrics.PodScheduleErrors.Inc() return } } @@ -460,7 +479,10 @@ func (sched *Scheduler) scheduleOne() { }) metrics.E2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start)) if err != nil { - glog.Errorf("Internal error binding pod: (%v)", err) + glog.Errorf("error binding pod: %v", err) + metrics.PodScheduleErrors.Inc() + } else { + metrics.PodScheduleSuccesses.Inc() } }() } diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index e950141ae43..a5158bb371b 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -561,6 +561,7 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulercache. []algorithm.SchedulerExtender{}, nil, schedulertesting.FakePersistentVolumeClaimLister{}, + schedulertesting.FakePDBLister{}, false, false, api.DefaultPercentageOfNodesToScore) @@ -611,6 +612,7 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc []algorithm.SchedulerExtender{}, nil, schedulertesting.FakePersistentVolumeClaimLister{}, + schedulertesting.FakePDBLister{}, false, false, api.DefaultPercentageOfNodesToScore) diff --git a/pkg/scheduler/testing/fake_cache.go b/pkg/scheduler/testing/fake_cache.go index f03a491cf2c..c7a4d1bfde6 100644 --- a/pkg/scheduler/testing/fake_cache.go +++ b/pkg/scheduler/testing/fake_cache.go @@ -18,7 +18,6 @@ package testing import ( "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/labels" schedulercache "k8s.io/kubernetes/pkg/scheduler/cache" ) @@ -79,20 +78,6 @@ func (f *FakeCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.N return nil } -// AddPDB is a fake method for testing. 
-func (f *FakeCache) AddPDB(pdb *policy.PodDisruptionBudget) error { return nil } - -// UpdatePDB is a fake method for testing. -func (f *FakeCache) UpdatePDB(oldPDB, newPDB *policy.PodDisruptionBudget) error { return nil } - -// RemovePDB is a fake method for testing. -func (f *FakeCache) RemovePDB(pdb *policy.PodDisruptionBudget) error { return nil } - -// ListPDBs is a fake method for testing. -func (f *FakeCache) ListPDBs(selector labels.Selector) ([]*policy.PodDisruptionBudget, error) { - return nil, nil -} - // List is a fake method for testing. func (f *FakeCache) List(s labels.Selector) ([]*v1.Pod, error) { return nil, nil } @@ -106,8 +91,5 @@ func (f *FakeCache) Snapshot() *schedulercache.Snapshot { return &schedulercache.Snapshot{} } -// IsUpToDate is a fake method for testing -func (f *FakeCache) IsUpToDate(*schedulercache.NodeInfo) bool { return true } - // NodeTree is a fake method for testing. func (f *FakeCache) NodeTree() *schedulercache.NodeTree { return nil } diff --git a/pkg/scheduler/testing/fake_lister.go b/pkg/scheduler/testing/fake_lister.go index 5b67a0cade8..8468aa25b64 100644 --- a/pkg/scheduler/testing/fake_lister.go +++ b/pkg/scheduler/testing/fake_lister.go @@ -21,6 +21,7 @@ import ( apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" + policy "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" corelisters "k8s.io/client-go/listers/core/v1" @@ -214,3 +215,11 @@ func (f *fakePersistentVolumeClaimNamespaceLister) Get(name string) (*v1.Persist func (f fakePersistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { return nil, fmt.Errorf("not implemented") } + +// FakePDBLister implements PDBLister on a slice of PodDisruptionBudgets for test purposes. +type FakePDBLister []*policy.PodDisruptionBudget + +// List returns a list of PodDisruptionBudgets. 
+func (f FakePDBLister) List(labels.Selector) ([]*policy.PodDisruptionBudget, error) { + return f, nil +} diff --git a/pkg/serviceaccount/util_test.go b/pkg/serviceaccount/util_test.go index 88888e2e446..75db30cf489 100644 --- a/pkg/serviceaccount/util_test.go +++ b/pkg/serviceaccount/util_test.go @@ -44,6 +44,20 @@ func TestIsServiceAccountToken(t *testing.T) { }, } + secretTypeMistmatch := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "token-secret-2", + Namespace: "default", + UID: "23456", + ResourceVersion: "1", + Annotations: map[string]string{ + v1.ServiceAccountNameKey: "default", + v1.ServiceAccountUIDKey: "12345", + }, + }, + Type: v1.SecretTypeOpaque, + } + saIns := &v1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: "default", @@ -91,6 +105,11 @@ func TestIsServiceAccountToken(t *testing.T) { sa: saInsUIDNotEqual, expect: false, }, + "service account type not equal": { + secret: secretTypeMistmatch, + sa: saIns, + expect: false, + }, } for k, v := range tests { diff --git a/pkg/util/BUILD b/pkg/util/BUILD index d80a3832de1..4e508d8a587 100644 --- a/pkg/util/BUILD +++ b/pkg/util/BUILD @@ -64,7 +64,6 @@ filegroup( "//pkg/util/term:all-srcs", "//pkg/util/threading:all-srcs", "//pkg/util/tolerations:all-srcs", - "//pkg/util/version:all-srcs", "//pkg/util/workqueue/prometheus:all-srcs", ], tags = ["automanaged"], diff --git a/pkg/util/ipset/ipset.go b/pkg/util/ipset/ipset.go index 4bc4a739d38..fd367f28121 100644 --- a/pkg/util/ipset/ipset.go +++ b/pkg/util/ipset/ipset.go @@ -286,7 +286,7 @@ func (runner *runner) CreateSet(set *IPSet, ignoreExistErr bool) error { // otherwise raised when the same set (setname and create parameters are identical) already exists. 
func (runner *runner) createSet(set *IPSet, ignoreExistErr bool) error { args := []string{"create", set.Name, string(set.SetType)} - if set.SetType == HashIPPortIP || set.SetType == HashIPPort { + if set.SetType == HashIPPortIP || set.SetType == HashIPPort || set.SetType == HashIPPortNet { args = append(args, "family", set.HashFamily, "hashsize", strconv.Itoa(set.HashSize), @@ -485,7 +485,7 @@ func validateProtocol(protocol string) bool { if protocol == ProtocolTCP || protocol == ProtocolUDP || protocol == ProtocolSCTP { return true } - glog.Errorf("Invalid entry's protocol: %s, supported protocols are [%s, %s]", protocol, ProtocolTCP, ProtocolUDP, ProtocolSCTP) + glog.Errorf("Invalid entry's protocol: %s, supported protocols are [%s, %s, %s]", protocol, ProtocolTCP, ProtocolUDP, ProtocolSCTP) return false } diff --git a/pkg/util/iptables/BUILD b/pkg/util/iptables/BUILD index 3e58721fe55..40529906a58 100644 --- a/pkg/util/iptables/BUILD +++ b/pkg/util/iptables/BUILD @@ -18,8 +18,8 @@ go_library( importpath = "k8s.io/kubernetes/pkg/util/iptables", deps = [ "//pkg/util/dbus:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/trace:go_default_library", "//vendor/github.com/godbus/dbus:go_default_library", "//vendor/github.com/golang/glog:go_default_library", diff --git a/pkg/util/iptables/iptables.go b/pkg/util/iptables/iptables.go index 8c9b6722aff..e81c21497ba 100644 --- a/pkg/util/iptables/iptables.go +++ b/pkg/util/iptables/iptables.go @@ -28,9 +28,9 @@ import ( godbus "github.com/godbus/dbus" "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/sets" + utilversion "k8s.io/apimachinery/pkg/util/version" utiltrace "k8s.io/apiserver/pkg/util/trace" utildbus "k8s.io/kubernetes/pkg/util/dbus" - utilversion "k8s.io/kubernetes/pkg/util/version" utilexec "k8s.io/utils/exec" 
) diff --git a/pkg/util/ipvs/ipvs.go b/pkg/util/ipvs/ipvs.go index 58e76a56c90..ca1b55696b4 100644 --- a/pkg/util/ipvs/ipvs.go +++ b/pkg/util/ipvs/ipvs.go @@ -41,6 +41,8 @@ type Interface interface { GetRealServers(*VirtualServer) ([]*RealServer, error) // DeleteRealServer deletes the specified real server from the specified virtual server. DeleteRealServer(*VirtualServer, *RealServer) error + // UpdateRealServer updates the specified real server from the specified virtual server. + UpdateRealServer(*VirtualServer, *RealServer) error } // VirtualServer is an user-oriented definition of an IPVS virtual server in its entirety. @@ -91,9 +93,11 @@ func (svc *VirtualServer) String() string { // RealServer is an user-oriented definition of an IPVS real server in its entirety. type RealServer struct { - Address net.IP - Port uint16 - Weight int + Address net.IP + Port uint16 + Weight int + ActiveConn int + InactiveConn int } func (rs *RealServer) String() string { diff --git a/pkg/util/ipvs/ipvs_linux.go b/pkg/util/ipvs/ipvs_linux.go index 47c640c183f..b7fcca90531 100644 --- a/pkg/util/ipvs/ipvs_linux.go +++ b/pkg/util/ipvs/ipvs_linux.go @@ -144,6 +144,18 @@ func (runner *runner) DeleteRealServer(vs *VirtualServer, rs *RealServer) error return runner.ipvsHandle.DelDestination(svc, dst) } +func (runner *runner) UpdateRealServer(vs *VirtualServer, rs *RealServer) error { + svc, err := toIPVSService(vs) + if err != nil { + return err + } + dst, err := toIPVSDestination(rs) + if err != nil { + return err + } + return runner.ipvsHandle.UpdateDestination(svc, dst) +} + // GetRealServers is part of ipvs.Interface. 
func (runner *runner) GetRealServers(vs *VirtualServer) ([]*RealServer, error) { svc, err := toIPVSService(vs) @@ -203,9 +215,11 @@ func toRealServer(dst *libipvs.Destination) (*RealServer, error) { return nil, errors.New("ipvs destination should not be empty") } return &RealServer{ - Address: dst.Address, - Port: dst.Port, - Weight: dst.Weight, + Address: dst.Address, + Port: dst.Port, + Weight: dst.Weight, + ActiveConn: dst.ActiveConnections, + InactiveConn: dst.InactiveConnections, }, nil } diff --git a/pkg/util/ipvs/ipvs_unsupported.go b/pkg/util/ipvs/ipvs_unsupported.go index dd4d5b625be..86447d57c59 100644 --- a/pkg/util/ipvs/ipvs_unsupported.go +++ b/pkg/util/ipvs/ipvs_unsupported.go @@ -68,4 +68,8 @@ func (runner *runner) DeleteRealServer(*VirtualServer, *RealServer) error { return fmt.Errorf("IPVS not supported for this platform") } +func (runner *runner) UpdateRealServer(*VirtualServer, *RealServer) error { + return fmt.Errorf("IPVS not supported for this platform") +} + var _ = Interface(&runner{}) diff --git a/pkg/util/ipvs/testing/fake.go b/pkg/util/ipvs/testing/fake.go index 6e015a20ee6..bfc854bb365 100644 --- a/pkg/util/ipvs/testing/fake.go +++ b/pkg/util/ipvs/testing/fake.go @@ -193,4 +193,13 @@ func (f *FakeIPVS) DeleteRealServer(serv *utilipvs.VirtualServer, dest *utilipvs return nil } +// UpdateRealServer is a fake implementation, it deletes the old real server then add new real server +func (f *FakeIPVS) UpdateRealServer(serv *utilipvs.VirtualServer, dest *utilipvs.RealServer) error { + err := f.DeleteRealServer(serv, dest) + if err != nil { + return err + } + return f.AddRealServer(serv, dest) +} + var _ = utilipvs.Interface(&FakeIPVS{}) diff --git a/pkg/util/ipvs/testing/fake_test.go b/pkg/util/ipvs/testing/fake_test.go index c07a2617f63..8bed6fc7825 100644 --- a/pkg/util/ipvs/testing/fake_test.go +++ b/pkg/util/ipvs/testing/fake_test.go @@ -124,9 +124,9 @@ func TestRealServer(t *testing.T) { Protocol: string("TCP"), } rss := 
[]*utilipvs.RealServer{ - {net.ParseIP("172.16.2.1"), 8080, 1}, - {net.ParseIP("172.16.2.2"), 8080, 2}, - {net.ParseIP("172.16.2.3"), 8080, 3}, + {Address: net.ParseIP("172.16.2.1"), Port: 8080, Weight: 1}, + {Address: net.ParseIP("172.16.2.2"), Port: 8080, Weight: 2}, + {Address: net.ParseIP("172.16.2.3"), Port: 8080, Weight: 3}, } err := fake.AddVirtualServer(vs) if err != nil { diff --git a/pkg/util/mount/BUILD b/pkg/util/mount/BUILD index e8bf8b8617e..8a7211ec4af 100644 --- a/pkg/util/mount/BUILD +++ b/pkg/util/mount/BUILD @@ -68,6 +68,7 @@ go_test( srcs = [ "exec_mount_test.go", "mount_linux_test.go", + "mount_test.go", "mount_windows_test.go", "nsenter_mount_test.go", "safe_format_and_mount_test.go", diff --git a/pkg/util/mount/exec_mount.go b/pkg/util/mount/exec_mount.go index 3c6638328f6..226f02704cc 100644 --- a/pkg/util/mount/exec_mount.go +++ b/pkg/util/mount/exec_mount.go @@ -44,10 +44,10 @@ var _ Interface = &execMounter{} // Mount runs mount(8) using given exec interface. func (m *execMounter) Mount(source string, target string, fstype string, options []string) error { - bind, bindRemountOpts := isBind(options) + bind, bindOpts, bindRemountOpts := isBind(options) if bind { - err := m.doExecMount(source, target, fstype, []string{"bind"}) + err := m.doExecMount(source, target, fstype, bindOpts) if err != nil { return err } diff --git a/pkg/util/mount/mount.go b/pkg/util/mount/mount.go index b48caaffbb6..8eb761cb065 100644 --- a/pkg/util/mount/mount.go +++ b/pkg/util/mount/mount.go @@ -251,9 +251,14 @@ func GetDeviceNameFromMount(mounter Interface, mountPath string) (string, int, e // It is more extensive than IsLikelyNotMountPoint // and it detects bind mounts in linux func IsNotMountPoint(mounter Interface, file string) (bool, error) { + // Resolve any symlinks in file, kernel would do the same and use the resolved path in /proc/mounts + resolvedFile, err := mounter.EvalHostSymlinks(file) + if err != nil { + return true, err + } // 
IsLikelyNotMountPoint provides a quick check // to determine whether file IS A mountpoint - notMnt, notMntErr := mounter.IsLikelyNotMountPoint(file) + notMnt, notMntErr := mounter.IsLikelyNotMountPoint(resolvedFile) if notMntErr != nil && os.IsPermission(notMntErr) { // We were not allowed to do the simple stat() check, e.g. on NFS with // root_squash. Fall back to /proc/mounts check below. @@ -274,7 +279,7 @@ func IsNotMountPoint(mounter Interface, file string) (bool, error) { return notMnt, mountPointsErr } for _, mp := range mountPoints { - if mounter.IsMountPointMatch(mp, file) { + if mounter.IsMountPointMatch(mp, resolvedFile) { notMnt = false break } @@ -286,7 +291,7 @@ func IsNotMountPoint(mounter Interface, file string) (bool, error) { // use in case of bind mount, due to the fact that bind mount doesn't respect mount options. // The list equals: // options - 'bind' + 'remount' (no duplicate) -func isBind(options []string) (bool, []string) { +func isBind(options []string) (bool, []string, []string) { // Because we have an FD opened on the subpath bind mount, the "bind" option // needs to be included, otherwise the mount target will error as busy if you // remount as readonly. @@ -295,22 +300,36 @@ func isBind(options []string) (bool, []string) { // volume mount to be read only. bindRemountOpts := []string{"bind", "remount"} bind := false + bindOpts := []string{"bind"} - if len(options) != 0 { - for _, option := range options { - switch option { - case "bind": - bind = true - break - case "remount": - break - default: - bindRemountOpts = append(bindRemountOpts, option) - } + // _netdev is a userspace mount option and does not automatically get added when + // bind mount is created and hence we must carry it over. 
+ if checkForNetDev(options) { + bindOpts = append(bindOpts, "_netdev") + } + + for _, option := range options { + switch option { + case "bind": + bind = true + break + case "remount": + break + default: + bindRemountOpts = append(bindRemountOpts, option) } } - return bind, bindRemountOpts + return bind, bindOpts, bindRemountOpts +} + +func checkForNetDev(options []string) bool { + for _, option := range options { + if option == "_netdev" { + return true + } + } + return false } // TODO: this is a workaround for the unmount device issue caused by gci mounter. diff --git a/pkg/util/mount/mount_linux.go b/pkg/util/mount/mount_linux.go index 6b1c0010442..c3f3a226afb 100644 --- a/pkg/util/mount/mount_linux.go +++ b/pkg/util/mount/mount_linux.go @@ -89,9 +89,9 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio // Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty. // All Linux distros are expected to be shipped with a mount utility that a support bind mounts. 
mounterPath := "" - bind, bindRemountOpts := isBind(options) + bind, bindOpts, bindRemountOpts := isBind(options) if bind { - err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, []string{"bind"}) + err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts) if err != nil { return err } @@ -1005,6 +1005,11 @@ func (mounter *Mounter) SafeMakeDir(subdir string, base string, perm os.FileMode } func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { + if _, err := os.Stat(pathname); os.IsNotExist(err) { + return []string{}, nil + } else if err != nil { + return nil, err + } realpath, err := filepath.EvalSymlinks(pathname) if err != nil { return nil, err diff --git a/pkg/util/mount/mount_linux_test.go b/pkg/util/mount/mount_linux_test.go index 530899d5ab0..417592602da 100644 --- a/pkg/util/mount/mount_linux_test.go +++ b/pkg/util/mount/mount_linux_test.go @@ -110,6 +110,10 @@ func TestGetMountRefs(t *testing.T) { "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd2", }, }, + { + "/var/fake/directory/that/doesnt/exist", + []string{}, + }, } for i, test := range tests { diff --git a/pkg/util/mount/mount_test.go b/pkg/util/mount/mount_test.go new file mode 100644 index 00000000000..26ba8d79d36 --- /dev/null +++ b/pkg/util/mount/mount_test.go @@ -0,0 +1,60 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mount + +import ( + "reflect" + "testing" +) + +func TestIsBind(t *testing.T) { + tests := []struct { + mountOption []string + isBind bool + expectedBindOpts []string + expectedRemountOpts []string + }{ + { + []string{"vers=2", "ro", "_netdev"}, + false, + []string{}, + []string{}, + }, + { + + []string{"bind", "vers=2", "ro", "_netdev"}, + true, + []string{"bind", "_netdev"}, + []string{"bind", "remount", "vers=2", "ro", "_netdev"}, + }, + } + for _, test := range tests { + bind, bindOpts, bindRemountOpts := isBind(test.mountOption) + if bind != test.isBind { + t.Errorf("Expected bind to be %v but got %v", test.isBind, bind) + } + if test.isBind { + if !reflect.DeepEqual(test.expectedBindOpts, bindOpts) { + t.Errorf("Expected bind mount options to be %+v got %+v", test.expectedBindOpts, bindOpts) + } + if !reflect.DeepEqual(test.expectedRemountOpts, bindRemountOpts) { + t.Errorf("Expected remount options to be %+v got %+v", test.expectedRemountOpts, bindRemountOpts) + } + } + + } +} diff --git a/pkg/util/mount/mount_windows.go b/pkg/util/mount/mount_windows.go index b3206d18c1c..535803abf50 100644 --- a/pkg/util/mount/mount_windows.go +++ b/pkg/util/mount/mount_windows.go @@ -68,7 +68,7 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio bindSource := "" // tell it's going to mount azure disk or azure file according to options - if bind, _ := isBind(options); bind { + if bind, _, _ := isBind(options); bind { // mount azure disk bindSource = normalizeWindowsPath(source) } else { @@ -458,12 +458,14 @@ func getAllParentLinks(path string) ([]string, error) { return links, nil } +// GetMountRefs : empty implementation here since there is no place to query all mount points on Windows func (mounter *Mounter) GetMountRefs(pathname string) ([]string, error) { - refs, err := getAllParentLinks(normalizeWindowsPath(pathname)) - if err != nil { + if _, err := os.Stat(normalizeWindowsPath(pathname)); os.IsNotExist(err) { + return 
[]string{}, nil + } else if err != nil { return nil, err } - return refs, nil + return []string{pathname}, nil } // Note that on windows, it always returns 0. We actually don't set FSGroup on diff --git a/pkg/util/mount/mount_windows_test.go b/pkg/util/mount/mount_windows_test.go index f495c889cfc..c70f2778609 100644 --- a/pkg/util/mount/mount_windows_test.go +++ b/pkg/util/mount/mount_windows_test.go @@ -111,30 +111,25 @@ func setEquivalent(set1, set2 []string) bool { // this func must run in admin mode, otherwise it will fail func TestGetMountRefs(t *testing.T) { - fm := &FakeMounter{MountPoints: []MountPoint{}} - mountPath := `c:\secondmountpath` - expectedRefs := []string{`c:\`, `c:\firstmountpath`, mountPath} - - // remove symbolic links first - for i := 1; i < len(expectedRefs); i++ { - removeLink(expectedRefs[i]) + tests := []struct { + mountPath string + expectedRefs []string + }{ + { + mountPath: `c:\windows`, + expectedRefs: []string{`c:\windows`}, + }, + { + mountPath: `c:\doesnotexist`, + expectedRefs: []string{}, + }, } - // create symbolic links - for i := 1; i < len(expectedRefs); i++ { - if err := makeLink(expectedRefs[i], expectedRefs[i-1]); err != nil { - t.Errorf("makeLink failed: %v", err) - } - } + mounter := Mounter{"fake/path"} - if refs, err := fm.GetMountRefs(mountPath); err != nil || !setEquivalent(expectedRefs, refs) { - t.Errorf("getMountRefs(%q) = %v, error: %v; expected %v", mountPath, refs, err, expectedRefs) - } - - // remove symbolic links - for i := 1; i < len(expectedRefs); i++ { - if err := removeLink(expectedRefs[i]); err != nil { - t.Errorf("removeLink failed: %v", err) + for _, test := range tests { + if refs, err := mounter.GetMountRefs(test.mountPath); err != nil || !setEquivalent(test.expectedRefs, refs) { + t.Errorf("getMountRefs(%q) = %v, error: %v; expected %v", test.mountPath, refs, err, test.expectedRefs) } } } diff --git a/pkg/util/mount/nsenter_mount.go b/pkg/util/mount/nsenter_mount.go index a798defe9bf..e055c7c0074 
100644 --- a/pkg/util/mount/nsenter_mount.go +++ b/pkg/util/mount/nsenter_mount.go @@ -61,10 +61,10 @@ var _ = Interface(&NsenterMounter{}) // Mount runs mount(8) in the host's root mount namespace. Aside from this // aspect, Mount has the same semantics as the mounter returned by mount.New() func (n *NsenterMounter) Mount(source string, target string, fstype string, options []string) error { - bind, bindRemountOpts := isBind(options) + bind, bindOpts, bindRemountOpts := isBind(options) if bind { - err := n.doNsenterMount(source, target, fstype, []string{"bind"}) + err := n.doNsenterMount(source, target, fstype, bindOpts) if err != nil { return err } @@ -337,6 +337,13 @@ func (mounter *NsenterMounter) SafeMakeDir(subdir string, base string, perm os.F } func (mounter *NsenterMounter) GetMountRefs(pathname string) ([]string, error) { + exists, err := mounter.ExistsPath(pathname) + if err != nil { + return nil, err + } + if !exists { + return []string{}, nil + } hostpath, err := mounter.ne.EvalSymlinks(pathname, true /* mustExist */) if err != nil { return nil, err diff --git a/pkg/version/doc.go b/pkg/version/doc.go index e4d68bc9f49..a4a1c035fc8 100644 --- a/pkg/version/doc.go +++ b/pkg/version/doc.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:openapi-gen=true + // Package version supplies version information collected at build time to // kubernetes components. 
-// +k8s:openapi-gen=true package version // import "k8s.io/kubernetes/pkg/version" diff --git a/pkg/volume/BUILD b/pkg/volume/BUILD index bbe76dd8dcd..344aa83b999 100644 --- a/pkg/volume/BUILD +++ b/pkg/volume/BUILD @@ -69,7 +69,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//pkg/volume/aws_ebs:all-srcs", + "//pkg/volume/awsebs:all-srcs", "//pkg/volume/azure_dd:all-srcs", "//pkg/volume/azure_file:all-srcs", "//pkg/volume/cephfs:all-srcs", diff --git a/pkg/volume/aws_ebs/BUILD b/pkg/volume/awsebs/BUILD similarity index 97% rename from pkg/volume/aws_ebs/BUILD rename to pkg/volume/awsebs/BUILD index 532ddf8b729..51322cbfe17 100644 --- a/pkg/volume/aws_ebs/BUILD +++ b/pkg/volume/awsebs/BUILD @@ -15,7 +15,7 @@ go_library( "aws_util.go", "doc.go", ], - importpath = "k8s.io/kubernetes/pkg/volume/aws_ebs", + importpath = "k8s.io/kubernetes/pkg/volume/awsebs", deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/aws:go_default_library", diff --git a/pkg/volume/aws_ebs/OWNERS b/pkg/volume/awsebs/OWNERS similarity index 100% rename from pkg/volume/aws_ebs/OWNERS rename to pkg/volume/awsebs/OWNERS diff --git a/pkg/volume/aws_ebs/attacher.go b/pkg/volume/awsebs/attacher.go similarity index 96% rename from pkg/volume/aws_ebs/attacher.go rename to pkg/volume/awsebs/attacher.go index da25691d41a..81f9411217d 100644 --- a/pkg/volume/aws_ebs/attacher.go +++ b/pkg/volume/awsebs/attacher.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package aws_ebs +package awsebs import ( "fmt" @@ -163,7 +163,7 @@ func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, d } if devicePath == "" { - return "", fmt.Errorf("WaitForAttach failed for AWS Volume %q: devicePath is empty.", volumeID) + return "", fmt.Errorf("waitForAttach failed for AWS Volume %q: devicePath is empty", volumeID) } ticker := time.NewTicker(checkSleepDuration) @@ -175,7 +175,7 @@ func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, d select { case <-ticker.C: glog.V(5).Infof("Checking AWS Volume %q is attached.", volumeID) - devicePaths := getDiskByIdPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath) + devicePaths := getDiskByIDPaths(aws.KubernetesVolumeID(volumeSource.VolumeID), partition, devicePath) path, err := verifyDevicePath(devicePaths) if err != nil { // Log error, if any, and continue checking periodically. See issue #11321 @@ -186,7 +186,7 @@ func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, d return path, nil } case <-timer.C: - return "", fmt.Errorf("Could not find attached AWS Volume %q. Timeout waiting for mount paths to be created.", volumeID) + return "", fmt.Errorf("could not find attached AWS Volume %q. Timeout waiting for mount paths to be created", volumeID) } } } diff --git a/pkg/volume/aws_ebs/attacher_test.go b/pkg/volume/awsebs/attacher_test.go similarity index 98% rename from pkg/volume/aws_ebs/attacher_test.go rename to pkg/volume/awsebs/attacher_test.go index 36ed854d1a4..7fc397cc89a 100644 --- a/pkg/volume/aws_ebs/attacher_test.go +++ b/pkg/volume/awsebs/attacher_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package aws_ebs +package awsebs import ( "errors" @@ -219,7 +219,7 @@ func (testcase *testcase) AttachDisk(diskName aws.KubernetesVolumeID, nodeName t // testcase.attach looks uninitialized, test did not expect to call // AttachDisk testcase.t.Errorf("Unexpected AttachDisk call!") - return "", errors.New("Unexpected AttachDisk call!") + return "", errors.New("unexpected AttachDisk call") } if expected.diskName != diskName { @@ -244,7 +244,7 @@ func (testcase *testcase) DetachDisk(diskName aws.KubernetesVolumeID, nodeName t // testcase.detach looks uninitialized, test did not expect to call // DetachDisk testcase.t.Errorf("Unexpected DetachDisk call!") - return "", errors.New("Unexpected DetachDisk call!") + return "", errors.New("unexpected DetachDisk call") } if expected.diskName != diskName { diff --git a/pkg/volume/aws_ebs/aws_ebs.go b/pkg/volume/awsebs/aws_ebs.go similarity index 99% rename from pkg/volume/aws_ebs/aws_ebs.go rename to pkg/volume/awsebs/aws_ebs.go index 1e3ef4f1652..1d52bf53de1 100644 --- a/pkg/volume/aws_ebs/aws_ebs.go +++ b/pkg/volume/awsebs/aws_ebs.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package aws_ebs +package awsebs import ( "context" @@ -39,7 +39,7 @@ import ( "k8s.io/kubernetes/pkg/volume/util" ) -// This is the primary entrypoint for volume plugins. +// ProbeVolumePlugins is the primary entrypoint for volume plugins. 
func ProbeVolumePlugins() []volume.VolumePlugin { return []volume.VolumePlugin{&awsElasticBlockStorePlugin{nil}} } diff --git a/pkg/volume/aws_ebs/aws_ebs_block.go b/pkg/volume/awsebs/aws_ebs_block.go similarity index 99% rename from pkg/volume/aws_ebs/aws_ebs_block.go rename to pkg/volume/awsebs/aws_ebs_block.go index 997f5bbc7ef..10b5e422846 100644 --- a/pkg/volume/aws_ebs/aws_ebs_block.go +++ b/pkg/volume/awsebs/aws_ebs_block.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package aws_ebs +package awsebs import ( "fmt" diff --git a/pkg/volume/aws_ebs/aws_ebs_block_test.go b/pkg/volume/awsebs/aws_ebs_block_test.go similarity index 99% rename from pkg/volume/aws_ebs/aws_ebs_block_test.go rename to pkg/volume/awsebs/aws_ebs_block_test.go index 5181492d6c0..f778c63fa61 100644 --- a/pkg/volume/aws_ebs/aws_ebs_block_test.go +++ b/pkg/volume/awsebs/aws_ebs_block_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package aws_ebs +package awsebs import ( "os" diff --git a/pkg/volume/aws_ebs/aws_ebs_test.go b/pkg/volume/awsebs/aws_ebs_test.go similarity index 99% rename from pkg/volume/aws_ebs/aws_ebs_test.go rename to pkg/volume/awsebs/aws_ebs_test.go index d5506f92c43..9325a7de8b2 100644 --- a/pkg/volume/aws_ebs/aws_ebs_test.go +++ b/pkg/volume/awsebs/aws_ebs_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package aws_ebs +package awsebs import ( "fmt" diff --git a/pkg/volume/aws_ebs/aws_util.go b/pkg/volume/awsebs/aws_util.go similarity index 97% rename from pkg/volume/aws_ebs/aws_util.go rename to pkg/volume/awsebs/aws_util.go index 95a5154a351..9f1d9a3d2e7 100644 --- a/pkg/volume/aws_ebs/aws_util.go +++ b/pkg/volume/awsebs/aws_util.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package aws_ebs +package awsebs import ( "fmt" @@ -45,8 +45,10 @@ const ( ebsMaxReplicasInAZ = 1 ) +// AWSDiskUtil provides operations for EBS volume. type AWSDiskUtil struct{} +// DeleteVolume deletes an AWS EBS volume. func (util *AWSDiskUtil) DeleteVolume(d *awsElasticBlockStoreDeleter) error { cloud, err := getCloudProvider(d.awsElasticBlockStore.plugin.host.GetCloudProvider()) if err != nil { @@ -198,11 +200,11 @@ func verifyDevicePath(devicePaths []string) (string, error) { func verifyAllPathsRemoved(devicePaths []string) (bool, error) { allPathsRemoved := true for _, path := range devicePaths { - if exists, err := volumeutil.PathExists(path); err != nil { + exists, err := volumeutil.PathExists(path) + if err != nil { return false, fmt.Errorf("Error checking if path exists: %v", err) - } else { - allPathsRemoved = allPathsRemoved && !exists } + allPathsRemoved = allPathsRemoved && !exists } return allPathsRemoved, nil @@ -211,7 +213,7 @@ func verifyAllPathsRemoved(devicePaths []string) (bool, error) { // Returns list of all paths for given EBS mount // This is more interesting on GCE (where we are able to identify volumes under /dev/disk-by-id) // Here it is mostly about applying the partition path -func getDiskByIdPaths(volumeID aws.KubernetesVolumeID, partition string, devicePath string) []string { +func getDiskByIDPaths(volumeID aws.KubernetesVolumeID, partition string, devicePath string) []string { devicePaths := []string{} if devicePath != "" { devicePaths = append(devicePaths, devicePath) diff --git 
a/pkg/volume/aws_ebs/doc.go b/pkg/volume/awsebs/doc.go similarity index 81% rename from pkg/volume/aws_ebs/doc.go rename to pkg/volume/awsebs/doc.go index 2d73712242b..3b2d21208e3 100644 --- a/pkg/volume/aws_ebs/doc.go +++ b/pkg/volume/awsebs/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package aws_ebs contains the internal representation of AWS Elastic +// Package awsebs contains the internal representation of AWS Elastic // Block Store volumes. -package aws_ebs // import "k8s.io/kubernetes/pkg/volume/aws_ebs" +package awsebs // import "k8s.io/kubernetes/pkg/volume/awsebs" diff --git a/pkg/volume/cinder/BUILD b/pkg/volume/cinder/BUILD index c37c3f20881..d332afb01d0 100644 --- a/pkg/volume/cinder/BUILD +++ b/pkg/volume/cinder/BUILD @@ -50,6 +50,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/cloudprovider:go_default_library", + "//pkg/kubelet/apis:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", diff --git a/pkg/volume/cinder/cinder.go b/pkg/volume/cinder/cinder.go index 7086cc06f80..f1274a7b86b 100644 --- a/pkg/volume/cinder/cinder.go +++ b/pkg/volume/cinder/cinder.go @@ -276,7 +276,7 @@ type cdManager interface { // Detaches the disk from the kubelet's host machine. 
DetachDisk(unmounter *cinderVolumeUnmounter) error // Creates a volume - CreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) + CreateVolume(provisioner *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) // Deletes a volume DeleteVolume(deleter *cinderVolumeDeleter) error } @@ -507,7 +507,7 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes()) } - volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c) + volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c, selectedNode, allowedTopologies) if err != nil { return nil, err } @@ -550,6 +550,21 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo pv.Spec.AccessModes = c.plugin.GetAccessModes() } + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + requirements := make([]v1.NodeSelectorRequirement, 0) + for k, v := range labels { + if v != "" { + requirements = append(requirements, v1.NodeSelectorRequirement{Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{v}}) + } + } + if len(requirements) > 0 { + pv.Spec.NodeAffinity = new(v1.VolumeNodeAffinity) + pv.Spec.NodeAffinity.Required = new(v1.NodeSelector) + pv.Spec.NodeAffinity.Required.NodeSelectorTerms = make([]v1.NodeSelectorTerm, 1) + pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions = requirements + } + } + return pv, nil } diff --git a/pkg/volume/cinder/cinder_test.go b/pkg/volume/cinder/cinder_test.go index d16b3d09c9f..409cedcf23c 100644 --- a/pkg/volume/cinder/cinder_test.go +++ b/pkg/volume/cinder/cinder_test.go @@ -26,6 +26,7 @@ import ( "k8s.io/api/core/v1" 
"k8s.io/apimachinery/pkg/types" utiltesting "k8s.io/client-go/util/testing" + kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" @@ -116,8 +117,10 @@ func (fake *fakePDManager) DetachDisk(c *cinderVolumeUnmounter) error { return nil } -func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) { - return "test-volume-name", 1, nil, "", nil +func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) { + labels = make(map[string]string) + labels[kubeletapis.LabelZoneFailureDomain] = "nova" + return "test-volume-name", 1, labels, "", nil } func (fake *fakePDManager) DeleteVolume(cd *cinderVolumeDeleter) error { @@ -210,6 +213,39 @@ func TestPlugin(t *testing.T) { t.Errorf("Provision() returned unexpected volume size: %v", size) } + // check nodeaffinity members + if persistentSpec.Spec.NodeAffinity == nil { + t.Errorf("Provision() returned unexpected nil NodeAffinity") + } + + if persistentSpec.Spec.NodeAffinity.Required == nil { + t.Errorf("Provision() returned unexpected nil NodeAffinity.Required") + } + + n := len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms) + if n != 1 { + t.Errorf("Provision() returned unexpected number of NodeSelectorTerms %d. Expected %d", n, 1) + } + + n = len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions) + if n != 1 { + t.Errorf("Provision() returned unexpected number of MatchExpressions %d. 
Expected %d", n, 1) + } + + req := persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0] + + if req.Key != kubeletapis.LabelZoneFailureDomain { + t.Errorf("Provision() returned unexpected requirement key in NodeAffinity %v", req.Key) + } + + if req.Operator != v1.NodeSelectorOpIn { + t.Errorf("Provision() returned unexpected requirement operator in NodeAffinity %v", req.Operator) + } + + if len(req.Values) != 1 || req.Values[0] != "nova" { + t.Errorf("Provision() returned unexpected requirement value in NodeAffinity %v", req.Values) + } + // Test Deleter volSpec := &volume.Spec{ PersistentVolume: persistentSpec, diff --git a/pkg/volume/cinder/cinder_util.go b/pkg/volume/cinder/cinder_util.go index ab115f83add..accc40a2a6e 100644 --- a/pkg/volume/cinder/cinder_util.go +++ b/pkg/volume/cinder/cinder_util.go @@ -162,7 +162,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) { } // CreateVolume uses the cloud provider entrypoint for creating a volume -func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) { +func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) { cloud, err := c.plugin.getCloudProvider() if err != nil { return "", 0, nil, "", err @@ -207,7 +207,11 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, // if we did not get any zones, lets leave it blank and gophercloud will // use zone "nova" as default if len(zones) > 0 { - availability = volutil.ChooseZoneForVolume(zones, c.options.PVC.Name) + availability, err = volutil.SelectZoneForVolume(false, false, "", nil, zones, node, allowedTopologies, c.options.PVC.Name) + if err != nil { + glog.V(2).Infof("error selecting zone for volume: %v", err) + return "", 
0, nil, "", err + } } } @@ -221,8 +225,12 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, // these are needed that pod is spawning to same AZ volumeLabels = make(map[string]string) if IgnoreVolumeAZ == false { - volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ - volumeLabels[kubeletapis.LabelZoneRegion] = volumeRegion + if volumeAZ != "" { + volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ + } + if volumeRegion != "" { + volumeLabels[kubeletapis.LabelZoneRegion] = volumeRegion + } } return volumeID, volSizeGiB, volumeLabels, fstype, nil } diff --git a/pkg/volume/configmap/configmap.go b/pkg/volume/configmap/configmap.go index 897355af5c9..a1474d24595 100644 --- a/pkg/volume/configmap/configmap.go +++ b/pkg/volume/configmap/configmap.go @@ -203,13 +203,6 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } } - if err := wrapped.SetUpAt(dir, fsGroup); err != nil { - return err - } - if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil { - return err - } - totalBytes := totalBytes(configMap) glog.V(3).Infof("Received configMap %v/%v containing (%v) pieces of data, %v total bytes", b.pod.Namespace, @@ -222,6 +215,29 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } + setupSuccess := false + if err := wrapped.SetUpAt(dir, fsGroup); err != nil { + return err + } + if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil { + return err + } + + defer func() { + // Clean up directories if setup fails + if !setupSuccess { + unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID) + if unmountCreateErr != nil { + glog.Errorf("error cleaning up mount %s after failure. 
Create unmounter failed with %v", b.volName, unmountCreateErr) + return + } + tearDownErr := unmounter.TearDown() + if tearDownErr != nil { + glog.Errorf("Error tearing down volume %s with : %v", b.volName, tearDownErr) + } + } + }() + writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName) writer, err := volumeutil.NewAtomicWriter(dir, writerContext) if err != nil { @@ -240,6 +256,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) return err } + setupSuccess = true return nil } diff --git a/pkg/volume/configmap/configmap_test.go b/pkg/volume/configmap/configmap_test.go index eabf2c9d71e..4b97550e979 100644 --- a/pkg/volume/configmap/configmap_test.go +++ b/pkg/volume/configmap/configmap_test.go @@ -613,6 +613,66 @@ func volumeSpec(volumeName, configMapName string, defaultMode int32) *v1.Volume } } +func TestInvalidConfigMapSetup(t *testing.T) { + var ( + testPodUID = types.UID("test_pod_uid") + testVolumeName = "test_volume_name" + testNamespace = "test_configmap_namespace" + testName = "test_configmap_name" + + volumeSpec = volumeSpec(testVolumeName, testName, 0644) + configMap = configMap(testNamespace, testName) + client = fake.NewSimpleClientset(&configMap) + pluginMgr = volume.VolumePluginMgr{} + tempDir, host = newTestHost(t, client) + ) + volumeSpec.VolumeSource.ConfigMap.Items = []v1.KeyToPath{ + {Key: "missing", Path: "missing"}, + } + + defer os.RemoveAll(tempDir) + pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host) + + plugin, err := pluginMgr.FindPluginByName(configMapPluginName) + if err != nil { + t.Errorf("Can't find the plugin by name") + } + + pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}} + mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) + if err != nil { + t.Errorf("Failed to make a new 
Mounter: %v", err) + } + if mounter == nil { + t.Errorf("Got a nil Mounter") + } + + vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec)) + if err != nil { + t.Errorf("Failed to GetVolumeName: %v", err) + } + if vName != "test_volume_name/test_configmap_name" { + t.Errorf("Got unexpect VolumeName %v", vName) + } + + volumePath := mounter.GetPath() + if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name")) { + t.Errorf("Got unexpected path: %s", volumePath) + } + + fsGroup := int64(1001) + err = mounter.SetUp(&fsGroup) + if err == nil { + t.Errorf("Expected setup to fail") + } + _, err = os.Stat(volumePath) + if err == nil { + t.Errorf("Expected %s to not exist", volumePath) + } + + doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t) +} + func configMap(namespace, name string) v1.ConfigMap { return v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/volume/csi/BUILD b/pkg/volume/csi/BUILD index 0d80f8238af..c9adb17cc5a 100644 --- a/pkg/volume/csi/BUILD +++ b/pkg/volume/csi/BUILD @@ -27,7 +27,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/informers/externalversions:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/informers/externalversions/csi/v1alpha1:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/listers/csi/v1alpha1:go_default_library", diff --git a/pkg/volume/csi/csi_client.go b/pkg/volume/csi/csi_client.go index 4a4cb4176bf..36055db9aa9 100644 --- a/pkg/volume/csi/csi_client.go +++ b/pkg/volume/csi/csi_client.go @@ -95,6 +95,10 @@ func (c *csiDriverClient) NodeGetInfo(ctx context.Context) ( nodeClient := csipb.NewNodeClient(conn) res, 
err := nodeClient.NodeGetInfo(ctx, &csipb.NodeGetInfoRequest{}) + if err != nil { + return "", 0, nil, err + } + return res.GetNodeId(), res.GetMaxVolumesPerNode(), res.GetAccessibleTopology(), nil } diff --git a/pkg/volume/csi/csi_mounter.go b/pkg/volume/csi/csi_mounter.go index c1e59fb7b9b..28c0d9a93f0 100644 --- a/pkg/volume/csi/csi_mounter.go +++ b/pkg/volume/csi/csi_mounter.go @@ -206,31 +206,24 @@ func (c *csiMountMgr) SetUpAt(dir string, fsGroup *int64) error { } // apply volume ownership - if !c.readOnly && fsGroup != nil { - err := volume.SetVolumeOwnership(c, fsGroup) - if err != nil { - // attempt to rollback mount. - glog.Error(log("mounter.SetupAt failed to set fsgroup volume ownership for [%s]: %v", c.volumeID, err)) - glog.V(4).Info(log("mounter.SetupAt attempting to unpublish volume %s due to previous error", c.volumeID)) - if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil { - glog.Error(log( - "mounter.SetupAt failed to unpublish volume [%s]: %v (caused by previous NodePublish error: %v)", - c.volumeID, unpubErr, err, - )) - return fmt.Errorf("%v (caused by %v)", unpubErr, err) - } + // The following logic is derived from https://github.com/kubernetes/kubernetes/issues/66323 + // if fstype is "", then skip fsgroup (could be indication of non-block filesystem) + // if fstype is provided and pv.AccessMode == ReadWriteOnly, then apply fsgroup - if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil { - glog.Error(log( - "mounter.SetupAt failed to clean mount dir [%s]: %v (caused by previous NodePublish error: %v)", - dir, unmountErr, err, - )) - return fmt.Errorf("%v (caused by %v)", unmountErr, err) - } - - return err + err = c.applyFSGroup(fsType, fsGroup) + if err != nil { + // attempt to rollback mount. 
+ fsGrpErr := fmt.Errorf("applyFSGroup failed for vol %s: %v", c.volumeID, err) + if unpubErr := csi.NodeUnpublishVolume(ctx, c.volumeID, dir); unpubErr != nil { + glog.Error(log("NodeUnpublishVolume failed for [%s]: %v", c.volumeID, unpubErr)) + return fsGrpErr } - glog.V(4).Info(log("mounter.SetupAt sets fsGroup to [%d] for %s", *fsGroup, c.volumeID)) + + if unmountErr := removeMountDir(c.plugin, dir); unmountErr != nil { + glog.Error(log("removeMountDir failed for [%s]: %v", dir, unmountErr)) + return fsGrpErr + } + return fsGrpErr } glog.V(4).Infof(log("mounter.SetUp successfully requested NodePublish [%s]", dir)) @@ -330,6 +323,43 @@ func (c *csiMountMgr) TearDownAt(dir string) error { return nil } +// applyFSGroup applies the volume ownership it derives its logic +// from https://github.com/kubernetes/kubernetes/issues/66323 +// 1) if fstype is "", then skip fsgroup (could be indication of non-block filesystem) +// 2) if fstype is provided and pv.AccessMode == ReadWriteOnly and !c.spec.ReadOnly then apply fsgroup +func (c *csiMountMgr) applyFSGroup(fsType string, fsGroup *int64) error { + if fsGroup != nil { + if fsType == "" { + glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, fsType not provided")) + return nil + } + + accessModes := c.spec.PersistentVolume.Spec.AccessModes + if c.spec.PersistentVolume.Spec.AccessModes == nil { + glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, access modes not provided")) + return nil + } + if !hasReadWriteOnce(accessModes) { + glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, only support ReadWriteOnce access mode")) + return nil + } + + if c.readOnly { + glog.V(4).Info(log("mounter.SetupAt WARNING: skipping fsGroup, volume is readOnly")) + return nil + } + + err := volume.SetVolumeOwnership(c, fsGroup) + if err != nil { + return err + } + + glog.V(4).Info(log("mounter.SetupAt fsGroup [%d] applied successfully to %s", *fsGroup, c.volumeID)) + } + + return nil +} + // isDirMounted 
returns the !notMounted result from IsLikelyNotMountPoint check func isDirMounted(plug *csiPlugin, dir string) (bool, error) { mounter := plug.host.GetMounter(plug.GetPluginName()) diff --git a/pkg/volume/csi/csi_mounter_test.go b/pkg/volume/csi/csi_mounter_test.go index cd404446665..b3b087281ac 100644 --- a/pkg/volume/csi/csi_mounter_test.go +++ b/pkg/volume/csi/csi_mounter_test.go @@ -262,6 +262,129 @@ func TestMounterSetUp(t *testing.T) { MounterSetUpTests(t, false) }) } +func TestMounterSetUpWithFSGroup(t *testing.T) { + fakeClient := fakeclient.NewSimpleClientset() + plug, tmpDir := newTestPlugin(t, fakeClient, nil) + defer os.RemoveAll(tmpDir) + + testCases := []struct { + name string + accessModes []api.PersistentVolumeAccessMode + readOnly bool + fsType string + setFsGroup bool + fsGroup int64 + }{ + { + name: "default fstype, with no fsgroup (should not apply fsgroup)", + accessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + }, + readOnly: false, + fsType: "", + }, + { + name: "default fstype with fsgroup (should not apply fsgroup)", + accessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + }, + readOnly: false, + fsType: "", + setFsGroup: true, + fsGroup: 3000, + }, + { + name: "fstype, fsgroup, RWM, ROM provided (should not apply fsgroup)", + accessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteMany, + api.ReadOnlyMany, + }, + fsType: "ext4", + setFsGroup: true, + fsGroup: 3000, + }, + { + name: "fstype, fsgroup, RWO, but readOnly (should not apply fsgroup)", + accessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + }, + readOnly: true, + fsType: "ext4", + setFsGroup: true, + fsGroup: 3000, + }, + { + name: "fstype, fsgroup, RWO provided (should apply fsgroup)", + accessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + }, + fsType: "ext4", + setFsGroup: true, + fsGroup: 3000, + }, + } + + for i, tc := range testCases { + t.Logf("Running test %s", tc.name) + + volName := 
fmt.Sprintf("test-vol-%d", i) + pv := makeTestPV("test-pv", 10, testDriver, volName) + pv.Spec.AccessModes = tc.accessModes + pvName := pv.GetName() + + spec := volume.NewSpecFromPersistentVolume(pv, tc.readOnly) + + if tc.fsType != "" { + spec.PersistentVolume.Spec.CSI.FSType = tc.fsType + } + + mounter, err := plug.NewMounter( + spec, + &api.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}}, + volume.VolumeOptions{}, + ) + if err != nil { + t.Fatalf("Failed to make a new Mounter: %v", err) + } + + if mounter == nil { + t.Fatal("failed to create CSI mounter") + } + + csiMounter := mounter.(*csiMountMgr) + csiMounter.csiClient = setupClient(t, true) + + attachID := getAttachmentName(csiMounter.volumeID, csiMounter.driverName, string(plug.host.GetNodeName())) + attachment := makeTestAttachment(attachID, "test-node", pvName) + + _, err = csiMounter.k8s.StorageV1beta1().VolumeAttachments().Create(attachment) + if err != nil { + t.Errorf("failed to setup VolumeAttachment: %v", err) + continue + } + + // Mounter.SetUp() + var fsGroupPtr *int64 + if tc.setFsGroup { + fsGroup := tc.fsGroup + fsGroupPtr = &fsGroup + } + if err := csiMounter.SetUp(fsGroupPtr); err != nil { + t.Fatalf("mounter.Setup failed: %v", err) + } + + //Test the default value of file system type is not overridden + if len(csiMounter.spec.PersistentVolume.Spec.CSI.FSType) != len(tc.fsType) { + t.Errorf("file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType) + } + + // ensure call went all the way + pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes() + if pubs[csiMounter.volumeID].Path != csiMounter.GetPath() { + t.Error("csi server may not have received NodePublishVolume call") + } + } +} func TestUnmounterTeardown(t *testing.T) { plug, tmpDir := newTestPlugin(t, nil, nil) diff --git a/pkg/volume/csi/csi_plugin.go b/pkg/volume/csi/csi_plugin.go index 8d021d671c5..a9083e53594 100644 --- 
a/pkg/volume/csi/csi_plugin.go +++ b/pkg/volume/csi/csi_plugin.go @@ -36,7 +36,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" - csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned" csiapiinformer "k8s.io/csi-api/pkg/client/informers/externalversions" csiinformer "k8s.io/csi-api/pkg/client/informers/externalversions/csi/v1alpha1" csilister "k8s.io/csi-api/pkg/client/listers/csi/v1alpha1" @@ -161,31 +160,22 @@ func (h *RegistrationHandler) DeRegisterPlugin(pluginName string) { func (p *csiPlugin) Init(host volume.VolumeHost) error { p.host = host - kubeClient := host.GetKubeClient() - if kubeClient == nil { - return fmt.Errorf("error getting kube client") - } - - var csiClient csiclientset.Interface - if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) || - utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) { - csiClient = host.GetCSIClient() - if csiClient == nil { - return fmt.Errorf("error getting CSI client") - } - } - if utilfeature.DefaultFeatureGate.Enabled(features.CSIDriverRegistry) { - // Start informer for CSIDrivers. - factory := csiapiinformer.NewSharedInformerFactory(csiClient, csiResyncPeriod) - p.csiDriverInformer = factory.Csi().V1alpha1().CSIDrivers() - p.csiDriverLister = p.csiDriverInformer.Lister() - go factory.Start(wait.NeverStop) + csiClient := host.GetCSIClient() + if csiClient == nil { + glog.Warning("The client for CSI Custom Resources is not available, skipping informer initialization") + } else { + // Start informer for CSIDrivers. 
+ factory := csiapiinformer.NewSharedInformerFactory(csiClient, csiResyncPeriod) + p.csiDriverInformer = factory.Csi().V1alpha1().CSIDrivers() + p.csiDriverLister = p.csiDriverInformer.Lister() + go factory.Start(wait.NeverStop) + } } // Initializing csiDrivers map and label management channels csiDrivers = csiDriversStore{driversMap: map[string]csiDriver{}} - nim = nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), kubeClient, csiClient) + nim = nodeinfomanager.NewNodeInfoManager(host.GetNodeName(), host) return nil } diff --git a/pkg/volume/csi/csi_util.go b/pkg/volume/csi/csi_util.go index 338333e0959..00d40fef39e 100644 --- a/pkg/volume/csi/csi_util.go +++ b/pkg/volume/csi/csi_util.go @@ -127,3 +127,16 @@ func getVolumeDeviceDataDir(specVolID string, host volume.VolumeHost) string { sanitizedSpecVolID := kstrings.EscapeQualifiedNameForDisk(specVolID) return path.Join(host.GetVolumeDevicePluginDir(csiPluginName), sanitizedSpecVolID, "data") } + +// hasReadWriteOnce returns true if modes contains v1.ReadWriteOnce +func hasReadWriteOnce(modes []api.PersistentVolumeAccessMode) bool { + if modes == nil { + return false + } + for _, mode := range modes { + if mode == api.ReadWriteOnce { + return true + } + } + return false +} diff --git a/pkg/volume/csi/nodeinfomanager/BUILD b/pkg/volume/csi/nodeinfomanager/BUILD index e83adfed6f0..86ac0efcd2c 100644 --- a/pkg/volume/csi/nodeinfomanager/BUILD +++ b/pkg/volume/csi/nodeinfomanager/BUILD @@ -7,6 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/features:go_default_library", + "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", @@ -15,10 +16,8 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", 
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/util/retry:go_default_library", "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library", - "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library", "//vendor/github.com/golang/glog:go_default_library", ], @@ -45,6 +44,7 @@ go_test( deps = [ "//pkg/apis/core/helper:go_default_library", "//pkg/features:go_default_library", + "//pkg/volume/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -53,6 +53,7 @@ go_test( "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + "//staging/src/k8s.io/client-go/util/testing:go_default_library", "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned/fake:go_default_library", "//vendor/github.com/container-storage-interface/spec/lib/go/csi/v0:go_default_library", diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go index 7754dcf28f5..462914f9689 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager.go @@ -31,11 +31,10 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/retry" csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1" - csiclientset 
"k8s.io/csi-api/pkg/client/clientset/versioned" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util" ) @@ -49,9 +48,8 @@ var nodeKind = v1.SchemeGroupVersion.WithKind("Node") // nodeInfoManager contains necessary common dependencies to update node info on both // the Node and CSINodeInfo objects. type nodeInfoManager struct { - nodeName types.NodeName - k8s kubernetes.Interface - csiKubeClient csiclientset.Interface + nodeName types.NodeName + volumeHost volume.VolumeHost } // If no updates is needed, the function must return the same Node object as the input. @@ -73,12 +71,10 @@ type Interface interface { // NewNodeInfoManager initializes nodeInfoManager func NewNodeInfoManager( nodeName types.NodeName, - kubeClient kubernetes.Interface, - csiKubeClient csiclientset.Interface) Interface { + volumeHost volume.VolumeHost) Interface { return &nodeInfoManager{ - nodeName: nodeName, - k8s: kubeClient, - csiKubeClient: csiKubeClient, + nodeName: nodeName, + volumeHost: volumeHost, } } @@ -143,7 +139,12 @@ func (nim *nodeInfoManager) updateNode(updateFuncs ...nodeUpdateFunc) error { // existing changes are not overwritten. RetryOnConflict uses // exponential backoff to avoid exhausting the apiserver. 
- nodeClient := nim.k8s.CoreV1().Nodes() + kubeClient := nim.volumeHost.GetKubeClient() + if kubeClient == nil { + return fmt.Errorf("error getting kube client") + } + + nodeClient := kubeClient.CoreV1().Nodes() node, err := nodeClient.Get(string(nim.nodeName), metav1.GetOptions{}) if err != nil { return err // do not wrap error @@ -315,12 +316,13 @@ func (nim *nodeInfoManager) updateCSINodeInfo( driverNodeID string, topology *csipb.Topology) error { - if nim.csiKubeClient == nil { - return fmt.Errorf("CSI client cannot be nil") + csiKubeClient := nim.volumeHost.GetCSIClient() + if csiKubeClient == nil { + return fmt.Errorf("error getting CSI client") } retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - nodeInfo, err := nim.csiKubeClient.CsiV1alpha1().CSINodeInfos().Get(string(nim.nodeName), metav1.GetOptions{}) + nodeInfo, err := csiKubeClient.CsiV1alpha1().CSINodeInfos().Get(string(nim.nodeName), metav1.GetOptions{}) if nodeInfo == nil || errors.IsNotFound(err) { return nim.createNodeInfoObject(driverName, driverNodeID, topology) } @@ -341,14 +343,24 @@ func (nim *nodeInfoManager) createNodeInfoObject( driverNodeID string, topology *csipb.Topology) error { - var topologyKeys []string + kubeClient := nim.volumeHost.GetKubeClient() + if kubeClient == nil { + return fmt.Errorf("error getting kube client") + } + + csiKubeClient := nim.volumeHost.GetCSIClient() + if csiKubeClient == nil { + return fmt.Errorf("error getting CSI client") + } + + topologyKeys := []string{} // must be an empty slice instead of nil to satisfy CRD OpenAPI Schema validation if topology != nil { for k := range topology.Segments { topologyKeys = append(topologyKeys, k) } } - node, err := nim.k8s.CoreV1().Nodes().Get(string(nim.nodeName), metav1.GetOptions{}) + node, err := kubeClient.CoreV1().Nodes().Get(string(nim.nodeName), metav1.GetOptions{}) if err != nil { return err // do not wrap error } @@ -374,7 +386,7 @@ func (nim *nodeInfoManager) createNodeInfoObject( }, } - _, 
err = nim.csiKubeClient.CsiV1alpha1().CSINodeInfos().Create(nodeInfo) + _, err = csiKubeClient.CsiV1alpha1().CSINodeInfos().Create(nodeInfo) return err // do not wrap error } @@ -384,6 +396,11 @@ func (nim *nodeInfoManager) updateNodeInfoObject( driverNodeID string, topology *csipb.Topology) error { + csiKubeClient := nim.volumeHost.GetCSIClient() + if csiKubeClient == nil { + return fmt.Errorf("error getting CSI client") + } + topologyKeys := make(sets.String) if topology != nil { for k := range topology.Segments { @@ -416,14 +433,19 @@ func (nim *nodeInfoManager) updateNodeInfoObject( newDriverInfos = append(newDriverInfos, driverInfo) nodeInfo.CSIDrivers = newDriverInfos - _, err := nim.csiKubeClient.CsiV1alpha1().CSINodeInfos().Update(nodeInfo) + _, err := csiKubeClient.CsiV1alpha1().CSINodeInfos().Update(nodeInfo) return err // do not wrap error } func (nim *nodeInfoManager) removeCSINodeInfo(csiDriverName string) error { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - nodeInfoClient := nim.csiKubeClient.CsiV1alpha1().CSINodeInfos() + csiKubeClient := nim.volumeHost.GetCSIClient() + if csiKubeClient == nil { + return fmt.Errorf("error getting CSI client") + } + + nodeInfoClient := csiKubeClient.CsiV1alpha1().CSINodeInfos() nodeInfo, err := nodeInfoClient.Get(string(nim.nodeName), metav1.GetOptions{}) if nodeInfo == nil || errors.IsNotFound(err) { // do nothing diff --git a/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go b/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go index a4c70ac56f5..122463bddef 100644 --- a/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go +++ b/pkg/volume/csi/nodeinfomanager/nodeinfomanager_test.go @@ -27,10 +27,12 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing" "k8s.io/client-go/kubernetes/fake" + utiltesting "k8s.io/client-go/util/testing" csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1" csifake 
"k8s.io/csi-api/pkg/client/clientset/versioned/fake" "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/features" + volumetest "k8s.io/kubernetes/pkg/volume/testing" "testing" ) @@ -530,10 +532,23 @@ func TestAddNodeInfoExistingAnnotation(t *testing.T) { nodeName := tc.existingNode.Name client := fake.NewSimpleClientset(tc.existingNode) csiClient := csifake.NewSimpleClientset() - nim := NewNodeInfoManager(types.NodeName(nodeName), client, csiClient) + + tmpDir, err := utiltesting.MkTmpdir("nodeinfomanager-test") + if err != nil { + t.Fatalf("can't create temp dir: %v", err) + } + host := volumetest.NewFakeVolumeHostWithCSINodeName( + tmpDir, + client, + csiClient, + nil, + nodeName, + ) + + nim := NewNodeInfoManager(types.NodeName(nodeName), host) // Act - err := nim.AddNodeInfo(driverName, nodeID, 0 /* maxVolumeLimit */, nil) // TODO test maxVolumeLimit + err = nim.AddNodeInfo(driverName, nodeID, 0 /* maxVolumeLimit */, nil) // TODO test maxVolumeLimit if err != nil { t.Errorf("expected no error from AddNodeInfo call but got: %v", err) continue @@ -573,10 +588,21 @@ func test(t *testing.T, addNodeInfo bool, csiNodeInfoEnabled bool, testcases []t } else { csiClient = csifake.NewSimpleClientset(tc.existingNodeInfo) } - nim := NewNodeInfoManager(types.NodeName(nodeName), client, csiClient) + + tmpDir, err := utiltesting.MkTmpdir("nodeinfomanager-test") + if err != nil { + t.Fatalf("can't create temp dir: %v", err) + } + host := volumetest.NewFakeVolumeHostWithCSINodeName( + tmpDir, + client, + csiClient, + nil, + nodeName, + ) + nim := NewNodeInfoManager(types.NodeName(nodeName), host) //// Act - var err error if addNodeInfo { err = nim.AddNodeInfo(tc.driverName, tc.inputNodeID, 0 /* maxVolumeLimit */, tc.inputTopology) // TODO test maxVolumeLimit } else { diff --git a/pkg/volume/downwardapi/downwardapi.go b/pkg/volume/downwardapi/downwardapi.go index 88f7b68f759..11fe62fc777 100644 --- a/pkg/volume/downwardapi/downwardapi.go +++ 
b/pkg/volume/downwardapi/downwardapi.go @@ -185,6 +185,7 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } + setupSuccess := false if err := wrapped.SetUpAt(dir, fsGroup); err != nil { glog.Errorf("Unable to setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) return err @@ -194,6 +195,21 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } + defer func() { + // Clean up directories if setup fails + if !setupSuccess { + unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID) + if unmountCreateErr != nil { + glog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", b.volName, unmountCreateErr) + return + } + tearDownErr := unmounter.TearDown() + if tearDownErr != nil { + glog.Errorf("error tearing down volume %s with : %v", b.volName, tearDownErr) + } + } + }() + writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName) writer, err := volumeutil.NewAtomicWriter(dir, writerContext) if err != nil { @@ -213,6 +229,7 @@ func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } + setupSuccess = true return nil } diff --git a/pkg/volume/flexvolume/driver-call.go b/pkg/volume/flexvolume/driver-call.go index 5b089df07a6..bd5a5cdb4f9 100644 --- a/pkg/volume/flexvolume/driver-call.go +++ b/pkg/volume/flexvolume/driver-call.go @@ -220,14 +220,18 @@ type DriverStatus struct { } type DriverCapabilities struct { - Attach bool `json:"attach"` - SELinuxRelabel bool `json:"selinuxRelabel"` + Attach bool `json:"attach"` + SELinuxRelabel bool `json:"selinuxRelabel"` + SupportsMetrics bool `json:"supportsMetrics"` + FSGroup bool `json:"fsGroup"` } func defaultCapabilities() *DriverCapabilities { return &DriverCapabilities{ - Attach: true, - SELinuxRelabel: true, + Attach: true, + SELinuxRelabel: true, + SupportsMetrics: false, + FSGroup: 
true, } } diff --git a/pkg/volume/flexvolume/mounter.go b/pkg/volume/flexvolume/mounter.go index 5085293cc17..8c246043cb7 100644 --- a/pkg/volume/flexvolume/mounter.go +++ b/pkg/volume/flexvolume/mounter.go @@ -32,7 +32,6 @@ type flexVolumeMounter struct { // the considered volume spec spec *volume.Spec readOnly bool - volume.MetricsNil } var _ volume.Mounter = &flexVolumeMounter{} @@ -93,7 +92,9 @@ func (f *flexVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } if !f.readOnly { - volume.SetVolumeOwnership(f, fsGroup) + if f.plugin.capabilities.FSGroup { + volume.SetVolumeOwnership(f, fsGroup) + } } return nil diff --git a/pkg/volume/flexvolume/plugin.go b/pkg/volume/flexvolume/plugin.go index 70d4b009f6f..6ddd455490f 100644 --- a/pkg/volume/flexvolume/plugin.go +++ b/pkg/volume/flexvolume/plugin.go @@ -177,6 +177,14 @@ func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *api.P return nil, err } + var metricsProvider volume.MetricsProvider + if plugin.capabilities.SupportsMetrics { + metricsProvider = volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir( + pod.UID, utilstrings.EscapeQualifiedNameForDisk(sourceDriver), spec.Name())) + } else { + metricsProvider = &volume.MetricsNil{} + } + return &flexVolumeMounter{ flexVolume: &flexVolume{ driverName: sourceDriver, @@ -188,6 +196,7 @@ func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *api.P podNamespace: pod.Namespace, podServiceAccountName: pod.Spec.ServiceAccountName, volName: spec.Name(), + MetricsProvider: metricsProvider, }, runner: runner, spec: spec, @@ -202,14 +211,23 @@ func (plugin *flexVolumePlugin) NewUnmounter(volName string, podUID types.UID) ( // newUnmounterInternal is the internal unmounter routine to clean the volume. 
func (plugin *flexVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface, runner exec.Interface) (volume.Unmounter, error) { + var metricsProvider volume.MetricsProvider + if plugin.capabilities.SupportsMetrics { + metricsProvider = volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir( + podUID, utilstrings.EscapeQualifiedNameForDisk(plugin.driverName), volName)) + } else { + metricsProvider = &volume.MetricsNil{} + } + return &flexVolumeUnmounter{ flexVolume: &flexVolume{ - driverName: plugin.driverName, - execPath: plugin.getExecutable(), - mounter: mounter, - plugin: plugin, - podUID: podUID, - volName: volName, + driverName: plugin.driverName, + execPath: plugin.getExecutable(), + mounter: mounter, + plugin: plugin, + podUID: podUID, + volName: volName, + MetricsProvider: metricsProvider, }, runner: runner, }, nil diff --git a/pkg/volume/flexvolume/unmounter.go b/pkg/volume/flexvolume/unmounter.go index 67e26fe15cc..406c7f84f04 100644 --- a/pkg/volume/flexvolume/unmounter.go +++ b/pkg/volume/flexvolume/unmounter.go @@ -31,7 +31,6 @@ type flexVolumeUnmounter struct { *flexVolume // Runner used to teardown the volume. 
runner exec.Interface - volume.MetricsNil } var _ volume.Unmounter = &flexVolumeUnmounter{} diff --git a/pkg/volume/flexvolume/volume.go b/pkg/volume/flexvolume/volume.go index abeeb052970..f4988d839e6 100644 --- a/pkg/volume/flexvolume/volume.go +++ b/pkg/volume/flexvolume/volume.go @@ -20,6 +20,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/util/mount" utilstrings "k8s.io/kubernetes/pkg/util/strings" + "k8s.io/kubernetes/pkg/volume" ) type flexVolume struct { @@ -42,6 +43,8 @@ type flexVolume struct { volName string // the underlying plugin plugin *flexVolumePlugin + // the metric plugin + volume.MetricsProvider } // volume.Volume interface diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 4e29984e431..39b83a2f376 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -319,10 +319,11 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error { // its own log based on PV + Pod log = path.Join(p, b.pod.Name+"-glusterfs.log") + // Use derived log file in gluster fuse mount + options = append(options, "log-file="+log) + } - // Use derived/provided log file in gluster fuse mount - options = append(options, "log-file="+log) options = append(options, "log-level=ERROR") var addrlist []string diff --git a/pkg/volume/portworx/portworx.go b/pkg/volume/portworx/portworx.go index 1edb0bacac3..8c875e7dd77 100644 --- a/pkg/volume/portworx/portworx.go +++ b/pkg/volume/portworx/portworx.go @@ -269,10 +269,9 @@ var _ volume.Mounter = &portworxVolumeMounter{} func (b *portworxVolumeMounter) GetAttributes() volume.Attributes { return volume.Attributes{ - ReadOnly: b.readOnly, - Managed: !b.readOnly, - // true ? 
- SupportsSELinux: true, + ReadOnly: b.readOnly, + Managed: !b.readOnly, + SupportsSELinux: false, } } diff --git a/pkg/volume/projected/projected.go b/pkg/volume/projected/projected.go index ebc0a6f0d70..7c2f583e7fc 100644 --- a/pkg/volume/projected/projected.go +++ b/pkg/volume/projected/projected.go @@ -198,6 +198,8 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { glog.Errorf("Error preparing data for projected volume %v for pod %v/%v: %s", s.volName, s.pod.Namespace, s.pod.Name, err.Error()) return err } + + setupSuccess := false if err := wrapped.SetUpAt(dir, fsGroup); err != nil { return err } @@ -206,6 +208,21 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } + defer func() { + // Clean up directories if setup fails + if !setupSuccess { + unmounter, unmountCreateErr := s.plugin.NewUnmounter(s.volName, s.podUID) + if unmountCreateErr != nil { + glog.Errorf("error cleaning up mount %s after failure. Create unmounter failed with %v", s.volName, unmountCreateErr) + return + } + tearDownErr := unmounter.TearDown() + if tearDownErr != nil { + glog.Errorf("error tearing down volume %s with : %v", s.volName, tearDownErr) + } + } + }() + writerContext := fmt.Sprintf("pod %v/%v volume %v", s.pod.Namespace, s.pod.Name, s.volName) writer, err := volumeutil.NewAtomicWriter(dir, writerContext) if err != nil { @@ -224,6 +241,7 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) return err } + setupSuccess = true return nil } diff --git a/pkg/volume/projected/projected_test.go b/pkg/volume/projected/projected_test.go index f08a8ddeb56..78f16bf6e03 100644 --- a/pkg/volume/projected/projected_test.go +++ b/pkg/volume/projected/projected_test.go @@ -790,6 +790,56 @@ func TestPlugin(t *testing.T) { defer doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t) } +func 
TestInvalidPathProjected(t *testing.T) { + var ( + testPodUID = types.UID("test_pod_uid") + testVolumeName = "test_volume_name" + testNamespace = "test_projected_namespace" + testName = "test_projected_name" + + volumeSpec = makeVolumeSpec(testVolumeName, testName, 0644) + secret = makeSecret(testNamespace, testName) + client = fake.NewSimpleClientset(&secret) + pluginMgr = volume.VolumePluginMgr{} + rootDir, host = newTestHost(t, client) + ) + volumeSpec.Projected.Sources[0].Secret.Items = []v1.KeyToPath{ + {Key: "missing", Path: "missing"}, + } + + defer os.RemoveAll(rootDir) + pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host) + + plugin, err := pluginMgr.FindPluginByName(projectedPluginName) + if err != nil { + t.Errorf("Can't find the plugin by name") + } + + pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}} + mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) + if err != nil { + t.Errorf("Failed to make a new Mounter: %v", err) + } + if mounter == nil { + t.Errorf("Got a nil Mounter") + } + + volumePath := mounter.GetPath() + if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~projected/%s", testVolumeName)) { + t.Errorf("Got unexpected path: %s", volumePath) + } + + err = mounter.SetUp(nil) + if err == nil { + t.Errorf("Expected error while setting up secret") + } + + _, err = os.Stat(volumePath) + if err == nil { + t.Errorf("Expected path %s to not exist", volumePath) + } +} + // Test the case where the plugin's ready file exists, but the volume dir is not a // mountpoint, which is the state the system will be in after reboot. The dir // should be mounter and the secret data written to it. 
diff --git a/pkg/volume/rbd/rbd_util.go b/pkg/volume/rbd/rbd_util.go index c0126c4d60a..dd66311c21b 100644 --- a/pkg/volume/rbd/rbd_util.go +++ b/pkg/volume/rbd/rbd_util.go @@ -395,39 +395,38 @@ func (util *RBDUtil) AttachDisk(b rbdMounter) (string, error) { Steps: rbdImageWatcherSteps, } needValidUsed := true - // If accessModes contain ReadOnlyMany, we don't need check rbd status of being used. if b.accessModes != nil { - for _, v := range b.accessModes { - if v != v1.ReadWriteOnce { - needValidUsed = false - break + // If accessModes only contains ReadOnlyMany, we don't need check rbd status of being used. + if len(b.accessModes) == 1 && b.accessModes[0] == v1.ReadOnlyMany { + needValidUsed = false + } + } + // If accessModes is nil, the volume is referenced by in-line volume. + // We can assume the AccessModes to be {"RWO" and "ROX"}, which is what the volume plugin supports. + // We do not need to consider ReadOnly here, because it is used for VolumeMounts. + + if needValidUsed { + err := wait.ExponentialBackoff(backoff, func() (bool, error) { + used, rbdOutput, err := util.rbdStatus(&b) + if err != nil { + return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput) } + return !used, nil + }) + // Return error if rbd image has not become available for the specified timeout. + if err == wait.ErrWaitTimeout { + return "", fmt.Errorf("rbd image %s/%s is still being used", b.Pool, b.Image) } - } else { - // ReadOnly rbd volume should not check rbd status of being used to - // support mounted as read-only by multiple consumers simultaneously. - needValidUsed = !b.rbd.ReadOnly - } - err := wait.ExponentialBackoff(backoff, func() (bool, error) { - used, rbdOutput, err := util.rbdStatus(&b) + // Return error if any other errors were encountered during wating for the image to become available. 
if err != nil { - return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput) + return "", err } - return !needValidUsed || !used, nil - }) - // Return error if rbd image has not become available for the specified timeout. - if err == wait.ErrWaitTimeout { - return "", fmt.Errorf("rbd image %s/%s is still being used", b.Pool, b.Image) - } - // Return error if any other errors were encountered during wating for the image to become available. - if err != nil { - return "", err } mon := util.kernelRBDMonitorsOpt(b.Mon) glog.V(1).Infof("rbd: map mon %s", mon) - _, err = b.exec.Run("modprobe", "rbd") + _, err := b.exec.Run("modprobe", "rbd") if err != nil { glog.Warningf("rbd: failed to load rbd kernel module:%v", err) } diff --git a/pkg/volume/secret/secret.go b/pkg/volume/secret/secret.go index dda1eec335b..065b9bfb5af 100644 --- a/pkg/volume/secret/secret.go +++ b/pkg/volume/secret/secret.go @@ -202,13 +202,6 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { } } - if err := wrapped.SetUpAt(dir, fsGroup); err != nil { - return err - } - if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil { - return err - } - totalBytes := totalSecretBytes(secret) glog.V(3).Infof("Received secret %v/%v containing (%v) pieces of data, %v total bytes", b.pod.Namespace, @@ -221,6 +214,29 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { return err } + setupSuccess := false + if err := wrapped.SetUpAt(dir, fsGroup); err != nil { + return err + } + if err := volumeutil.MakeNestedMountpoints(b.volName, dir, b.pod); err != nil { + return err + } + + defer func() { + // Clean up directories if setup fails + if !setupSuccess { + unmounter, unmountCreateErr := b.plugin.NewUnmounter(b.volName, b.podUID) + if unmountCreateErr != nil { + glog.Errorf("error cleaning up mount %s after failure. 
Create unmounter failed with %v", b.volName, unmountCreateErr) + return + } + tearDownErr := unmounter.TearDown() + if tearDownErr != nil { + glog.Errorf("error tearing down volume %s with : %v", b.volName, tearDownErr) + } + } + }() + writerContext := fmt.Sprintf("pod %v/%v volume %v", b.pod.Namespace, b.pod.Name, b.volName) writer, err := volumeutil.NewAtomicWriter(dir, writerContext) if err != nil { @@ -239,6 +255,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error { glog.Errorf("Error applying volume ownership settings for group: %v", fsGroup) return err } + setupSuccess = true return nil } diff --git a/pkg/volume/secret/secret_test.go b/pkg/volume/secret/secret_test.go index 59185050955..9fb0e4d040a 100644 --- a/pkg/volume/secret/secret_test.go +++ b/pkg/volume/secret/secret_test.go @@ -362,6 +362,56 @@ func TestPlugin(t *testing.T) { } } +func TestInvalidPathSecret(t *testing.T) { + var ( + testPodUID = types.UID("test_pod_uid") + testVolumeName = "test_volume_name" + testNamespace = "test_secret_namespace" + testName = "test_secret_name" + + volumeSpec = volumeSpec(testVolumeName, testName, 0644) + secret = secret(testNamespace, testName) + client = fake.NewSimpleClientset(&secret) + pluginMgr = volume.VolumePluginMgr{} + rootDir, host = newTestHost(t, client) + ) + volumeSpec.Secret.Items = []v1.KeyToPath{ + {Key: "missing", Path: "missing"}, + } + + defer os.RemoveAll(rootDir) + pluginMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, host) + + plugin, err := pluginMgr.FindPluginByName(secretPluginName) + if err != nil { + t.Errorf("Can't find the plugin by name") + } + + pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}} + mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) + if err != nil { + t.Errorf("Failed to make a new Mounter: %v", err) + } + if mounter == nil { + t.Fatalf("Got a nil Mounter") + } + + volumePath := mounter.GetPath() + 
if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~secret/test_volume_name")) { + t.Errorf("Got unexpected path: %s", volumePath) + } + + err = mounter.SetUp(nil) + if err == nil { + t.Errorf("Expected error while setting up secret") + } + + _, err = os.Stat(volumePath) + if err == nil { + t.Errorf("Expected path %s to not exist", volumePath) + } +} + // Test the case where the plugin's ready file exists, but the volume dir is not a // mountpoint, which is the state the system will be in after reboot. The dir // should be mounter and the secret data written to it. diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index 57c7adfa7cb..154e3c8a2df 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -1433,6 +1433,8 @@ func isDeviceOpened(deviceToDetach AttachedVolume, mounter mount.Interface) (boo //TODO: refer to #36092 glog.V(3).Infof("The path isn't device path or doesn't exist. Skip checking device path: %s", deviceToDetach.DevicePath) deviceOpened = false + } else if devicePathErr != nil { + return false, deviceToDetach.GenerateErrorDetailed("PathIsDevice failed", devicePathErr) } else { deviceOpened, deviceOpenedErr = mounter.DeviceOpened(deviceToDetach.DevicePath) if deviceOpenedErr != nil { diff --git a/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/doc.go b/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/doc.go index cf325fa9f2b..09fdc9fc39b 100644 --- a/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/doc.go +++ b/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit // +k8s:defaulter-gen=TypeMeta +// +groupName=eventratelimit.admission.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. -// +groupName=eventratelimit.admission.k8s.io package v1alpha1 // import "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1" diff --git a/plugin/pkg/admission/exec/BUILD b/plugin/pkg/admission/exec/BUILD index aed101de94d..4d571efc75b 100644 --- a/plugin/pkg/admission/exec/BUILD +++ b/plugin/pkg/admission/exec/BUILD @@ -11,11 +11,11 @@ go_library( srcs = ["admission.go"], importpath = "k8s.io/kubernetes/plugin/pkg/admission/exec", deps = [ - "//pkg/apis/core:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/kubeapiserver/admission:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/admission/initializer:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", ], ) @@ -25,10 +25,11 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", - "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", ], ) diff --git a/plugin/pkg/admission/exec/admission.go b/plugin/pkg/admission/exec/admission.go index 00e3b6cfbca..b6cbf94bc3a 100644 --- 
a/plugin/pkg/admission/exec/admission.go +++ b/plugin/pkg/admission/exec/admission.go @@ -20,11 +20,11 @@ import ( "fmt" "io" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/admission" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" + genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/kubernetes" ) const ( @@ -52,7 +52,7 @@ func Register(plugins *admission.Plugins) { // a pod using host based configurations. type DenyExec struct { *admission.Handler - client internalclientset.Interface + client kubernetes.Interface // these flags control which items will be checked to deny exec/attach hostNetwork bool @@ -62,20 +62,7 @@ type DenyExec struct { } var _ admission.ValidationInterface = &DenyExec{} - -var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&DenyExec{}) - -// NewDenyEscalatingExec creates a new admission controller that denies an exec operation on a pod -// using host based configurations. -func NewDenyEscalatingExec() *DenyExec { - return &DenyExec{ - Handler: admission.NewHandler(admission.Connect), - hostNetwork: true, - hostIPC: true, - hostPID: true, - privileged: true, - } -} +var _ = genericadmissioninitializer.WantsExternalKubeClientSet(&DenyExec{}) // NewDenyExecOnPrivileged creates a new admission controller that is only checking the privileged // option. This is for legacy support of the DenyExecOnPrivileged admission controller. @@ -90,6 +77,31 @@ func NewDenyExecOnPrivileged() *DenyExec { } } +// NewDenyEscalatingExec creates a new admission controller that denies an exec operation on a pod +// using host based configurations. 
+func NewDenyEscalatingExec() *DenyExec { + return &DenyExec{ + Handler: admission.NewHandler(admission.Connect), + hostNetwork: true, + hostIPC: true, + hostPID: true, + privileged: true, + } +} + +// SetExternalKubeClientSet implements the WantsInternalKubeClientSet interface. +func (d *DenyExec) SetExternalKubeClientSet(client kubernetes.Interface) { + d.client = client +} + +// ValidateInitialization implements the InitializationValidator interface. +func (d *DenyExec) ValidateInitialization() error { + if d.client == nil { + return fmt.Errorf("missing client") + } + return nil +} + // Validate makes an admission decision based on the request attributes func (d *DenyExec) Validate(a admission.Attributes) (err error) { path := a.GetResource().Resource @@ -100,24 +112,21 @@ func (d *DenyExec) Validate(a admission.Attributes) (err error) { if path != "pods/exec" && path != "pods/attach" { return nil } - pod, err := d.client.Core().Pods(a.GetNamespace()).Get(a.GetName(), metav1.GetOptions{}) + pod, err := d.client.CoreV1().Pods(a.GetNamespace()).Get(a.GetName(), metav1.GetOptions{}) if err != nil { return admission.NewForbidden(a, err) } - if pod.Spec.SecurityContext != nil { - securityContext := pod.Spec.SecurityContext - if d.hostNetwork && securityContext.HostNetwork { - return admission.NewForbidden(a, fmt.Errorf("cannot exec into or attach to a container using host network")) - } + if d.hostNetwork && pod.Spec.HostNetwork { + return admission.NewForbidden(a, fmt.Errorf("cannot exec into or attach to a container using host network")) + } - if d.hostPID && securityContext.HostPID { - return admission.NewForbidden(a, fmt.Errorf("cannot exec into or attach to a container using host pid")) - } + if d.hostPID && pod.Spec.HostPID { + return admission.NewForbidden(a, fmt.Errorf("cannot exec into or attach to a container using host pid")) + } - if d.hostIPC && securityContext.HostIPC { - return admission.NewForbidden(a, fmt.Errorf("cannot exec into or attach to a 
container using host ipc")) - } + if d.hostIPC && pod.Spec.HostIPC { + return admission.NewForbidden(a, fmt.Errorf("cannot exec into or attach to a container using host ipc")) } if d.privileged && isPrivileged(pod) { @@ -128,7 +137,7 @@ func (d *DenyExec) Validate(a admission.Attributes) (err error) { } // isPrivileged will return true a pod has any privileged containers -func isPrivileged(pod *api.Pod) bool { +func isPrivileged(pod *corev1.Pod) bool { for _, c := range pod.Spec.InitContainers { if c.SecurityContext == nil || c.SecurityContext.Privileged == nil { continue @@ -147,16 +156,3 @@ func isPrivileged(pod *api.Pod) bool { } return false } - -// SetInternalKubeClientSet implements the WantsInternalKubeClientSet interface. -func (d *DenyExec) SetInternalKubeClientSet(client internalclientset.Interface) { - d.client = client -} - -// ValidateInitialization implements the InitializationValidator interface. -func (d *DenyExec) ValidateInitialization() error { - if d.client == nil { - return fmt.Errorf("missing client") - } - return nil -} diff --git a/plugin/pkg/admission/exec/admission_test.go b/plugin/pkg/admission/exec/admission_test.go index 89370d1c57c..c8d0b665c1b 100644 --- a/plugin/pkg/admission/exec/admission_test.go +++ b/plugin/pkg/admission/exec/admission_test.go @@ -19,12 +19,13 @@ package exec import ( "testing" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission" + "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" ) // newAllowEscalatingExec returns `admission.Interface` that allows execution on @@ -41,20 +42,18 @@ func newAllowEscalatingExec() *DenyExec { func TestAdmission(t *testing.T) { privPod := validPod("privileged") priv := true - privPod.Spec.Containers[0].SecurityContext = &api.SecurityContext{ + 
privPod.Spec.Containers[0].SecurityContext = &corev1.SecurityContext{ Privileged: &priv, } hostPIDPod := validPod("hostPID") - hostPIDPod.Spec.SecurityContext = &api.PodSecurityContext{} - hostPIDPod.Spec.SecurityContext.HostPID = true + hostPIDPod.Spec.HostPID = true hostIPCPod := validPod("hostIPC") - hostIPCPod.Spec.SecurityContext = &api.PodSecurityContext{} - hostIPCPod.Spec.SecurityContext.HostIPC = true + hostIPCPod.Spec.HostIPC = true testCases := map[string]struct { - pod *api.Pod + pod *corev1.Pod shouldAccept bool }{ "priv": { @@ -106,7 +105,7 @@ func TestAdmission(t *testing.T) { } } -func testAdmission(t *testing.T, pod *api.Pod, handler *DenyExec, shouldAccept bool) { +func testAdmission(t *testing.T, pod *corev1.Pod, handler *DenyExec, shouldAccept bool) { mockClient := &fake.Clientset{} mockClient.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) { if action.(core.GetAction).GetName() == pod.Name { @@ -116,7 +115,7 @@ func testAdmission(t *testing.T, pod *api.Pod, handler *DenyExec, shouldAccept b return true, nil, nil }) - handler.SetInternalKubeClientSet(mockClient) + handler.SetExternalKubeClientSet(mockClient) admission.ValidateInitialization(handler) // pods/exec @@ -146,20 +145,18 @@ func testAdmission(t *testing.T, pod *api.Pod, handler *DenyExec, shouldAccept b func TestDenyExecOnPrivileged(t *testing.T) { privPod := validPod("privileged") priv := true - privPod.Spec.Containers[0].SecurityContext = &api.SecurityContext{ + privPod.Spec.Containers[0].SecurityContext = &corev1.SecurityContext{ Privileged: &priv, } hostPIDPod := validPod("hostPID") - hostPIDPod.Spec.SecurityContext = &api.PodSecurityContext{} - hostPIDPod.Spec.SecurityContext.HostPID = true + hostPIDPod.Spec.HostPID = true hostIPCPod := validPod("hostIPC") - hostIPCPod.Spec.SecurityContext = &api.PodSecurityContext{} - hostIPCPod.Spec.SecurityContext.HostIPC = true + hostIPCPod.Spec.HostIPC = true testCases := map[string]struct { - pod *api.Pod + 
pod *corev1.Pod shouldAccept bool }{ "priv": { @@ -195,11 +192,11 @@ func TestDenyExecOnPrivileged(t *testing.T) { } } -func validPod(name string) *api.Pod { - return &api.Pod{ +func validPod(name string) *corev1.Pod { + return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test"}, - Spec: api.PodSpec{ - Containers: []api.Container{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ {Name: "ctr1", Image: "image"}, {Name: "ctr2", Image: "image2"}, }, diff --git a/plugin/pkg/admission/limitranger/BUILD b/plugin/pkg/admission/limitranger/BUILD index 3edd65f17be..4d82b35c860 100644 --- a/plugin/pkg/admission/limitranger/BUILD +++ b/plugin/pkg/admission/limitranger/BUILD @@ -15,10 +15,7 @@ go_library( importpath = "k8s.io/kubernetes/plugin/pkg/admission/limitranger", deps = [ "//pkg/apis/core:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/informers/informers_generated/internalversion:go_default_library", - "//pkg/client/listers/core/internalversion:go_default_library", - "//pkg/kubeapiserver/admission:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -26,6 +23,10 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/admission/initializer:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//vendor/github.com/hashicorp/golang-lru:go_default_library", ], ) @@ -36,16 
+37,18 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - "//pkg/client/informers/informers_generated/internalversion:go_default_library", - "//pkg/kubeapiserver/admission:go_default_library", + "//pkg/apis/core/v1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/admission/initializer:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", ], ) diff --git a/plugin/pkg/admission/limitranger/admission.go b/plugin/pkg/admission/limitranger/admission.go index 3659cbb7380..c2ada02b0df 100644 --- a/plugin/pkg/admission/limitranger/admission.go +++ b/plugin/pkg/admission/limitranger/admission.go @@ -23,8 +23,9 @@ import ( "strings" "time" - lru "github.com/hashicorp/golang-lru" + "github.com/hashicorp/golang-lru" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,11 +33,11 @@ import ( "k8s.io/apimachinery/pkg/runtime" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/admission" + genericadmissioninitailizer 
"k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" - corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion" - kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" ) const ( @@ -55,9 +56,9 @@ func Register(plugins *admission.Plugins) { // LimitRanger enforces usage limits on a per resource basis in the namespace type LimitRanger struct { *admission.Handler - client internalclientset.Interface + client kubernetes.Interface actions LimitRangerActions - lister corelisters.LimitRangeLister + lister corev1listers.LimitRangeLister // liveLookups holds the last few live lookups we've done to help ammortize cost on repeated lookup failures. // This let's us handle the case of latent caches, by looking up actual results for a namespace on cache miss/no results. 
@@ -68,19 +69,25 @@ type LimitRanger struct { var _ admission.MutationInterface = &LimitRanger{} var _ admission.ValidationInterface = &LimitRanger{} -var _ kubeapiserveradmission.WantsInternalKubeInformerFactory = &LimitRanger{} + +var _ genericadmissioninitailizer.WantsExternalKubeInformerFactory = &LimitRanger{} +var _ genericadmissioninitailizer.WantsExternalKubeClientSet = &LimitRanger{} type liveLookupEntry struct { expiry time.Time - items []*api.LimitRange + items []*corev1.LimitRange } -func (l *LimitRanger) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { - limitRangeInformer := f.Core().InternalVersion().LimitRanges() +func (l *LimitRanger) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { + limitRangeInformer := f.Core().V1().LimitRanges() l.SetReadyFunc(limitRangeInformer.Informer().HasSynced) l.lister = limitRangeInformer.Lister() } +func (a *LimitRanger) SetExternalKubeClientSet(client kubernetes.Interface) { + a.client = client +} + func (l *LimitRanger) ValidateInitialization() error { if l.lister == nil { return fmt.Errorf("missing limitRange lister") @@ -101,20 +108,11 @@ func (l *LimitRanger) Validate(a admission.Attributes) (err error) { return l.runLimitFunc(a, l.actions.ValidateLimit) } -func (l *LimitRanger) runLimitFunc(a admission.Attributes, limitFn func(limitRange *api.LimitRange, kind string, obj runtime.Object) error) (err error) { +func (l *LimitRanger) runLimitFunc(a admission.Attributes, limitFn func(limitRange *corev1.LimitRange, kind string, obj runtime.Object) error) (err error) { if !l.actions.SupportsAttributes(a) { return nil } - obj := a.GetObject() - name := "Unknown" - if obj != nil { - name, _ = meta.NewAccessor().Name(obj) - if len(name) == 0 { - name, _ = meta.NewAccessor().GenerateName(obj) - } - } - // ignore all objects marked for deletion oldObj := a.GetOldObject() if oldObj != nil { @@ -148,7 +146,7 @@ func (l *LimitRanger) runLimitFunc(a admission.Attributes, limitFn 
func(limitRan return nil } -func (l *LimitRanger) GetLimitRanges(a admission.Attributes) ([]*api.LimitRange, error) { +func (l *LimitRanger) GetLimitRanges(a admission.Attributes) ([]*corev1.LimitRange, error) { items, err := l.lister.LimitRanges(a.GetNamespace()).List(labels.Everything()) if err != nil { return nil, admission.NewForbidden(a, fmt.Errorf("unable to %s %v at this time because there was an error enforcing limit ranges", a.GetOperation(), a.GetResource())) @@ -163,7 +161,7 @@ func (l *LimitRanger) GetLimitRanges(a admission.Attributes) ([]*api.LimitRange, // If there is already in-flight List() for a given namespace, we should wait until // it is finished and cache is updated instead of doing the same, also to avoid // throttling - see #22422 for details. - liveList, err := l.client.Core().LimitRanges(a.GetNamespace()).List(metav1.ListOptions{}) + liveList, err := l.client.CoreV1().LimitRanges(a.GetNamespace()).List(metav1.ListOptions{}) if err != nil { return nil, admission.NewForbidden(a, err) } @@ -204,31 +202,24 @@ func NewLimitRanger(actions LimitRangerActions) (*LimitRanger, error) { }, nil } -var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&LimitRanger{}) -var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&LimitRanger{}) - -func (a *LimitRanger) SetInternalKubeClientSet(client internalclientset.Interface) { - a.client = client -} - // defaultContainerResourceRequirements returns the default requirements for a container // the requirement.Limits are taken from the LimitRange defaults (if specified) // the requirement.Requests are taken from the LimitRange default request (if specified) -func defaultContainerResourceRequirements(limitRange *api.LimitRange) api.ResourceRequirements { +func defaultContainerResourceRequirements(limitRange *corev1.LimitRange) api.ResourceRequirements { requirements := api.ResourceRequirements{} requirements.Requests = api.ResourceList{} requirements.Limits = api.ResourceList{} for i := range 
limitRange.Spec.Limits { limit := limitRange.Spec.Limits[i] - if limit.Type == api.LimitTypeContainer { + if limit.Type == corev1.LimitTypeContainer { for k, v := range limit.DefaultRequest { value := v.Copy() - requirements.Requests[k] = *value + requirements.Requests[api.ResourceName(k)] = *value } for k, v := range limit.Default { value := v.Copy() - requirements.Limits[k] = *value + requirements.Limits[api.ResourceName(k)] = *value } } } @@ -309,9 +300,9 @@ func requestLimitEnforcedValues(requestQuantity, limitQuantity, enforcedQuantity } // minConstraint enforces the min constraint over the specified resource -func minConstraint(limitType api.LimitType, resourceName api.ResourceName, enforced resource.Quantity, request api.ResourceList, limit api.ResourceList) error { - req, reqExists := request[resourceName] - lim, limExists := limit[resourceName] +func minConstraint(limitType string, resourceName string, enforced resource.Quantity, request api.ResourceList, limit api.ResourceList) error { + req, reqExists := request[api.ResourceName(resourceName)] + lim, limExists := limit[api.ResourceName(resourceName)] observedReqValue, observedLimValue, enforcedValue := requestLimitEnforcedValues(req, lim, enforced) if !reqExists { @@ -328,8 +319,8 @@ func minConstraint(limitType api.LimitType, resourceName api.ResourceName, enfor // maxRequestConstraint enforces the max constraint over the specified resource // use when specify LimitType resource doesn't recognize limit values -func maxRequestConstraint(limitType api.LimitType, resourceName api.ResourceName, enforced resource.Quantity, request api.ResourceList) error { - req, reqExists := request[resourceName] +func maxRequestConstraint(limitType string, resourceName string, enforced resource.Quantity, request api.ResourceList) error { + req, reqExists := request[api.ResourceName(resourceName)] observedReqValue, _, enforcedValue := requestLimitEnforcedValues(req, resource.Quantity{}, enforced) if !reqExists { @@ -342,9 
+333,9 @@ func maxRequestConstraint(limitType api.LimitType, resourceName api.ResourceName } // maxConstraint enforces the max constraint over the specified resource -func maxConstraint(limitType api.LimitType, resourceName api.ResourceName, enforced resource.Quantity, request api.ResourceList, limit api.ResourceList) error { - req, reqExists := request[resourceName] - lim, limExists := limit[resourceName] +func maxConstraint(limitType string, resourceName string, enforced resource.Quantity, request api.ResourceList, limit api.ResourceList) error { + req, reqExists := request[api.ResourceName(resourceName)] + lim, limExists := limit[api.ResourceName(resourceName)] observedReqValue, observedLimValue, enforcedValue := requestLimitEnforcedValues(req, lim, enforced) if !limExists { @@ -360,9 +351,9 @@ func maxConstraint(limitType api.LimitType, resourceName api.ResourceName, enfor } // limitRequestRatioConstraint enforces the limit to request ratio over the specified resource -func limitRequestRatioConstraint(limitType api.LimitType, resourceName api.ResourceName, enforced resource.Quantity, request api.ResourceList, limit api.ResourceList) error { - req, reqExists := request[resourceName] - lim, limExists := limit[resourceName] +func limitRequestRatioConstraint(limitType string, resourceName string, enforced resource.Quantity, request api.ResourceList, limit api.ResourceList) error { + req, reqExists := request[api.ResourceName(resourceName)] + lim, limExists := limit[api.ResourceName(resourceName)] observedReqValue, observedLimValue, _ := requestLimitEnforcedValues(req, lim, enforced) if !reqExists || (observedReqValue == int64(0)) { @@ -435,7 +426,7 @@ var _ LimitRangerActions = &DefaultLimitRangerActions{} // Limit enforces resource requirements of incoming resources against enumerated constraints // on the LimitRange. 
It may modify the incoming object to apply default resource requirements // if not specified, and enumerated on the LimitRange -func (d *DefaultLimitRangerActions) MutateLimit(limitRange *api.LimitRange, resourceName string, obj runtime.Object) error { +func (d *DefaultLimitRangerActions) MutateLimit(limitRange *corev1.LimitRange, resourceName string, obj runtime.Object) error { switch resourceName { case "pods": return PodMutateLimitFunc(limitRange, obj.(*api.Pod)) @@ -446,7 +437,7 @@ func (d *DefaultLimitRangerActions) MutateLimit(limitRange *api.LimitRange, reso // Limit enforces resource requirements of incoming resources against enumerated constraints // on the LimitRange. It may modify the incoming object to apply default resource requirements // if not specified, and enumerated on the LimitRange -func (d *DefaultLimitRangerActions) ValidateLimit(limitRange *api.LimitRange, resourceName string, obj runtime.Object) error { +func (d *DefaultLimitRangerActions) ValidateLimit(limitRange *corev1.LimitRange, resourceName string, obj runtime.Object) error { switch resourceName { case "pods": return PodValidateLimitFunc(limitRange, obj.(*api.Pod)) @@ -467,7 +458,7 @@ func (d *DefaultLimitRangerActions) SupportsAttributes(a admission.Attributes) b } // SupportsLimit always returns true. -func (d *DefaultLimitRangerActions) SupportsLimit(limitRange *api.LimitRange) bool { +func (d *DefaultLimitRangerActions) SupportsLimit(limitRange *corev1.LimitRange) bool { return true } @@ -475,22 +466,22 @@ func (d *DefaultLimitRangerActions) SupportsLimit(limitRange *api.LimitRange) bo // Users request storage via pvc.Spec.Resources.Requests. Min/Max is enforced by an admin with LimitRange. // Claims will not be modified with default values because storage is a required part of pvc.Spec. // All storage enforced values *only* apply to pvc.Spec.Resources.Requests. 
-func PersistentVolumeClaimValidateLimitFunc(limitRange *api.LimitRange, pvc *api.PersistentVolumeClaim) error { +func PersistentVolumeClaimValidateLimitFunc(limitRange *corev1.LimitRange, pvc *api.PersistentVolumeClaim) error { var errs []error for i := range limitRange.Spec.Limits { limit := limitRange.Spec.Limits[i] limitType := limit.Type - if limitType == api.LimitTypePersistentVolumeClaim { + if limitType == corev1.LimitTypePersistentVolumeClaim { for k, v := range limit.Min { // normal usage of minConstraint. pvc.Spec.Resources.Limits is not recognized as user input - if err := minConstraint(limitType, k, v, pvc.Spec.Resources.Requests, api.ResourceList{}); err != nil { + if err := minConstraint(string(limitType), string(k), v, pvc.Spec.Resources.Requests, api.ResourceList{}); err != nil { errs = append(errs, err) } } for k, v := range limit.Max { // We want to enforce the max of the LimitRange against what // the user requested. - if err := maxRequestConstraint(limitType, k, v, pvc.Spec.Resources.Requests); err != nil { + if err := maxRequestConstraint(string(limitType), string(k), v, pvc.Spec.Resources.Requests); err != nil { errs = append(errs, err) } } @@ -502,7 +493,7 @@ func PersistentVolumeClaimValidateLimitFunc(limitRange *api.LimitRange, pvc *api // PodMutateLimitFunc sets resource requirements enumerated by the pod against // the specified LimitRange. 
The pod may be modified to apply default resource // requirements if not specified, and enumerated on the LimitRange -func PodMutateLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error { +func PodMutateLimitFunc(limitRange *corev1.LimitRange, pod *api.Pod) error { defaultResources := defaultContainerResourceRequirements(limitRange) mergePodResourceRequirements(pod, &defaultResources) return nil @@ -510,28 +501,28 @@ func PodMutateLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error { // PodValidateLimitFunc enforces resource requirements enumerated by the pod against // the specified LimitRange. -func PodValidateLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error { +func PodValidateLimitFunc(limitRange *corev1.LimitRange, pod *api.Pod) error { var errs []error for i := range limitRange.Spec.Limits { limit := limitRange.Spec.Limits[i] limitType := limit.Type // enforce container limits - if limitType == api.LimitTypeContainer { + if limitType == corev1.LimitTypeContainer { for j := range pod.Spec.Containers { container := &pod.Spec.Containers[j] for k, v := range limit.Min { - if err := minConstraint(limitType, k, v, container.Resources.Requests, container.Resources.Limits); err != nil { + if err := minConstraint(string(limitType), string(k), v, container.Resources.Requests, container.Resources.Limits); err != nil { errs = append(errs, err) } } for k, v := range limit.Max { - if err := maxConstraint(limitType, k, v, container.Resources.Requests, container.Resources.Limits); err != nil { + if err := maxConstraint(string(limitType), string(k), v, container.Resources.Requests, container.Resources.Limits); err != nil { errs = append(errs, err) } } for k, v := range limit.MaxLimitRequestRatio { - if err := limitRequestRatioConstraint(limitType, k, v, container.Resources.Requests, container.Resources.Limits); err != nil { + if err := limitRequestRatioConstraint(string(limitType), string(k), v, container.Resources.Requests, container.Resources.Limits); err 
!= nil { errs = append(errs, err) } } @@ -539,17 +530,17 @@ func PodValidateLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error { for j := range pod.Spec.InitContainers { container := &pod.Spec.InitContainers[j] for k, v := range limit.Min { - if err := minConstraint(limitType, k, v, container.Resources.Requests, container.Resources.Limits); err != nil { + if err := minConstraint(string(limitType), string(k), v, container.Resources.Requests, container.Resources.Limits); err != nil { errs = append(errs, err) } } for k, v := range limit.Max { - if err := maxConstraint(limitType, k, v, container.Resources.Requests, container.Resources.Limits); err != nil { + if err := maxConstraint(string(limitType), string(k), v, container.Resources.Requests, container.Resources.Limits); err != nil { errs = append(errs, err) } } for k, v := range limit.MaxLimitRequestRatio { - if err := limitRequestRatioConstraint(limitType, k, v, container.Resources.Requests, container.Resources.Limits); err != nil { + if err := limitRequestRatioConstraint(string(limitType), string(k), v, container.Resources.Requests, container.Resources.Limits); err != nil { errs = append(errs, err) } } @@ -557,7 +548,7 @@ func PodValidateLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error { } // enforce pod limits on init containers - if limitType == api.LimitTypePod { + if limitType == corev1.LimitTypePod { containerRequests, containerLimits := []api.ResourceList{}, []api.ResourceList{} for j := range pod.Spec.Containers { container := &pod.Spec.Containers[j] @@ -589,17 +580,17 @@ func PodValidateLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error { } } for k, v := range limit.Min { - if err := minConstraint(limitType, k, v, podRequests, podLimits); err != nil { + if err := minConstraint(string(limitType), string(k), v, podRequests, podLimits); err != nil { errs = append(errs, err) } } for k, v := range limit.Max { - if err := maxConstraint(limitType, k, v, podRequests, podLimits); err != nil { 
+ if err := maxConstraint(string(limitType), string(k), v, podRequests, podLimits); err != nil { errs = append(errs, err) } } for k, v := range limit.MaxLimitRequestRatio { - if err := limitRequestRatioConstraint(limitType, k, v, podRequests, podLimits); err != nil { + if err := limitRequestRatioConstraint(string(limitType), string(k), v, podRequests, podLimits); err != nil { errs = append(errs, err) } } diff --git a/plugin/pkg/admission/limitranger/admission_test.go b/plugin/pkg/admission/limitranger/admission_test.go index 05046143108..f62277fa836 100644 --- a/plugin/pkg/admission/limitranger/admission_test.go +++ b/plugin/pkg/admission/limitranger/admission_test.go @@ -22,18 +22,20 @@ import ( "testing" "time" + corev1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/admission" + genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" api "k8s.io/kubernetes/pkg/apis/core" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" - kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" + "k8s.io/kubernetes/pkg/apis/core/v1" ) func getComputeResourceList(cpu, memory string) api.ResourceList { @@ -63,8 +65,8 @@ func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequ } // createLimitRange creates a limit range with the specified data -func createLimitRange(limitType api.LimitType, min, max, defaultLimit, defaultRequest, maxLimitRequestRatio api.ResourceList) api.LimitRange { - return 
api.LimitRange{ +func createLimitRange(limitType api.LimitType, min, max, defaultLimit, defaultRequest, maxLimitRequestRatio api.ResourceList) corev1.LimitRange { + internalLimitRage := api.LimitRange{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", Namespace: "test", @@ -82,10 +84,13 @@ func createLimitRange(limitType api.LimitType, min, max, defaultLimit, defaultRe }, }, } + externalLimitRange := corev1.LimitRange{} + v1.Convert_core_LimitRange_To_v1_LimitRange(&internalLimitRage, &externalLimitRange, nil) + return externalLimitRange } -func validLimitRange() api.LimitRange { - return api.LimitRange{ +func validLimitRange() corev1.LimitRange { + internalLimitRange := api.LimitRange{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", Namespace: "test", @@ -107,10 +112,13 @@ func validLimitRange() api.LimitRange { }, }, } + externalLimitRange := corev1.LimitRange{} + v1.Convert_core_LimitRange_To_v1_LimitRange(&internalLimitRange, &externalLimitRange, nil) + return externalLimitRange } -func validLimitRangeNoDefaults() api.LimitRange { - return api.LimitRange{ +func validLimitRangeNoDefaults() corev1.LimitRange { + internalLimitRange := api.LimitRange{ ObjectMeta: metav1.ObjectMeta{ Name: "abc", Namespace: "test", @@ -130,6 +138,9 @@ func validLimitRangeNoDefaults() api.LimitRange { }, }, } + externalLimitRange := corev1.LimitRange{} + v1.Convert_core_LimitRange_To_v1_LimitRange(&internalLimitRange, &externalLimitRange, nil) + return externalLimitRange } func validPod(name string, numContainers int, resources api.ResourceRequirements) api.Pod { @@ -255,7 +266,7 @@ func TestMergePodResourceRequirements(t *testing.T) { func TestPodLimitFunc(t *testing.T) { type testCase struct { pod api.Pod - limitRange api.LimitRange + limitRange corev1.LimitRange } successCases := []testCase{ @@ -686,7 +697,7 @@ func TestPodLimitFuncApplyDefault(t *testing.T) { func TestLimitRangerIgnoresSubresource(t *testing.T) { limitRange := validLimitRangeNoDefaults() - mockClient := 
newMockClientForTest([]api.LimitRange{limitRange}) + mockClient := newMockClientForTest([]corev1.LimitRange{limitRange}) handler, informerFactory, err := newHandlerForTest(mockClient) if err != nil { t.Errorf("unexpected error initializing handler: %v", err) @@ -712,7 +723,7 @@ func TestLimitRangerIgnoresSubresource(t *testing.T) { func TestLimitRangerAdmitPod(t *testing.T) { limitRange := validLimitRangeNoDefaults() - mockClient := newMockClientForTest([]api.LimitRange{limitRange}) + mockClient := newMockClientForTest([]corev1.LimitRange{limitRange}) handler, informerFactory, err := newHandlerForTest(mockClient) if err != nil { t.Errorf("unexpected error initializing handler: %v", err) @@ -745,10 +756,10 @@ func TestLimitRangerAdmitPod(t *testing.T) { } // newMockClientForTest creates a mock client that returns a client configured for the specified list of limit ranges -func newMockClientForTest(limitRanges []api.LimitRange) *fake.Clientset { +func newMockClientForTest(limitRanges []corev1.LimitRange) *fake.Clientset { mockClient := &fake.Clientset{} mockClient.AddReactor("list", "limitranges", func(action core.Action) (bool, runtime.Object, error) { - limitRangeList := &api.LimitRangeList{ + limitRangeList := &corev1.LimitRangeList{ ListMeta: metav1.ListMeta{ ResourceVersion: fmt.Sprintf("%d", len(limitRanges)), }, @@ -769,7 +780,7 @@ func newHandlerForTest(c clientset.Interface) (*LimitRanger, informers.SharedInf if err != nil { return nil, f, err } - pluginInitializer := kubeadmission.NewPluginInitializer(c, f, nil, nil, nil) + pluginInitializer := genericadmissioninitializer.New(c, f, nil, nil) pluginInitializer.Initialize(handler) err = admission.ValidateInitialization(handler) return handler, f, err @@ -788,7 +799,7 @@ func validPersistentVolumeClaim(name string, resources api.ResourceRequirements) func TestPersistentVolumeClaimLimitFunc(t *testing.T) { type testCase struct { pvc api.PersistentVolumeClaim - limitRange api.LimitRange + limitRange 
corev1.LimitRange } successCases := []testCase{ diff --git a/plugin/pkg/admission/limitranger/interfaces.go b/plugin/pkg/admission/limitranger/interfaces.go index 4c520c68480..2ba469795a9 100644 --- a/plugin/pkg/admission/limitranger/interfaces.go +++ b/plugin/pkg/admission/limitranger/interfaces.go @@ -17,20 +17,20 @@ limitations under the License. package limitranger import ( + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/admission" - api "k8s.io/kubernetes/pkg/apis/core" ) type LimitRangerActions interface { // MutateLimit is a pluggable function to set limits on the object. - MutateLimit(limitRange *api.LimitRange, kind string, obj runtime.Object) error + MutateLimit(limitRange *corev1.LimitRange, kind string, obj runtime.Object) error // ValidateLimits is a pluggable function to enforce limits on the object. - ValidateLimit(limitRange *api.LimitRange, kind string, obj runtime.Object) error + ValidateLimit(limitRange *corev1.LimitRange, kind string, obj runtime.Object) error // SupportsAttributes is a pluggable function to allow overridding what resources the limitranger // supports. SupportsAttributes(attr admission.Attributes) bool // SupportsLimit is a pluggable function to allow ignoring limits that should not be applied // for any reason. 
- SupportsLimit(limitRange *api.LimitRange) bool + SupportsLimit(limitRange *corev1.LimitRange) bool } diff --git a/plugin/pkg/admission/podpreset/BUILD b/plugin/pkg/admission/podpreset/BUILD index 34329ea7228..c27fcc83eb0 100644 --- a/plugin/pkg/admission/podpreset/BUILD +++ b/plugin/pkg/admission/podpreset/BUILD @@ -12,15 +12,16 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", - "//pkg/apis/settings:go_default_library", - "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - "//pkg/client/informers/informers_generated/internalversion:go_default_library", - "//pkg/client/listers/settings/internalversion:go_default_library", "//pkg/controller:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/settings/v1alpha1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + "//staging/src/k8s.io/client-go/listers/settings/v1alpha1:go_default_library", ], ) @@ -30,16 +31,17 @@ go_library( importpath = "k8s.io/kubernetes/plugin/pkg/admission/podpreset", deps = [ "//pkg/apis/core:go_default_library", - "//pkg/apis/settings:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/informers/informers_generated/internalversion:go_default_library", - "//pkg/client/listers/settings/internalversion:go_default_library", - "//pkg/kubeapiserver/admission:go_default_library", + "//pkg/apis/core/v1:go_default_library", + "//staging/src/k8s.io/api/settings/v1alpha1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/admission/initializer:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/listers/settings/v1alpha1:go_default_library", "//vendor/github.com/golang/glog:go_default_library", ], ) diff --git a/plugin/pkg/admission/podpreset/admission.go b/plugin/pkg/admission/podpreset/admission.go index 782a832f6ea..d91bdd78896 100644 --- a/plugin/pkg/admission/podpreset/admission.go +++ b/plugin/pkg/admission/podpreset/admission.go @@ -24,17 +24,18 @@ import ( "github.com/golang/glog" + settingsv1alpha1 "k8s.io/api/settings/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apiserver/pkg/admission" + genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + settingsv1alpha1listers "k8s.io/client-go/listers/settings/v1alpha1" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/settings" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" - settingslisters "k8s.io/kubernetes/pkg/client/listers/settings/internalversion" - kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" + apiscorev1 "k8s.io/kubernetes/pkg/apis/core/v1" ) const ( @@ -52,14 +53,14 @@ func Register(plugins *admission.Plugins) { // podPresetPlugin is an 
implementation of admission.Interface. type podPresetPlugin struct { *admission.Handler - client internalclientset.Interface + client kubernetes.Interface - lister settingslisters.PodPresetLister + lister settingsv1alpha1listers.PodPresetLister } var _ admission.MutationInterface = &podPresetPlugin{} -var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&podPresetPlugin{}) -var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&podPresetPlugin{}) +var _ = genericadmissioninitializer.WantsExternalKubeInformerFactory(&podPresetPlugin{}) +var _ = genericadmissioninitializer.WantsExternalKubeClientSet(&podPresetPlugin{}) // NewPlugin creates a new pod preset admission plugin. func NewPlugin() *podPresetPlugin { @@ -78,12 +79,12 @@ func (plugin *podPresetPlugin) ValidateInitialization() error { return nil } -func (a *podPresetPlugin) SetInternalKubeClientSet(client internalclientset.Interface) { +func (a *podPresetPlugin) SetExternalKubeClientSet(client kubernetes.Interface) { a.client = client } -func (a *podPresetPlugin) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { - podPresetInformer := f.Settings().InternalVersion().PodPresets() +func (a *podPresetPlugin) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { + podPresetInformer := f.Settings().V1alpha1().PodPresets() a.lister = podPresetInformer.Lister() a.SetReadyFunc(podPresetInformer.Informer().HasSynced) } @@ -149,8 +150,8 @@ func (c *podPresetPlugin) Admit(a admission.Attributes) error { } // filterPodPresets returns list of PodPresets which match given Pod. 
-func filterPodPresets(list []*settings.PodPreset, pod *api.Pod) ([]*settings.PodPreset, error) { - var matchingPPs []*settings.PodPreset +func filterPodPresets(list []*settingsv1alpha1.PodPreset, pod *api.Pod) ([]*settingsv1alpha1.PodPreset, error) { + var matchingPPs []*settingsv1alpha1.PodPreset for _, pp := range list { selector, err := metav1.LabelSelectorAsSelector(&pp.Spec.Selector) @@ -170,7 +171,7 @@ func filterPodPresets(list []*settings.PodPreset, pod *api.Pod) ([]*settings.Pod // safeToApplyPodPresetsOnPod determines if there is any conflict in information // injected by given PodPresets in the Pod. -func safeToApplyPodPresetsOnPod(pod *api.Pod, podPresets []*settings.PodPreset) error { +func safeToApplyPodPresetsOnPod(pod *api.Pod, podPresets []*settingsv1alpha1.PodPreset) error { var errs []error // volumes attribute is defined at the Pod level, so determine if volumes @@ -188,7 +189,7 @@ func safeToApplyPodPresetsOnPod(pod *api.Pod, podPresets []*settings.PodPreset) // safeToApplyPodPresetsOnContainer determines if there is any conflict in // information injected by given PodPresets in the given container. -func safeToApplyPodPresetsOnContainer(ctr *api.Container, podPresets []*settings.PodPreset) error { +func safeToApplyPodPresetsOnContainer(ctr *api.Container, podPresets []*settingsv1alpha1.PodPreset) error { var errs []error // check if it is safe to merge env vars and volume mounts from given podpresets and // container's existing env vars. @@ -204,7 +205,7 @@ func safeToApplyPodPresetsOnContainer(ctr *api.Container, podPresets []*settings // mergeEnv merges a list of env vars with the env vars injected by given list podPresets. // It returns an error if it detects any conflict during the merge. 
-func mergeEnv(envVars []api.EnvVar, podPresets []*settings.PodPreset) ([]api.EnvVar, error) { +func mergeEnv(envVars []api.EnvVar, podPresets []*settingsv1alpha1.PodPreset) ([]api.EnvVar, error) { origEnv := map[string]api.EnvVar{} for _, v := range envVars { origEnv[v.Name] = v @@ -217,16 +218,21 @@ func mergeEnv(envVars []api.EnvVar, podPresets []*settings.PodPreset) ([]api.Env for _, pp := range podPresets { for _, v := range pp.Spec.Env { + internalEnv := api.EnvVar{} + if err := apiscorev1.Convert_v1_EnvVar_To_core_EnvVar(&v, &internalEnv, nil); err != nil { + return nil, err + } + found, ok := origEnv[v.Name] if !ok { // if we don't already have it append it and continue - origEnv[v.Name] = v - mergedEnv = append(mergedEnv, v) + origEnv[v.Name] = internalEnv + mergedEnv = append(mergedEnv, internalEnv) continue } // make sure they are identical or throw an error - if !reflect.DeepEqual(found, v) { + if !reflect.DeepEqual(found, internalEnv) { errs = append(errs, fmt.Errorf("merging env for %s has a conflict on %s: \n%#v\ndoes not match\n%#v\n in container", pp.GetName(), v.Name, v, found)) } } @@ -240,12 +246,19 @@ func mergeEnv(envVars []api.EnvVar, podPresets []*settings.PodPreset) ([]api.Env return mergedEnv, err } -func mergeEnvFrom(envSources []api.EnvFromSource, podPresets []*settings.PodPreset) ([]api.EnvFromSource, error) { +func mergeEnvFrom(envSources []api.EnvFromSource, podPresets []*settingsv1alpha1.PodPreset) ([]api.EnvFromSource, error) { var mergedEnvFrom []api.EnvFromSource mergedEnvFrom = append(mergedEnvFrom, envSources...) for _, pp := range podPresets { - mergedEnvFrom = append(mergedEnvFrom, pp.Spec.EnvFrom...) 
+ for _, envFromSource := range pp.Spec.EnvFrom { + internalEnvFrom := api.EnvFromSource{} + if err := apiscorev1.Convert_v1_EnvFromSource_To_core_EnvFromSource(&envFromSource, &internalEnvFrom, nil); err != nil { + return nil, err + } + mergedEnvFrom = append(mergedEnvFrom, internalEnvFrom) + } + } return mergedEnvFrom, nil @@ -253,7 +266,7 @@ func mergeEnvFrom(envSources []api.EnvFromSource, podPresets []*settings.PodPres // mergeVolumeMounts merges given list of VolumeMounts with the volumeMounts // injected by given podPresets. It returns an error if it detects any conflict during the merge. -func mergeVolumeMounts(volumeMounts []api.VolumeMount, podPresets []*settings.PodPreset) ([]api.VolumeMount, error) { +func mergeVolumeMounts(volumeMounts []api.VolumeMount, podPresets []*settingsv1alpha1.PodPreset) ([]api.VolumeMount, error) { origVolumeMounts := map[string]api.VolumeMount{} volumeMountsByPath := map[string]api.VolumeMount{} @@ -269,15 +282,19 @@ func mergeVolumeMounts(volumeMounts []api.VolumeMount, podPresets []*settings.Po for _, pp := range podPresets { for _, v := range pp.Spec.VolumeMounts { + internalVolumeMount := api.VolumeMount{} + if err := apiscorev1.Convert_v1_VolumeMount_To_core_VolumeMount(&v, &internalVolumeMount, nil); err != nil { + return nil, err + } found, ok := origVolumeMounts[v.Name] if !ok { // if we don't already have it append it and continue - origVolumeMounts[v.Name] = v - mergedVolumeMounts = append(mergedVolumeMounts, v) + origVolumeMounts[v.Name] = internalVolumeMount + mergedVolumeMounts = append(mergedVolumeMounts, internalVolumeMount) } else { // make sure they are identical or throw an error // shall we throw an error for identical volumeMounts ? 
- if !reflect.DeepEqual(found, v) { + if !reflect.DeepEqual(found, internalVolumeMount) { errs = append(errs, fmt.Errorf("merging volume mounts for %s has a conflict on %s: \n%#v\ndoes not match\n%#v\n in container", pp.GetName(), v.Name, v, found)) } } @@ -285,10 +302,10 @@ func mergeVolumeMounts(volumeMounts []api.VolumeMount, podPresets []*settings.Po found, ok = volumeMountsByPath[v.MountPath] if !ok { // if we don't already have it append it and continue - volumeMountsByPath[v.MountPath] = v + volumeMountsByPath[v.MountPath] = internalVolumeMount } else { // make sure they are identical or throw an error - if !reflect.DeepEqual(found, v) { + if !reflect.DeepEqual(found, internalVolumeMount) { errs = append(errs, fmt.Errorf("merging volume mounts for %s has a conflict on mount path %s: \n%#v\ndoes not match\n%#v\n in container", pp.GetName(), v.MountPath, v, found)) } } @@ -305,7 +322,7 @@ func mergeVolumeMounts(volumeMounts []api.VolumeMount, podPresets []*settings.Po // mergeVolumes merges given list of Volumes with the volumes injected by given // podPresets. It returns an error if it detects any conflict during the merge. 
-func mergeVolumes(volumes []api.Volume, podPresets []*settings.PodPreset) ([]api.Volume, error) { +func mergeVolumes(volumes []api.Volume, podPresets []*settingsv1alpha1.PodPreset) ([]api.Volume, error) { origVolumes := map[string]api.Volume{} for _, v := range volumes { origVolumes[v.Name] = v @@ -318,16 +335,20 @@ func mergeVolumes(volumes []api.Volume, podPresets []*settings.PodPreset) ([]api for _, pp := range podPresets { for _, v := range pp.Spec.Volumes { + internalVolume := api.Volume{} + if err := apiscorev1.Convert_v1_Volume_To_core_Volume(&v, &internalVolume, nil); err != nil { + return nil, err + } found, ok := origVolumes[v.Name] if !ok { // if we don't already have it append it and continue - origVolumes[v.Name] = v - mergedVolumes = append(mergedVolumes, v) + origVolumes[v.Name] = internalVolume + mergedVolumes = append(mergedVolumes, internalVolume) continue } // make sure they are identical or throw an error - if !reflect.DeepEqual(found, v) { + if !reflect.DeepEqual(found, internalVolume) { errs = append(errs, fmt.Errorf("merging volumes for %s has a conflict on %s: \n%#v\ndoes not match\n%#v\n in container", pp.GetName(), v.Name, v, found)) } } @@ -348,7 +369,7 @@ func mergeVolumes(volumes []api.Volume, podPresets []*settings.PodPreset) ([]api // applyPodPresetsOnPod updates the PodSpec with merged information from all the // applicable PodPresets. It ignores the errors of merge functions because merge // errors have already been checked in safeToApplyPodPresetsOnPod function. -func applyPodPresetsOnPod(pod *api.Pod, podPresets []*settings.PodPreset) { +func applyPodPresetsOnPod(pod *api.Pod, podPresets []*settingsv1alpha1.PodPreset) { if len(podPresets) == 0 { return } @@ -374,7 +395,7 @@ func applyPodPresetsOnPod(pod *api.Pod, podPresets []*settings.PodPreset) { // applyPodPresetsOnContainer injects envVars, VolumeMounts and envFrom from // given podPresets in to the given container. 
It ignores conflict errors // because it assumes those have been checked already by the caller. -func applyPodPresetsOnContainer(ctr *api.Container, podPresets []*settings.PodPreset) { +func applyPodPresetsOnContainer(ctr *api.Container, podPresets []*settingsv1alpha1.PodPreset) { envVars, _ := mergeEnv(ctr.Env, podPresets) ctr.Env = envVars diff --git a/plugin/pkg/admission/podpreset/admission_test.go b/plugin/pkg/admission/podpreset/admission_test.go index 3342530f24a..8e1a7fa4cc7 100644 --- a/plugin/pkg/admission/podpreset/admission_test.go +++ b/plugin/pkg/admission/podpreset/admission_test.go @@ -20,44 +20,45 @@ import ( "reflect" "testing" + corev1 "k8s.io/api/core/v1" + settingsv1alpha1 "k8s.io/api/settings/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" kadmission "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + settingsv1alpha1listers "k8s.io/client-go/listers/settings/v1alpha1" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/settings" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" - settingslisters "k8s.io/kubernetes/pkg/client/listers/settings/internalversion" "k8s.io/kubernetes/pkg/controller" ) func TestMergeEnv(t *testing.T) { tests := map[string]struct { orig []api.EnvVar - mod []api.EnvVar + mod []corev1.EnvVar result []api.EnvVar shouldFail bool }{ "empty original": { - mod: []api.EnvVar{{Name: "abc", Value: "value2"}, {Name: "ABC", Value: "value3"}}, + mod: []corev1.EnvVar{{Name: "abc", Value: "value2"}, {Name: "ABC", Value: "value3"}}, result: []api.EnvVar{{Name: "abc", Value: "value2"}, {Name: "ABC", Value: "value3"}}, shouldFail: false, }, "good merge": { orig: []api.EnvVar{{Name: "abcd", Value: "value2"}, {Name: "hello", Value: "value3"}}, - mod: []api.EnvVar{{Name: 
"abc", Value: "value2"}, {Name: "ABC", Value: "value3"}}, + mod: []corev1.EnvVar{{Name: "abc", Value: "value2"}, {Name: "ABC", Value: "value3"}}, result: []api.EnvVar{{Name: "abcd", Value: "value2"}, {Name: "hello", Value: "value3"}, {Name: "abc", Value: "value2"}, {Name: "ABC", Value: "value3"}}, shouldFail: false, }, "conflict": { orig: []api.EnvVar{{Name: "abc", Value: "value3"}}, - mod: []api.EnvVar{{Name: "abc", Value: "value2"}, {Name: "ABC", Value: "value3"}}, + mod: []corev1.EnvVar{{Name: "abc", Value: "value2"}, {Name: "ABC", Value: "value3"}}, shouldFail: true, }, "one is exact same": { orig: []api.EnvVar{{Name: "abc", Value: "value2"}, {Name: "hello", Value: "value3"}}, - mod: []api.EnvVar{{Name: "abc", Value: "value2"}, {Name: "ABC", Value: "value3"}}, + mod: []corev1.EnvVar{{Name: "abc", Value: "value2"}, {Name: "ABC", Value: "value3"}}, result: []api.EnvVar{{Name: "abc", Value: "value2"}, {Name: "hello", Value: "value3"}, {Name: "ABC", Value: "value3"}}, shouldFail: false, }, @@ -66,7 +67,7 @@ func TestMergeEnv(t *testing.T) { for name, test := range tests { result, err := mergeEnv( test.orig, - []*settings.PodPreset{{Spec: settings.PodPresetSpec{Env: test.mod}}}, + []*settingsv1alpha1.PodPreset{{Spec: settingsv1alpha1.PodPresetSpec{Env: test.mod}}}, ) if test.shouldFail && err == nil { t.Fatalf("expected test %q to fail but got nil", name) @@ -83,21 +84,21 @@ func TestMergeEnv(t *testing.T) { func TestMergeEnvFrom(t *testing.T) { tests := map[string]struct { orig []api.EnvFromSource - mod []api.EnvFromSource + mod []corev1.EnvFromSource result []api.EnvFromSource shouldFail bool }{ "empty original": { - mod: []api.EnvFromSource{ + mod: []corev1.EnvFromSource{ { - ConfigMapRef: &api.ConfigMapEnvSource{ - LocalObjectReference: api.LocalObjectReference{Name: "abc"}, + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "abc"}, }, }, { Prefix: "pre_", - ConfigMapRef: &api.ConfigMapEnvSource{ - 
LocalObjectReference: api.LocalObjectReference{Name: "abc"}, + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "abc"}, }, }, }, @@ -124,16 +125,16 @@ func TestMergeEnvFrom(t *testing.T) { }, }, }, - mod: []api.EnvFromSource{ + mod: []corev1.EnvFromSource{ { - ConfigMapRef: &api.ConfigMapEnvSource{ - LocalObjectReference: api.LocalObjectReference{Name: "abc"}, + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "abc"}, }, }, { Prefix: "pre_", - ConfigMapRef: &api.ConfigMapEnvSource{ - LocalObjectReference: api.LocalObjectReference{Name: "abc"}, + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "abc"}, }, }, }, @@ -162,7 +163,7 @@ func TestMergeEnvFrom(t *testing.T) { for name, test := range tests { result, err := mergeEnvFrom( test.orig, - []*settings.PodPreset{{Spec: settings.PodPresetSpec{EnvFrom: test.mod}}}, + []*settingsv1alpha1.PodPreset{{Spec: settingsv1alpha1.PodPresetSpec{EnvFrom: test.mod}}}, ) if test.shouldFail && err == nil { t.Fatalf("expected test %q to fail but got nil", name) @@ -179,12 +180,12 @@ func TestMergeEnvFrom(t *testing.T) { func TestMergeVolumeMounts(t *testing.T) { tests := map[string]struct { orig []api.VolumeMount - mod []api.VolumeMount + mod []corev1.VolumeMount result []api.VolumeMount shouldFail bool }{ "empty original": { - mod: []api.VolumeMount{ + mod: []corev1.VolumeMount{ { Name: "simply-mounted-volume", MountPath: "/opt/", @@ -199,7 +200,7 @@ func TestMergeVolumeMounts(t *testing.T) { shouldFail: false, }, "good merge": { - mod: []api.VolumeMount{ + mod: []corev1.VolumeMount{ { Name: "simply-mounted-volume", MountPath: "/opt/", @@ -224,7 +225,7 @@ func TestMergeVolumeMounts(t *testing.T) { shouldFail: false, }, "conflict": { - mod: []api.VolumeMount{ + mod: []corev1.VolumeMount{ { Name: "simply-mounted-volume", MountPath: "/opt/", @@ -243,7 +244,7 @@ func 
TestMergeVolumeMounts(t *testing.T) { shouldFail: true, }, "conflict on mount path": { - mod: []api.VolumeMount{ + mod: []corev1.VolumeMount{ { Name: "simply-mounted-volume", MountPath: "/opt/", @@ -262,7 +263,7 @@ func TestMergeVolumeMounts(t *testing.T) { shouldFail: true, }, "one is exact same": { - mod: []api.VolumeMount{ + mod: []corev1.VolumeMount{ { Name: "simply-mounted-volume", MountPath: "/opt/", @@ -295,7 +296,7 @@ func TestMergeVolumeMounts(t *testing.T) { for name, test := range tests { result, err := mergeVolumeMounts( test.orig, - []*settings.PodPreset{{Spec: settings.PodPresetSpec{VolumeMounts: test.mod}}}, + []*settingsv1alpha1.PodPreset{{Spec: settingsv1alpha1.PodPresetSpec{VolumeMounts: test.mod}}}, ) if test.shouldFail && err == nil { t.Fatalf("expected test %q to fail but got nil", name) @@ -312,14 +313,14 @@ func TestMergeVolumeMounts(t *testing.T) { func TestMergeVolumes(t *testing.T) { tests := map[string]struct { orig []api.Volume - mod []api.Volume + mod []corev1.Volume result []api.Volume shouldFail bool }{ "empty original": { - mod: []api.Volume{ - {Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, - {Name: "vol2", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, + mod: []corev1.Volume{ + {Name: "vol", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "vol2", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, }, result: []api.Volume{ {Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, @@ -332,9 +333,9 @@ func TestMergeVolumes(t *testing.T) { {Name: "vol3", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, {Name: "vol4", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, }, - mod: []api.Volume{ - {Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, - {Name: "vol2", VolumeSource: api.VolumeSource{EmptyDir: 
&api.EmptyDirVolumeSource{}}}, + mod: []corev1.Volume{ + {Name: "vol", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "vol2", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, }, result: []api.Volume{ {Name: "vol3", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, @@ -349,9 +350,9 @@ func TestMergeVolumes(t *testing.T) { {Name: "vol3", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, {Name: "vol4", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, }, - mod: []api.Volume{ - {Name: "vol3", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/etc/apparmor.d"}}}, - {Name: "vol2", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, + mod: []corev1.Volume{ + {Name: "vol3", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/etc/apparmor.d"}}}, + {Name: "vol2", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, }, shouldFail: true, }, @@ -360,9 +361,9 @@ func TestMergeVolumes(t *testing.T) { {Name: "vol3", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, {Name: "vol4", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, }, - mod: []api.Volume{ - {Name: "vol3", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, - {Name: "vol2", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, + mod: []corev1.Volume{ + {Name: "vol3", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "vol2", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, }, result: []api.Volume{ {Name: "vol3", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}, @@ -376,7 +377,7 @@ func TestMergeVolumes(t *testing.T) { for name, test := range tests { result, err := mergeVolumes( test.orig, - []*settings.PodPreset{{Spec: 
settings.PodPresetSpec{Volumes: test.mod}}}, + []*settingsv1alpha1.PodPreset{{Spec: settingsv1alpha1.PodPresetSpec{Volumes: test.mod}}}, ) if test.shouldFail && err == nil { t.Fatalf("expected test %q to fail but got nil", name) @@ -392,7 +393,7 @@ func TestMergeVolumes(t *testing.T) { // NewTestAdmission provides an admission plugin with test implementations of internal structs. It uses // an authorizer that always returns true. -func NewTestAdmission(lister settingslisters.PodPresetLister, objects ...runtime.Object) kadmission.MutationInterface { +func NewTestAdmission(lister settingsv1alpha1listers.PodPresetLister, objects ...runtime.Object) kadmission.MutationInterface { // Build a test client that the admission plugin can use to look up the service account missing from its cache client := fake.NewSimpleClientset(objects...) @@ -424,12 +425,12 @@ func TestAdmitConflictWithDifferentNamespaceShouldDoNothing(t *testing.T) { }, } - pip := &settings.PodPreset{ + pip := &settingsv1alpha1.PodPreset{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "othernamespace", }, - Spec: settings.PodPresetSpec{ + Spec: settingsv1alpha1.PodPresetSpec{ Selector: metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { @@ -439,7 +440,7 @@ func TestAdmitConflictWithDifferentNamespaceShouldDoNothing(t *testing.T) { }, }, }, - Env: []api.EnvVar{{Name: "abc", Value: "value"}, {Name: "ABC", Value: "value"}}, + Env: []corev1.EnvVar{{Name: "abc", Value: "value"}, {Name: "ABC", Value: "value"}}, }, } @@ -470,12 +471,12 @@ func TestAdmitConflictWithNonMatchingLabelsShouldNotError(t *testing.T) { }, } - pip := &settings.PodPreset{ + pip := &settingsv1alpha1.PodPreset{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "namespace", }, - Spec: settings.PodPresetSpec{ + Spec: settingsv1alpha1.PodPresetSpec{ Selector: metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { @@ -485,7 +486,7 @@ func 
TestAdmitConflictWithNonMatchingLabelsShouldNotError(t *testing.T) { }, }, }, - Env: []api.EnvVar{{Name: "abc", Value: "value"}, {Name: "ABC", Value: "value"}}, + Env: []corev1.EnvVar{{Name: "abc", Value: "value"}, {Name: "ABC", Value: "value"}}, }, } @@ -517,12 +518,12 @@ func TestAdmitConflictShouldNotModifyPod(t *testing.T) { } origPod := *pod - pip := &settings.PodPreset{ + pip := &settingsv1alpha1.PodPreset{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "namespace", }, - Spec: settings.PodPresetSpec{ + Spec: settingsv1alpha1.PodPresetSpec{ Selector: metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { @@ -532,7 +533,7 @@ func TestAdmitConflictShouldNotModifyPod(t *testing.T) { }, }, }, - Env: []api.EnvVar{{Name: "abc", Value: "value"}, {Name: "ABC", Value: "value"}}, + Env: []corev1.EnvVar{{Name: "abc", Value: "value"}, {Name: "ABC", Value: "value"}}, }, } @@ -566,12 +567,12 @@ func TestAdmit(t *testing.T) { }, } - pip := &settings.PodPreset{ + pip := &settingsv1alpha1.PodPreset{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "namespace", }, - Spec: settings.PodPresetSpec{ + Spec: settingsv1alpha1.PodPresetSpec{ Selector: metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { @@ -581,18 +582,18 @@ func TestAdmit(t *testing.T) { }, }, }, - Volumes: []api.Volume{{Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}}, - Env: []api.EnvVar{{Name: "abcd", Value: "value"}, {Name: "ABC", Value: "value"}}, - EnvFrom: []api.EnvFromSource{ + Volumes: []corev1.Volume{{Name: "vol", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}}, + Env: []corev1.EnvVar{{Name: "abcd", Value: "value"}, {Name: "ABC", Value: "value"}}, + EnvFrom: []corev1.EnvFromSource{ { - ConfigMapRef: &api.ConfigMapEnvSource{ - LocalObjectReference: api.LocalObjectReference{Name: "abc"}, + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: 
corev1.LocalObjectReference{Name: "abc"}, }, }, { Prefix: "pre_", - ConfigMapRef: &api.ConfigMapEnvSource{ - LocalObjectReference: api.LocalObjectReference{Name: "abc"}, + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "abc"}, }, }, }, @@ -626,12 +627,12 @@ func TestAdmitMirrorPod(t *testing.T) { }, } - pip := &settings.PodPreset{ + pip := &settingsv1alpha1.PodPreset{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "namespace", }, - Spec: settings.PodPresetSpec{ + Spec: settingsv1alpha1.PodPresetSpec{ Selector: metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { @@ -641,18 +642,18 @@ func TestAdmitMirrorPod(t *testing.T) { }, }, }, - Volumes: []api.Volume{{Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}}, - Env: []api.EnvVar{{Name: "abcd", Value: "value"}, {Name: "ABC", Value: "value"}}, - EnvFrom: []api.EnvFromSource{ + Volumes: []corev1.Volume{{Name: "vol", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}}, + Env: []corev1.EnvVar{{Name: "abcd", Value: "value"}, {Name: "ABC", Value: "value"}}, + EnvFrom: []corev1.EnvFromSource{ { - ConfigMapRef: &api.ConfigMapEnvSource{ - LocalObjectReference: api.LocalObjectReference{Name: "abc"}, + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "abc"}, }, }, { Prefix: "pre_", - ConfigMapRef: &api.ConfigMapEnvSource{ - LocalObjectReference: api.LocalObjectReference{Name: "abc"}, + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "abc"}, }, }, }, @@ -696,12 +697,12 @@ func TestExclusionNoAdmit(t *testing.T) { }, } - pip := &settings.PodPreset{ + pip := &settingsv1alpha1.PodPreset{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "namespace", }, - Spec: settings.PodPresetSpec{ + Spec: settingsv1alpha1.PodPresetSpec{ Selector: metav1.LabelSelector{ MatchExpressions: 
[]metav1.LabelSelectorRequirement{ { @@ -711,18 +712,18 @@ func TestExclusionNoAdmit(t *testing.T) { }, }, }, - Volumes: []api.Volume{{Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}}, - Env: []api.EnvVar{{Name: "abcd", Value: "value"}, {Name: "ABC", Value: "value"}}, - EnvFrom: []api.EnvFromSource{ + Volumes: []corev1.Volume{{Name: "vol", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}}, + Env: []corev1.EnvVar{{Name: "abcd", Value: "value"}, {Name: "ABC", Value: "value"}}, + EnvFrom: []corev1.EnvFromSource{ { - ConfigMapRef: &api.ConfigMapEnvSource{ - LocalObjectReference: api.LocalObjectReference{Name: "abc"}, + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "abc"}, }, }, { Prefix: "pre_", - ConfigMapRef: &api.ConfigMapEnvSource{ - LocalObjectReference: api.LocalObjectReference{Name: "abc"}, + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "abc"}, }, }, }, @@ -760,12 +761,12 @@ func TestAdmitEmptyPodNamespace(t *testing.T) { }, } - pip := &settings.PodPreset{ + pip := &settingsv1alpha1.PodPreset{ ObjectMeta: metav1.ObjectMeta{ Name: "hello", Namespace: "different", // (pod will be submitted to namespace 'namespace') }, - Spec: settings.PodPresetSpec{ + Spec: settingsv1alpha1.PodPresetSpec{ Selector: metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { @@ -775,18 +776,18 @@ func TestAdmitEmptyPodNamespace(t *testing.T) { }, }, }, - Volumes: []api.Volume{{Name: "vol", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}}, - Env: []api.EnvVar{{Name: "abcd", Value: "value"}, {Name: "ABC", Value: "value"}}, - EnvFrom: []api.EnvFromSource{ + Volumes: []corev1.Volume{{Name: "vol", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}}, + Env: []corev1.EnvVar{{Name: "abcd", Value: "value"}, {Name: "ABC", Value: "value"}}, + EnvFrom: 
[]corev1.EnvFromSource{ { - ConfigMapRef: &api.ConfigMapEnvSource{ - LocalObjectReference: api.LocalObjectReference{Name: "abc"}, + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "abc"}, }, }, { Prefix: "pre_", - ConfigMapRef: &api.ConfigMapEnvSource{ - LocalObjectReference: api.LocalObjectReference{Name: "abc"}, + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: "abc"}, }, }, }, @@ -804,11 +805,11 @@ func TestAdmitEmptyPodNamespace(t *testing.T) { } } -func admitPod(pod *api.Pod, pip *settings.PodPreset) error { +func admitPod(pod *api.Pod, pip *settingsv1alpha1.PodPreset) error { informerFactory := informers.NewSharedInformerFactory(nil, controller.NoResyncPeriodFunc()) - store := informerFactory.Settings().InternalVersion().PodPresets().Informer().GetStore() + store := informerFactory.Settings().V1alpha1().PodPresets().Informer().GetStore() store.Add(pip) - plugin := NewTestAdmission(informerFactory.Settings().InternalVersion().PodPresets().Lister()) + plugin := NewTestAdmission(informerFactory.Settings().V1alpha1().PodPresets().Lister()) attrs := kadmission.NewAttributesRecord( pod, nil, diff --git a/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/doc.go b/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/doc.go index c21764baee8..03571ab2bcf 100644 --- a/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/doc.go +++ b/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction // +k8s:defaulter-gen=TypeMeta +// +groupName=podtolerationrestriction.admission.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. 
-// +groupName=podtolerationrestriction.admission.k8s.io package v1alpha1 // import "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1" diff --git a/plugin/pkg/admission/priority/BUILD b/plugin/pkg/admission/priority/BUILD index 6489d6830e9..513cc84def3 100644 --- a/plugin/pkg/admission/priority/BUILD +++ b/plugin/pkg/admission/priority/BUILD @@ -13,13 +13,15 @@ go_test( deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/scheduling:go_default_library", - "//pkg/client/informers/informers_generated/internalversion:go_default_library", + "//pkg/apis/scheduling/v1beta1:go_default_library", "//pkg/controller:go_default_library", "//pkg/features:go_default_library", + "//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", "//vendor/github.com/golang/glog:go_default_library", ], ) @@ -31,17 +33,18 @@ go_library( deps = [ "//pkg/apis/core:go_default_library", "//pkg/apis/scheduling:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/informers/informers_generated/internalversion:go_default_library", - "//pkg/client/listers/scheduling/internalversion:go_default_library", "//pkg/features:go_default_library", - "//pkg/kubeapiserver/admission:go_default_library", "//pkg/kubelet/types:go_default_library", + "//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", 
"//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/admission/initializer:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/listers/scheduling/v1beta1:go_default_library", ], ) diff --git a/plugin/pkg/admission/priority/admission.go b/plugin/pkg/admission/priority/admission.go index e7054a47464..8ae7afbe048 100644 --- a/plugin/pkg/admission/priority/admission.go +++ b/plugin/pkg/admission/priority/admission.go @@ -20,18 +20,19 @@ import ( "fmt" "io" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apiserver/pkg/admission" + genericadmissioninitializers "k8s.io/apiserver/pkg/admission/initializer" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + schedulingv1beta1listers "k8s.io/client-go/listers/scheduling/v1beta1" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/scheduling" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" - schedulinglisters "k8s.io/kubernetes/pkg/client/listers/scheduling/internalversion" "k8s.io/kubernetes/pkg/features" - kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -50,14 +51,14 @@ func Register(plugins *admission.Plugins) { // priorityPlugin is an implementation of admission.Interface. 
type priorityPlugin struct { *admission.Handler - client internalclientset.Interface - lister schedulinglisters.PriorityClassLister + client kubernetes.Interface + lister schedulingv1beta1listers.PriorityClassLister } var _ admission.MutationInterface = &priorityPlugin{} var _ admission.ValidationInterface = &priorityPlugin{} -var _ = kubeapiserveradmission.WantsInternalKubeInformerFactory(&priorityPlugin{}) -var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&priorityPlugin{}) +var _ = genericadmissioninitializers.WantsExternalKubeInformerFactory(&priorityPlugin{}) +var _ = genericadmissioninitializers.WantsExternalKubeClientSet(&priorityPlugin{}) // NewPlugin creates a new priority admission plugin. func newPlugin() *priorityPlugin { @@ -78,13 +79,13 @@ func (p *priorityPlugin) ValidateInitialization() error { } // SetInternalKubeClientSet implements the WantsInternalKubeClientSet interface. -func (p *priorityPlugin) SetInternalKubeClientSet(client internalclientset.Interface) { +func (p *priorityPlugin) SetExternalKubeClientSet(client kubernetes.Interface) { p.client = client } // SetInternalKubeInformerFactory implements the WantsInternalKubeInformerFactory interface. 
-func (p *priorityPlugin) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { - priorityInformer := f.Scheduling().InternalVersion().PriorityClasses() +func (p *priorityPlugin) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) { + priorityInformer := f.Scheduling().V1beta1().PriorityClasses() p.lister = priorityInformer.Lister() p.SetReadyFunc(priorityInformer.Informer().HasSynced) } @@ -241,14 +242,14 @@ func (p *priorityPlugin) validatePriorityClass(a admission.Attributes) error { return nil } -func (p *priorityPlugin) getDefaultPriorityClass() (*scheduling.PriorityClass, error) { +func (p *priorityPlugin) getDefaultPriorityClass() (*schedulingv1beta1.PriorityClass, error) { list, err := p.lister.List(labels.Everything()) if err != nil { return nil, err } // In case more than one global default priority class is added as a result of a race condition, // we pick the one with the lowest priority value. - var defaultPC *scheduling.PriorityClass + var defaultPC *schedulingv1beta1.PriorityClass for _, pci := range list { if pci.GlobalDefault { if defaultPC == nil || defaultPC.Value > pci.Value { diff --git a/plugin/pkg/admission/priority/admission_test.go b/plugin/pkg/admission/priority/admission_test.go index 378d9d515a2..19e599b778f 100644 --- a/plugin/pkg/admission/priority/admission_test.go +++ b/plugin/pkg/admission/priority/admission_test.go @@ -22,24 +22,31 @@ import ( "github.com/golang/glog" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/authentication/user" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/informers" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/scheduling" - informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" + "k8s.io/kubernetes/pkg/apis/scheduling/v1beta1" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/features" ) -func 
addPriorityClasses(ctrl *priorityPlugin, priorityClasses []*scheduling.PriorityClass) { +func addPriorityClasses(ctrl *priorityPlugin, priorityClasses []*scheduling.PriorityClass) error { informerFactory := informers.NewSharedInformerFactory(nil, controller.NoResyncPeriodFunc()) - ctrl.SetInternalKubeInformerFactory(informerFactory) + ctrl.SetExternalKubeInformerFactory(informerFactory) // First add the existing classes to the cache. for _, c := range priorityClasses { - informerFactory.Scheduling().InternalVersion().PriorityClasses().Informer().GetStore().Add(c) + s := &schedulingv1beta1.PriorityClass{} + if err := v1beta1.Convert_scheduling_PriorityClass_To_v1beta1_PriorityClass(c, s, nil); err != nil { + return err + } + informerFactory.Scheduling().V1beta1().PriorityClasses().Informer().GetStore().Add(s) } + return nil } var defaultClass1 = &scheduling.PriorityClass{ @@ -135,7 +142,9 @@ func TestPriorityClassAdmission(t *testing.T) { ctrl := newPlugin() // Add existing priority classes. - addPriorityClasses(ctrl, test.existingClasses) + if err := addPriorityClasses(ctrl, test.existingClasses); err != nil { + t.Errorf("Test %q: unable to add object to informer: %v", test.name, err) + } // Now add the new class. 
attrs := admission.NewAttributesRecord( test.newClass, @@ -220,7 +229,9 @@ func TestDefaultPriority(t *testing.T) { for _, test := range tests { glog.V(4).Infof("starting test %q", test.name) ctrl := newPlugin() - addPriorityClasses(ctrl, test.classesBefore) + if err := addPriorityClasses(ctrl, test.classesBefore); err != nil { + t.Errorf("Test %q: unable to add object to informer: %v", test.name, err) + } defaultPriority, err := ctrl.getDefaultPriority() if err != nil { t.Errorf("Test %q: unexpected error while getting default priority: %v", test.name, err) @@ -234,7 +245,9 @@ func TestDefaultPriority(t *testing.T) { t.Errorf("Test %q: unexpected error received: %v", test.name, err) } } - addPriorityClasses(ctrl, test.classesAfter) + if err := addPriorityClasses(ctrl, test.classesAfter); err != nil { + t.Errorf("Test %q: unable to add object to informer: %v", test.name, err) + } defaultPriority, err = ctrl.getDefaultPriority() if err != nil { t.Errorf("Test %q: unexpected error while getting default priority: %v", test.name, err) @@ -557,7 +570,9 @@ func TestPodAdmission(t *testing.T) { ctrl := newPlugin() // Add existing priority classes. - addPriorityClasses(ctrl, test.existingClasses) + if err := addPriorityClasses(ctrl, test.existingClasses); err != nil { + t.Errorf("Test %q: unable to add object to informer: %v", test.name, err) + } // Create pod. 
attrs := admission.NewAttributesRecord( diff --git a/plugin/pkg/admission/resourcequota/BUILD b/plugin/pkg/admission/resourcequota/BUILD index 714cbdd8b02..e638b0691a0 100644 --- a/plugin/pkg/admission/resourcequota/BUILD +++ b/plugin/pkg/admission/resourcequota/BUILD @@ -17,19 +17,16 @@ go_library( ], importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota", deps = [ - "//pkg/apis/core:go_default_library", - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/informers/informers_generated/internalversion:go_default_library", - "//pkg/client/listers/core/internalversion:go_default_library", "//pkg/kubeapiserver/admission:go_default_library", - "//pkg/quota:go_default_library", - "//pkg/quota/generic:go_default_library", + "//pkg/quota/v1:go_default_library", + "//pkg/quota/v1/generic:go_default_library", "//pkg/util/reflector/prometheus:go_default_library", "//pkg/util/workqueue/prometheus:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota/install:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota/validation:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -41,7 +38,11 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/admission/initializer:go_default_library", "//staging/src/k8s.io/apiserver/pkg/storage/etcd:go_default_library", + 
"//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/github.com/hashicorp/golang-lru:go_default_library", @@ -54,17 +55,18 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/apis/core:go_default_library", - "//pkg/client/clientset_generated/internalclientset/fake:go_default_library", - "//pkg/client/informers/informers_generated/internalversion:go_default_library", "//pkg/controller:go_default_library", - "//pkg/quota/generic:go_default_library", - "//pkg/quota/install:go_default_library", + "//pkg/quota/v1/generic:go_default_library", + "//pkg/quota/v1/install:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/admission:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", "//vendor/github.com/hashicorp/golang-lru:go_default_library", diff --git a/plugin/pkg/admission/resourcequota/admission.go b/plugin/pkg/admission/resourcequota/admission.go index ec8bc590a23..43097ef5569 100644 --- a/plugin/pkg/admission/resourcequota/admission.go +++ b/plugin/pkg/admission/resourcequota/admission.go @@ -21,13 +21,14 @@ import ( "io" "time" + corev1 
"k8s.io/api/core/v1" "k8s.io/apiserver/pkg/admission" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" + genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation" ) @@ -65,12 +66,13 @@ type QuotaAdmission struct { } var _ admission.ValidationInterface = &QuotaAdmission{} -var _ = kubeapiserveradmission.WantsInternalKubeClientSet(&QuotaAdmission{}) +var _ = genericadmissioninitializer.WantsExternalKubeInformerFactory(&QuotaAdmission{}) +var _ = genericadmissioninitializer.WantsExternalKubeClientSet(&QuotaAdmission{}) var _ = kubeapiserveradmission.WantsQuotaConfiguration(&QuotaAdmission{}) type liveLookupEntry struct { expiry time.Time - items []*api.ResourceQuota + items []*corev1.ResourceQuota } // NewResourceQuota configures an admission controller that can enforce quota constraints @@ -91,12 +93,12 @@ func NewResourceQuota(config *resourcequotaapi.Configuration, numEvaluators int, }, nil } -func (a *QuotaAdmission) SetInternalKubeClientSet(client internalclientset.Interface) { +func (a *QuotaAdmission) SetExternalKubeClientSet(client kubernetes.Interface) { a.quotaAccessor.client = client } -func (a *QuotaAdmission) SetInternalKubeInformerFactory(f informers.SharedInformerFactory) { - a.quotaAccessor.lister = f.Core().InternalVersion().ResourceQuotas().Lister() +func (a *QuotaAdmission) SetExternalKubeInformerFactory(f 
informers.SharedInformerFactory) { + a.quotaAccessor.lister = f.Core().V1().ResourceQuotas().Lister() } func (a *QuotaAdmission) SetQuotaConfiguration(c quota.Configuration) { diff --git a/plugin/pkg/admission/resourcequota/admission_test.go b/plugin/pkg/admission/resourcequota/admission_test.go index 610b2dec51b..09e29b0b8fc 100644 --- a/plugin/pkg/admission/resourcequota/admission_test.go +++ b/plugin/pkg/admission/resourcequota/admission_test.go @@ -25,19 +25,20 @@ import ( lru "github.com/hashicorp/golang-lru" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" testcore "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" - informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" "k8s.io/kubernetes/pkg/controller" - "k8s.io/kubernetes/pkg/quota/generic" - "k8s.io/kubernetes/pkg/quota/install" + "k8s.io/kubernetes/pkg/quota/v1/generic" + "k8s.io/kubernetes/pkg/quota/v1/install" resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" ) @@ -92,33 +93,33 @@ func validPersistentVolumeClaim(name string, resources api.ResourceRequirements) } func TestPrettyPrint(t *testing.T) { - toResourceList := func(resources map[api.ResourceName]string) api.ResourceList { - resourceList := api.ResourceList{} + toResourceList := func(resources map[corev1.ResourceName]string) corev1.ResourceList { + resourceList := corev1.ResourceList{} for key, value := range resources { resourceList[key] = resource.MustParse(value) } return resourceList } testCases := []struct { - input api.ResourceList + input corev1.ResourceList expected string }{ { - input: 
toResourceList(map[api.ResourceName]string{ - api.ResourceCPU: "100m", + input: toResourceList(map[corev1.ResourceName]string{ + corev1.ResourceCPU: "100m", }), expected: "cpu=100m", }, { - input: toResourceList(map[api.ResourceName]string{ - api.ResourcePods: "10", - api.ResourceServices: "10", - api.ResourceReplicationControllers: "10", - api.ResourceServicesNodePorts: "10", - api.ResourceRequestsCPU: "100m", - api.ResourceRequestsMemory: "100Mi", - api.ResourceLimitsCPU: "100m", - api.ResourceLimitsMemory: "100Mi", + input: toResourceList(map[corev1.ResourceName]string{ + corev1.ResourcePods: "10", + corev1.ResourceServices: "10", + corev1.ResourceReplicationControllers: "10", + corev1.ResourceServicesNodePorts: "10", + corev1.ResourceRequestsCPU: "100m", + corev1.ResourceRequestsMemory: "100Mi", + corev1.ResourceLimitsCPU: "100m", + corev1.ResourceLimitsMemory: "100Mi", }), expected: "limits.cpu=100m,limits.memory=100Mi,pods=10,replicationcontrollers=10,requests.cpu=100m,requests.memory=100Mi,services=10,services.nodeports=10", }, @@ -140,7 +141,7 @@ func TestAdmissionIgnoresDelete(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -150,7 +151,7 @@ func TestAdmissionIgnoresDelete(t *testing.T) { evaluator: evaluator, } namespace := "default" - err := handler.Validate(admission.NewAttributesRecord(nil, nil, api.Kind("Pod").WithVersion("version"), namespace, "name", 
api.Resource("pods").WithVersion("version"), "", admission.Delete, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(nil, nil, api.Kind("Pod").WithVersion("version"), namespace, "name", corev1.Resource("pods").WithVersion("version"), "", admission.Delete, false, nil)) if err != nil { t.Errorf("ResourceQuota should admit all deletes: %v", err) } @@ -160,15 +161,15 @@ func TestAdmissionIgnoresDelete(t *testing.T) { // It verifies that creation of a pod that would have exceeded quota is properly failed // It verifies that create operations to a subresource that would have exceeded quota would succeed func TestAdmissionIgnoresSubresources(t *testing.T) { - resourceQuota := &api.ResourceQuota{} + resourceQuota := &corev1.ResourceQuota{} resourceQuota.Name = "quota" resourceQuota.Namespace = "test" - resourceQuota.Status = api.ResourceQuotaStatus{ - Hard: api.ResourceList{}, - Used: api.ResourceList{}, + resourceQuota.Status = corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{}, + Used: corev1.ResourceList{}, } - resourceQuota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi") - resourceQuota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi") + resourceQuota.Status.Hard[corev1.ResourceMemory] = resource.MustParse("2Gi") + resourceQuota.Status.Used[corev1.ResourceMemory] = resource.MustParse("1Gi") stopCh := make(chan struct{}) defer close(stopCh) @@ -176,7 +177,7 @@ func TestAdmissionIgnoresSubresources(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, 
quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -185,13 +186,13 @@ func TestAdmissionIgnoresSubresources(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err == nil { t.Errorf("Expected an error because the pod exceeded allowed quota") } - err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "subresource", admission.Create, false, nil)) + err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "subresource", admission.Create, false, nil)) if err != nil { t.Errorf("Did not expect an error because the action went to a subresource: %v", err) } @@ -199,18 +200,18 @@ func TestAdmissionIgnoresSubresources(t *testing.T) { // TestAdmitBelowQuotaLimit verifies that a pod when created has its usage reflected on the quota func TestAdmitBelowQuotaLimit(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: 
metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("50Gi"), - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("50Gi"), + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -221,7 +222,7 @@ func TestAdmitBelowQuotaLimit(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -230,9 +231,9 @@ func TestAdmitBelowQuotaLimit(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", ""))) - 
err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -253,18 +254,18 @@ func TestAdmitBelowQuotaLimit(t *testing.T) { decimatedActions := removeListWatch(kubeClient.Actions()) lastActionIndex := len(decimatedActions) - 1 - usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*api.ResourceQuota) - expectedUsage := api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("5"), + usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota) + expectedUsage := corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1100m"), - api.ResourceMemory: resource.MustParse("52Gi"), - api.ResourcePods: resource.MustParse("4"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1100m"), + corev1.ResourceMemory: resource.MustParse("52Gi"), + corev1.ResourcePods: resource.MustParse("4"), }, }, } @@ -281,18 +282,18 @@ func TestAdmitBelowQuotaLimit(t *testing.T) { // TestAdmitDryRun verifies that a pod when created with dry-run doesn not have its usage reflected on the quota // and that dry-run requests can still be rejected if they would exceed the quota func 
TestAdmitDryRun(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("50Gi"), - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("50Gi"), + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -303,7 +304,7 @@ func TestAdmitDryRun(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -312,16 +313,16 @@ func TestAdmitDryRun(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := 
validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, true, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, true, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } newPod = validPod("too-large-pod", 1, getResourceRequirements(getResourceList("100m", "60Gi"), getResourceList("", ""))) - err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, true, nil)) + err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, true, nil)) if err == nil { t.Errorf("Expected error but got none") } @@ -334,18 +335,18 @@ func TestAdmitDryRun(t *testing.T) { // TestAdmitHandlesOldObjects verifies that admit handles updates correctly with old objects func TestAdmitHandlesOldObjects(t *testing.T) { // in this scenario, the old quota was based on a service type=loadbalancer - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceServices: resource.MustParse("10"), - api.ResourceServicesLoadBalancers: resource.MustParse("10"), - api.ResourceServicesNodePorts: resource.MustParse("10"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceServices: 
resource.MustParse("10"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("10"), + corev1.ResourceServicesNodePorts: resource.MustParse("10"), }, - Used: api.ResourceList{ - api.ResourceServices: resource.MustParse("1"), - api.ResourceServicesLoadBalancers: resource.MustParse("1"), - api.ResourceServicesNodePorts: resource.MustParse("0"), + Used: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("1"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("1"), + corev1.ResourceServicesNodePorts: resource.MustParse("0"), }, }, } @@ -358,7 +359,7 @@ func TestAdmitHandlesOldObjects(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -367,7 +368,7 @@ func TestAdmitHandlesOldObjects(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) // old service was a load balancer, but updated version is a node port. 
existingService := &api.Service{ @@ -381,7 +382,7 @@ func TestAdmitHandlesOldObjects(t *testing.T) { Ports: []api.ServicePort{{Port: 1234}}, }, } - err := handler.Validate(admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, api.Resource("services").WithVersion("version"), "", admission.Update, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -404,21 +405,21 @@ func TestAdmitHandlesOldObjects(t *testing.T) { // verify usage decremented the loadbalancer, and incremented the nodeport, but kept the service the same. decimatedActions := removeListWatch(kubeClient.Actions()) lastActionIndex := len(decimatedActions) - 1 - usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*api.ResourceQuota) + usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota) - // Verify service usage. Since we don't add negative values, the api.ResourceServicesLoadBalancers + // Verify service usage. 
Since we don't add negative values, the corev1.ResourceServicesLoadBalancers // will remain on last reported value - expectedUsage := api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceServices: resource.MustParse("10"), - api.ResourceServicesLoadBalancers: resource.MustParse("10"), - api.ResourceServicesNodePorts: resource.MustParse("10"), + expectedUsage := corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("10"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("10"), + corev1.ResourceServicesNodePorts: resource.MustParse("10"), }, - Used: api.ResourceList{ - api.ResourceServices: resource.MustParse("1"), - api.ResourceServicesLoadBalancers: resource.MustParse("1"), - api.ResourceServicesNodePorts: resource.MustParse("1"), + Used: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("1"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("1"), + corev1.ResourceServicesNodePorts: resource.MustParse("1"), }, }, } @@ -433,16 +434,16 @@ func TestAdmitHandlesOldObjects(t *testing.T) { } func TestAdmitHandlesNegativePVCUpdates(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("3"), - api.ResourceRequestsStorage: resource.MustParse("100Gi"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: resource.MustParse("3"), + corev1.ResourceRequestsStorage: resource.MustParse("100Gi"), }, - Used: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("1"), - api.ResourceRequestsStorage: resource.MustParse("10Gi"), + Used: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: 
resource.MustParse("1"), + corev1.ResourceRequestsStorage: resource.MustParse("10Gi"), }, }, } @@ -465,7 +466,7 @@ func TestAdmitHandlesNegativePVCUpdates(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -474,7 +475,7 @@ func TestAdmitHandlesNegativePVCUpdates(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) oldPVC := &api.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{Name: "pvc-to-update", Namespace: "test", ResourceVersion: "1"}, @@ -490,7 +491,7 @@ func TestAdmitHandlesNegativePVCUpdates(t *testing.T) { }, } - err = handler.Validate(admission.NewAttributesRecord(newPVC, oldPVC, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPVC.Namespace, newPVC.Name, api.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Update, false, nil)) + err = handler.Validate(admission.NewAttributesRecord(newPVC, oldPVC, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPVC.Namespace, newPVC.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Update, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -500,16 +501,16 @@ func TestAdmitHandlesNegativePVCUpdates(t 
*testing.T) { } func TestAdmitHandlesPVCUpdates(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("3"), - api.ResourceRequestsStorage: resource.MustParse("100Gi"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: resource.MustParse("3"), + corev1.ResourceRequestsStorage: resource.MustParse("100Gi"), }, - Used: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("1"), - api.ResourceRequestsStorage: resource.MustParse("10Gi"), + Used: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"), + corev1.ResourceRequestsStorage: resource.MustParse("10Gi"), }, }, } @@ -532,7 +533,7 @@ func TestAdmitHandlesPVCUpdates(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -541,7 +542,7 @@ func TestAdmitHandlesPVCUpdates(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) oldPVC := 
&api.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{Name: "pvc-to-update", Namespace: "test", ResourceVersion: "1"}, @@ -557,7 +558,7 @@ func TestAdmitHandlesPVCUpdates(t *testing.T) { }, } - err = handler.Validate(admission.NewAttributesRecord(newPVC, oldPVC, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPVC.Namespace, newPVC.Name, api.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Update, false, nil)) + err = handler.Validate(admission.NewAttributesRecord(newPVC, oldPVC, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPVC.Namespace, newPVC.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Update, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -580,16 +581,16 @@ func TestAdmitHandlesPVCUpdates(t *testing.T) { decimatedActions := removeListWatch(kubeClient.Actions()) lastActionIndex := len(decimatedActions) - 1 - usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*api.ResourceQuota) - expectedUsage := api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("3"), - api.ResourceRequestsStorage: resource.MustParse("100Gi"), + usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota) + expectedUsage := corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: resource.MustParse("3"), + corev1.ResourceRequestsStorage: resource.MustParse("100Gi"), }, - Used: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("1"), - api.ResourceRequestsStorage: resource.MustParse("15Gi"), + Used: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"), + corev1.ResourceRequestsStorage: resource.MustParse("15Gi"), }, }, } @@ -607,18 +608,18 @@ func TestAdmitHandlesPVCUpdates(t *testing.T) { // 
TestAdmitHandlesCreatingUpdates verifies that admit handles updates which behave as creates func TestAdmitHandlesCreatingUpdates(t *testing.T) { // in this scenario, there is an existing service - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceServices: resource.MustParse("10"), - api.ResourceServicesLoadBalancers: resource.MustParse("10"), - api.ResourceServicesNodePorts: resource.MustParse("10"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("10"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("10"), + corev1.ResourceServicesNodePorts: resource.MustParse("10"), }, - Used: api.ResourceList{ - api.ResourceServices: resource.MustParse("1"), - api.ResourceServicesLoadBalancers: resource.MustParse("1"), - api.ResourceServicesNodePorts: resource.MustParse("0"), + Used: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("1"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("1"), + corev1.ResourceServicesNodePorts: resource.MustParse("0"), }, }, } @@ -631,7 +632,7 @@ func TestAdmitHandlesCreatingUpdates(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -640,7 +641,7 @@ func 
TestAdmitHandlesCreatingUpdates(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) // old service didn't exist, so this update is actually a create oldService := &api.Service{ @@ -654,7 +655,7 @@ func TestAdmitHandlesCreatingUpdates(t *testing.T) { Ports: []api.ServicePort{{Port: 1234}}, }, } - err := handler.Validate(admission.NewAttributesRecord(newService, oldService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, api.Resource("services").WithVersion("version"), "", admission.Update, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newService, oldService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -677,18 +678,18 @@ func TestAdmitHandlesCreatingUpdates(t *testing.T) { // verify that the "old" object was ignored for calculating the new usage decimatedActions := removeListWatch(kubeClient.Actions()) lastActionIndex := len(decimatedActions) - 1 - usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*api.ResourceQuota) - expectedUsage := api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceServices: resource.MustParse("10"), - api.ResourceServicesLoadBalancers: resource.MustParse("10"), - api.ResourceServicesNodePorts: resource.MustParse("10"), + usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota) + expectedUsage := corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("10"), + 
corev1.ResourceServicesLoadBalancers: resource.MustParse("10"), + corev1.ResourceServicesNodePorts: resource.MustParse("10"), }, - Used: api.ResourceList{ - api.ResourceServices: resource.MustParse("2"), - api.ResourceServicesLoadBalancers: resource.MustParse("1"), - api.ResourceServicesNodePorts: resource.MustParse("1"), + Used: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("2"), + corev1.ResourceServicesLoadBalancers: resource.MustParse("1"), + corev1.ResourceServicesNodePorts: resource.MustParse("1"), }, }, } @@ -704,18 +705,18 @@ func TestAdmitHandlesCreatingUpdates(t *testing.T) { // TestAdmitExceedQuotaLimit verifies that if a pod exceeded allowed usage that its rejected during admission. func TestAdmitExceedQuotaLimit(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("50Gi"), - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("50Gi"), + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -726,7 +727,7 @@ func TestAdmitExceedQuotaLimit(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = 
informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -735,9 +736,9 @@ func TestAdmitExceedQuotaLimit(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err == nil { t.Errorf("Expected an error exceeding quota") } @@ -747,20 +748,20 @@ func TestAdmitExceedQuotaLimit(t *testing.T) { // specified on the pod. In this case, we create a quota that tracks cpu request, memory request, and memory limit. // We ensure that a pod that does not specify a memory limit that it fails in admission. 
func TestAdmitEnforceQuotaConstraints(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourceLimitsMemory: resource.MustParse("200Gi"), - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourceLimitsMemory: resource.MustParse("200Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("50Gi"), - api.ResourceLimitsMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("50Gi"), + corev1.ResourceLimitsMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -771,7 +772,7 @@ func TestAdmitEnforceQuotaConstraints(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -780,10 +781,10 @@ func TestAdmitEnforceQuotaConstraints(t *testing.T) { 
Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) // verify all values are specified as required on the quota newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("200m", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err == nil { t.Errorf("Expected an error because the pod does not specify a memory limit") } @@ -791,20 +792,20 @@ func TestAdmitEnforceQuotaConstraints(t *testing.T) { // TestAdmitPodInNamespaceWithoutQuota ensures that if a namespace has no quota, that a pod can get in func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "other", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourceLimitsMemory: resource.MustParse("200Gi"), - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourceLimitsMemory: resource.MustParse("200Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: 
resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("50Gi"), - api.ResourceLimitsMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("50Gi"), + corev1.ResourceLimitsMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -819,7 +820,7 @@ func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() quotaAccessor.liveLookupCache = liveLookupCache config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() @@ -830,11 +831,11 @@ func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) { evaluator: evaluator, } // Add to the index - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("200m", ""))) // Add to the lru cache so we do not do a live client lookup - liveLookupCache.Add(newPod.Namespace, liveLookupEntry{expiry: time.Now().Add(time.Duration(30 * time.Second)), items: []*api.ResourceQuota{}}) - err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + liveLookupCache.Add(newPod.Namespace, liveLookupEntry{expiry: time.Now().Add(time.Duration(30 * time.Second)), items: 
[]*corev1.ResourceQuota{}}) + err = handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Did not expect an error because the pod is in a different namespace than the quota") } @@ -844,39 +845,39 @@ func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) { // It creates a terminating and non-terminating quota, and creates a terminating pod. // It ensures that the terminating quota is incremented, and the non-terminating quota is not. func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) { - resourceQuotaNonTerminating := &api.ResourceQuota{ + resourceQuotaNonTerminating := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota-non-terminating", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeNotTerminating}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeNotTerminating}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("50Gi"), - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("50Gi"), + corev1.ResourcePods: resource.MustParse("3"), }, }, } - resourceQuotaTerminating := &api.ResourceQuota{ + resourceQuotaTerminating := &corev1.ResourceQuota{ 
ObjectMeta: metav1.ObjectMeta{Name: "quota-terminating", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeTerminating}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeTerminating}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1"), - api.ResourceMemory: resource.MustParse("50Gi"), - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("50Gi"), + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -887,7 +888,7 @@ func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -896,14 +897,14 @@ func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - 
informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaNonTerminating) - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaTerminating) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaNonTerminating) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaTerminating) // create a pod that has an active deadline newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", ""))) activeDeadlineSeconds := int64(30) newPod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -924,24 +925,24 @@ func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) { decimatedActions := removeListWatch(kubeClient.Actions()) lastActionIndex := len(decimatedActions) - 1 - usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*api.ResourceQuota) + usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota) // ensure only the quota-terminating was updated if usage.Name != resourceQuotaTerminating.Name { t.Errorf("Incremented the wrong quota, expected %v, actual %v", resourceQuotaTerminating.Name, usage.Name) } - expectedUsage := api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceCPU: resource.MustParse("3"), - api.ResourceMemory: resource.MustParse("100Gi"), - api.ResourcePods: 
resource.MustParse("5"), + expectedUsage := corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("100Gi"), + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1100m"), - api.ResourceMemory: resource.MustParse("52Gi"), - api.ResourcePods: resource.MustParse("4"), + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1100m"), + corev1.ResourceMemory: resource.MustParse("52Gi"), + corev1.ResourcePods: resource.MustParse("4"), }, }, } @@ -958,31 +959,31 @@ func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) { // TestAdmitBelowBestEffortQuotaLimit creates a best effort and non-best effort quota. // It verifies that best effort pods are properly scoped to the best effort quota document. func TestAdmitBelowBestEffortQuotaLimit(t *testing.T) { - resourceQuotaBestEffort := &api.ResourceQuota{ + resourceQuotaBestEffort := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("3"), }, }, } - resourceQuotaNotBestEffort := &api.ResourceQuota{ + resourceQuotaNotBestEffort := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota-not-besteffort", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - 
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeNotBestEffort}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeNotBestEffort}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -993,7 +994,7 @@ func TestAdmitBelowBestEffortQuotaLimit(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -1002,12 +1003,12 @@ func TestAdmitBelowBestEffortQuotaLimit(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaBestEffort) - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaNotBestEffort) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaBestEffort) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaNotBestEffort) // create a pod that is best effort because it does not make a request for anything newPod := validPod("allowed-pod", 1, 
getResourceRequirements(getResourceList("", ""), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -1023,19 +1024,19 @@ func TestAdmitBelowBestEffortQuotaLimit(t *testing.T) { } decimatedActions := removeListWatch(kubeClient.Actions()) lastActionIndex := len(decimatedActions) - 1 - usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*api.ResourceQuota) + usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota) if usage.Name != resourceQuotaBestEffort.Name { t.Errorf("Incremented the wrong quota, expected %v, actual %v", resourceQuotaBestEffort.Name, usage.Name) } - expectedUsage := api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("5"), + expectedUsage := corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("4"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("4"), }, }, } @@ -1065,17 +1066,17 @@ func removeListWatch(in []testcore.Action) []testcore.Action { // TestAdmitBestEffortQuotaLimitIgnoresBurstable validates that a besteffort quota does not match a resource // guaranteed pod. 
func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("3"), }, }, } @@ -1086,7 +1087,7 @@ func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -1095,9 +1096,9 @@ func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("allowed-pod", 1, 
getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -1110,32 +1111,32 @@ func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) { func TestHasUsageStats(t *testing.T) { testCases := map[string]struct { - a api.ResourceQuota + a corev1.ResourceQuota expected bool }{ "empty": { - a: api.ResourceQuota{Status: api.ResourceQuotaStatus{Hard: api.ResourceList{}}}, + a: corev1.ResourceQuota{Status: corev1.ResourceQuotaStatus{Hard: corev1.ResourceList{}}}, expected: true, }, "hard-only": { - a: api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceMemory: resource.MustParse("1Gi"), + a: corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), }, - Used: api.ResourceList{}, + Used: corev1.ResourceList{}, }, }, expected: false, }, "hard-used": { - a: api.ResourceQuota{ - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceMemory: resource.MustParse("1Gi"), + a: corev1.ResourceQuota{ + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), }, - Used: api.ResourceList{ - api.ResourceMemory: resource.MustParse("500Mi"), + Used: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("500Mi"), }, }, }, @@ -1153,14 +1154,14 @@ func TestHasUsageStats(t *testing.T) { // namespace, it will be set. 
func TestAdmissionSetsMissingNamespace(t *testing.T) { namespace := "test" - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: namespace, ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("3"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("3"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("1"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("1"), }, }, } @@ -1172,7 +1173,7 @@ func TestAdmissionSetsMissingNamespace(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -1181,13 +1182,13 @@ func TestAdmissionSetsMissingNamespace(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) newPod := validPod("pod-without-namespace", 1, getResourceRequirements(getResourceList("1", "2Gi"), getResourceList("", ""))) // unset the namespace newPod.ObjectMeta.Namespace = "" - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), namespace, newPod.Name, 
api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Got unexpected error: %v", err) } @@ -1198,16 +1199,16 @@ func TestAdmissionSetsMissingNamespace(t *testing.T) { // TestAdmitRejectsNegativeUsage verifies that usage for any measured resource cannot be negative. func TestAdmitRejectsNegativeUsage(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("3"), - api.ResourceRequestsStorage: resource.MustParse("100Gi"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: resource.MustParse("3"), + corev1.ResourceRequestsStorage: resource.MustParse("100Gi"), }, - Used: api.ResourceList{ - api.ResourcePersistentVolumeClaims: resource.MustParse("1"), - api.ResourceRequestsStorage: resource.MustParse("10Gi"), + Used: corev1.ResourceList{ + corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"), + corev1.ResourceRequestsStorage: resource.MustParse("10Gi"), }, }, } @@ -1218,7 +1219,7 @@ func TestAdmitRejectsNegativeUsage(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := 
NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -1227,17 +1228,17 @@ func TestAdmitRejectsNegativeUsage(t *testing.T) { Handler: admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) // verify quota rejects negative pvc storage requests newPvc := validPersistentVolumeClaim("not-allowed-pvc", getResourceRequirements(api.ResourceList{api.ResourceStorage: resource.MustParse("-1Gi")}, api.ResourceList{})) - err := handler.Validate(admission.NewAttributesRecord(newPvc, nil, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPvc.Namespace, newPvc.Name, api.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPvc, nil, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPvc.Namespace, newPvc.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Create, false, nil)) if err == nil { t.Errorf("Expected an error because the pvc has negative storage usage") } // verify quota accepts non-negative pvc storage requests newPvc = validPersistentVolumeClaim("not-allowed-pvc", getResourceRequirements(api.ResourceList{api.ResourceStorage: resource.MustParse("1Gi")}, api.ResourceList{})) - err = handler.Validate(admission.NewAttributesRecord(newPvc, nil, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPvc.Namespace, newPvc.Name, api.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Create, false, nil)) + err = handler.Validate(admission.NewAttributesRecord(newPvc, nil, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPvc.Namespace, newPvc.Name, 
corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -1245,16 +1246,16 @@ func TestAdmitRejectsNegativeUsage(t *testing.T) { // TestAdmitWhenUnrelatedResourceExceedsQuota verifies that if resource X exceeds quota, it does not prohibit resource Y from admission. func TestAdmitWhenUnrelatedResourceExceedsQuota(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceServices: resource.MustParse("3"), - api.ResourcePods: resource.MustParse("4"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("3"), + corev1.ResourcePods: resource.MustParse("4"), }, - Used: api.ResourceList{ - api.ResourceServices: resource.MustParse("4"), - api.ResourcePods: resource.MustParse("1"), + Used: corev1.ResourceList{ + corev1.ResourceServices: resource.MustParse("4"), + corev1.ResourcePods: resource.MustParse("1"), }, }, } @@ -1265,7 +1266,7 @@ func TestAdmitWhenUnrelatedResourceExceedsQuota(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() config := &resourcequotaapi.Configuration{} quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -1274,11 +1275,11 @@ func TestAdmitWhenUnrelatedResourceExceedsQuota(t *testing.T) { Handler: 
admission.NewHandler(admission.Create, admission.Update), evaluator: evaluator, } - informerFactory.Core().InternalVersion().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) + informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota) // create a pod that should pass existing quota newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -1293,7 +1294,7 @@ func TestAdmitLimitedResourceNoQuota(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() // disable consumption of cpu unless there is a covering quota. 
config := &resourcequotaapi.Configuration{ @@ -1312,7 +1313,7 @@ func TestAdmitLimitedResourceNoQuota(t *testing.T) { evaluator: evaluator, } newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err == nil { t.Errorf("Expected an error for consuming a limited resource without quota.") } @@ -1327,7 +1328,7 @@ func TestAdmitLimitedResourceNoQuotaIgnoresNonMatchingResources(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() // disable consumption of cpu unless there is a covering quota. 
config := &resourcequotaapi.Configuration{ @@ -1346,7 +1347,7 @@ func TestAdmitLimitedResourceNoQuotaIgnoresNonMatchingResources(t *testing.T) { evaluator: evaluator, } newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -1354,14 +1355,14 @@ func TestAdmitLimitedResourceNoQuotaIgnoresNonMatchingResources(t *testing.T) { // TestAdmitLimitedResourceWithQuota verifies if a limited resource is configured with quota, it can be consumed. 
func TestAdmitLimitedResourceWithQuota(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("10"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("10"), }, - Used: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("1"), + Used: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("1"), }, }, } @@ -1373,7 +1374,7 @@ func TestAdmitLimitedResourceWithQuota(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() // disable consumption of cpu unless there is a covering quota. // disable consumption of cpu unless there is a covering quota. 
@@ -1394,7 +1395,7 @@ func TestAdmitLimitedResourceWithQuota(t *testing.T) { } indexer.Add(resourceQuota) newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1402,25 +1403,25 @@ func TestAdmitLimitedResourceWithQuota(t *testing.T) { // TestAdmitLimitedResourceWithMultipleQuota verifies if a limited resource is configured with quota, it can be consumed if one matches. func TestAdmitLimitedResourceWithMultipleQuota(t *testing.T) { - resourceQuota1 := &api.ResourceQuota{ + resourceQuota1 := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota1", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("10"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("10"), }, - Used: api.ResourceList{ - api.ResourceRequestsCPU: resource.MustParse("1"), + Used: corev1.ResourceList{ + corev1.ResourceRequestsCPU: resource.MustParse("1"), }, }, } - resourceQuota2 := &api.ResourceQuota{ + resourceQuota2 := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota2", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceMemory: resource.MustParse("10Gi"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("10Gi"), }, - Used: 
api.ResourceList{ - api.ResourceMemory: resource.MustParse("1Gi"), + Used: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), }, }, } @@ -1432,7 +1433,7 @@ func TestAdmitLimitedResourceWithMultipleQuota(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() // disable consumption of cpu unless there is a covering quota. // disable consumption of cpu unless there is a covering quota. @@ -1454,7 +1455,7 @@ func TestAdmitLimitedResourceWithMultipleQuota(t *testing.T) { indexer.Add(resourceQuota1) indexer.Add(resourceQuota2) newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1462,14 +1463,14 @@ func TestAdmitLimitedResourceWithMultipleQuota(t *testing.T) { // TestAdmitLimitedResourceWithQuotaThatDoesNotCover verifies if a limited resource is configured the quota must cover the resource. 
func TestAdmitLimitedResourceWithQuotaThatDoesNotCover(t *testing.T) { - resourceQuota := &api.ResourceQuota{ + resourceQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceMemory: resource.MustParse("10Gi"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("10Gi"), }, - Used: api.ResourceList{ - api.ResourceMemory: resource.MustParse("1Gi"), + Used: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), }, }, } @@ -1481,7 +1482,7 @@ func TestAdmitLimitedResourceWithQuotaThatDoesNotCover(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() // disable consumption of cpu unless there is a covering quota. // disable consumption of cpu unless there is a covering quota. 
@@ -1502,7 +1503,7 @@ func TestAdmitLimitedResourceWithQuotaThatDoesNotCover(t *testing.T) { } indexer.Add(resourceQuota) newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", ""))) - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if err == nil { t.Fatalf("Expected an error since the quota did not cover cpu") } @@ -1513,22 +1514,22 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { testCases := []struct { description string testPod *api.Pod - quota *api.ResourceQuota - anotherQuota *api.ResourceQuota + quota *corev1.ResourceQuota + anotherQuota *corev1.ResourceQuota config *resourcequotaapi.Configuration expErr string }{ { description: "Covering quota exists for configured limited scope PriorityClassNameExists.", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists}, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists}, }, }, }, @@ -1537,10 +1538,10 @@ func 
TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists, }, }, }, @@ -1551,14 +1552,14 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "configured limited scope PriorityClassNameExists and limited cpu resource. No covering quota for cpu and pod admit fails.", testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists}, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists}, }, }, }, @@ -1567,10 +1568,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists, }, }, MatchContains: []string{"requests.cpu"}, // match on "requests.cpu" only @@ -1582,15 +1583,15 @@ func 
TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Covering quota does not exist for configured limited scope PriorityClassNameExists.", testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{ LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists, }, }, }, @@ -1601,15 +1602,15 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Covering quota does not exist for configured limited scope resourceQuotaBestEffort", testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{ LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, @@ -1620,17 +1621,17 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Covering quota exist for configured limited scope resourceQuotaBestEffort", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: 
metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("3"), }, }, }, @@ -1638,10 +1639,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, @@ -1652,24 +1653,24 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Neither matches pod. 
Pod allowed", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{ LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1681,24 +1682,24 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Only BestEffort scope matches pod. 
Pod admit fails because covering quota is missing for BestEffort scope", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{ LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1710,24 +1711,24 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Only PriorityClass scope matches pod. 
Pod admit fails because covering quota is missing for PriorityClass scope", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{ LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1739,24 +1740,24 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. 
Pod admit fails because covering quota is missing for PriorityClass scope and BestEffort scope", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{ LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1768,17 +1769,17 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Quota available only for BestEffort scope. 
Pod admit fails because covering quota is missing for PriorityClass scope", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("3"), }, }, }, @@ -1786,19 +1787,19 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1810,14 +1811,14 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. 
Quota available only for PriorityClass scope. Pod admit fails because covering quota is missing for BestEffort scope", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1828,19 +1829,19 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: corev1.ScopeSelectorOpExists, }, }, }, { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1852,28 +1853,28 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Quota available only for both the scopes. Pod admit success. 
No Error", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort}, + Spec: corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort}, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourcePods: resource.MustParse("5"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("5"), }, - Used: api.ResourceList{ - api.ResourcePods: resource.MustParse("3"), + Used: corev1.ResourceList{ + corev1.ResourcePods: resource.MustParse("3"), }, }, }, - anotherQuota: &api.ResourceQuota{ + anotherQuota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1884,19 +1885,19 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopeBestEffort, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopeBestEffort, + Operator: 
corev1.ScopeSelectorOpExists, }, }, }, { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1908,30 +1909,30 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Pod allowed with priorityclass if limited scope PriorityClassNameExists not configured.", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{}, expErr: "", }, { description: "quota fails, though covering quota for configured limited scope PriorityClassNameExists exists.", testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "20Gi"), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists}, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists}, }, }, }, - Status: api.ResourceQuotaStatus{ - Hard: api.ResourceList{ - api.ResourceMemory: resource.MustParse("10Gi"), + Status: corev1.ResourceQuotaStatus{ + Hard: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("10Gi"), }, - Used: api.ResourceList{ - 
api.ResourceMemory: resource.MustParse("1Gi"), + Used: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), }, }, }, @@ -1939,10 +1940,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists, }, }, }, @@ -1953,14 +1954,14 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Pod has different priorityclass than configured limited. Covering quota exists for configured limited scope PriorityClassIn.", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1971,10 +1972,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: 
corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -1986,14 +1987,14 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Pod has limited priorityclass. Covering quota exists for configured limited scope PriorityClassIn.", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"cluster-services"}, }, }, @@ -2004,10 +2005,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"another-priorityclass-name", "cluster-services"}, }, }, @@ -2019,14 +2020,14 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Pod has limited priorityclass. 
Covering quota does not exist for configured limited scope PriorityClassIn.", testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"another-priorityclass-name"}, }, }, @@ -2037,10 +2038,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"another-priorityclass-name", "cluster-services"}, }, }, @@ -2052,14 +2053,14 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "From the above test case, just changing pod priority from cluster-services to another-priorityclass-name. 
expecting no error", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "another-priorityclass-name"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"another-priorityclass-name"}, }, }, @@ -2070,10 +2071,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"another-priorityclass-name", "cluster-services"}, }, }, @@ -2085,15 +2086,15 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Pod has limited priorityclass. 
Covering quota does NOT exists for configured limited scope PriorityClassIn.", testPod: validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{}, + quota: &corev1.ResourceQuota{}, config: &resourcequotaapi.Configuration{ LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"another-priorityclass-name", "cluster-services"}, }, }, @@ -2105,14 +2106,14 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { { description: "Pod has limited priorityclass. Covering quota exists for configured limited scope PriorityClassIn through PriorityClassNameExists", testPod: validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"), - quota: &api.ResourceQuota{ + quota: &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"}, - Spec: api.ResourceQuotaSpec{ - ScopeSelector: &api.ScopeSelector{ - MatchExpressions: []api.ScopedResourceSelectorRequirement{ + Spec: corev1.ResourceQuotaSpec{ + ScopeSelector: &corev1.ScopeSelector{ + MatchExpressions: []corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpExists}, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpExists}, }, }, }, @@ -2121,10 +2122,10 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { LimitedResources: []resourcequotaapi.LimitedResource{ { Resource: "pods", - MatchScopes: []api.ScopedResourceSelectorRequirement{ + MatchScopes: 
[]corev1.ScopedResourceSelectorRequirement{ { - ScopeName: api.ResourceQuotaScopePriorityClass, - Operator: api.ScopeSelectorOpIn, + ScopeName: corev1.ResourceQuotaScopePriorityClass, + Operator: corev1.ScopeSelectorOpIn, Values: []string{"another-priorityclass-name", "cluster-services"}, }, }, @@ -2150,7 +2151,7 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) quotaAccessor, _ := newQuotaAccessor() quotaAccessor.client = kubeClient - quotaAccessor.lister = informerFactory.Core().InternalVersion().ResourceQuotas().Lister() + quotaAccessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister() quotaConfiguration := install.NewQuotaConfigurationForAdmission() evaluator := NewQuotaEvaluator(quotaAccessor, quotaConfiguration.IgnoredResources(), generic.NewRegistry(quotaConfiguration.Evaluators()), nil, config, 5, stopCh) @@ -2163,7 +2164,7 @@ func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) { if testCase.anotherQuota != nil { indexer.Add(testCase.anotherQuota) } - err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) + err := handler.Validate(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, false, nil)) if testCase.expErr == "" { if err != nil { t.Fatalf("Testcase, %v, failed with unexpected error: %v. 
ExpErr: %v", testCase.description, err, testCase.expErr) diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD b/plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD index 9440df52e70..b35353ab96d 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/BUILD @@ -15,7 +15,7 @@ go_library( ], importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota", deps = [ - "//pkg/apis/core:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/types.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/types.go index 7686e4dab5c..b8ffc104218 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/types.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/types.go @@ -17,8 +17,8 @@ limitations under the License. 
package resourcequota import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/apis/core" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -68,5 +68,5 @@ type LimitedResource struct { // "PriorityClassNameIn=cluster-services" // +optional // MatchScopes []string `json:"matchScopes,omitempty"` - MatchScopes []core.ScopedResourceSelectorRequirement `json:"matchScopes,omitempty"` + MatchScopes []corev1.ScopedResourceSelectorRequirement `json:"matchScopes,omitempty"` } diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/BUILD b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/BUILD index 3022ef85a23..65ce81437b0 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/BUILD +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/BUILD @@ -18,7 +18,6 @@ go_library( ], importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1", deps = [ - "//pkg/apis/core:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/doc.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/doc.go index 53508c2f2da..5e8dc097580 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/doc.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota // +k8s:defaulter-gen=TypeMeta +// +groupName=resourcequota.admission.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. 
-// +groupName=resourcequota.admission.k8s.io package v1alpha1 // import "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1" diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go index 27f439dd653..3ca9511b3c5 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1/zz_generated.conversion.go @@ -26,7 +26,6 @@ import ( v1 "k8s.io/api/core/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - core "k8s.io/kubernetes/pkg/apis/core" resourcequota "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" ) @@ -84,7 +83,7 @@ func autoConvert_v1alpha1_LimitedResource_To_resourcequota_LimitedResource(in *L out.APIGroup = in.APIGroup out.Resource = in.Resource out.MatchContains = *(*[]string)(unsafe.Pointer(&in.MatchContains)) - out.MatchScopes = *(*[]core.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchScopes)) + out.MatchScopes = *(*[]v1.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchScopes)) return nil } diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/BUILD b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/BUILD index 47b24da1ab7..0819802ecc5 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/BUILD +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/BUILD @@ -18,7 +18,6 @@ go_library( ], importpath = "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1", deps = [ - "//pkg/apis/core:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/doc.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/doc.go index c86fedd4f94..3f4dd11b218 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/doc.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota // +k8s:defaulter-gen=TypeMeta +// +groupName=resourcequota.admission.k8s.io // Package v1beta1 is the v1beta1 version of the API. -// +groupName=resourcequota.admission.k8s.io package v1beta1 // import "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1" diff --git a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/zz_generated.conversion.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/zz_generated.conversion.go index fb8cb98723e..bff5582a611 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/zz_generated.conversion.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/v1beta1/zz_generated.conversion.go @@ -26,7 +26,6 @@ import ( v1 "k8s.io/api/core/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - core "k8s.io/kubernetes/pkg/apis/core" resourcequota "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" ) @@ -84,7 +83,7 @@ func autoConvert_v1beta1_LimitedResource_To_resourcequota_LimitedResource(in *Li out.APIGroup = in.APIGroup out.Resource = in.Resource out.MatchContains = *(*[]string)(unsafe.Pointer(&in.MatchContains)) - out.MatchScopes = *(*[]core.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchScopes)) + out.MatchScopes = *(*[]v1.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchScopes)) return nil } diff 
--git a/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go b/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go index c33c217ca7c..78baf66effc 100644 --- a/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go +++ b/plugin/pkg/admission/resourcequota/apis/resourcequota/zz_generated.deepcopy.go @@ -21,8 +21,8 @@ limitations under the License. package resourcequota import ( + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" - core "k8s.io/kubernetes/pkg/apis/core" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -67,7 +67,7 @@ func (in *LimitedResource) DeepCopyInto(out *LimitedResource) { } if in.MatchScopes != nil { in, out := &in.MatchScopes, &out.MatchScopes - *out = make([]core.ScopedResourceSelectorRequirement, len(*in)) + *out = make([]v1.ScopedResourceSelectorRequirement, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/plugin/pkg/admission/resourcequota/controller.go b/plugin/pkg/admission/resourcequota/controller.go index 30b9defafa0..34c621903b9 100644 --- a/plugin/pkg/admission/resourcequota/controller.go +++ b/plugin/pkg/admission/resourcequota/controller.go @@ -25,6 +25,7 @@ import ( "github.com/golang/glog" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" @@ -34,9 +35,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/admission" "k8s.io/client-go/util/workqueue" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/quota" - "k8s.io/kubernetes/pkg/quota/generic" + quota "k8s.io/kubernetes/pkg/quota/v1" + "k8s.io/kubernetes/pkg/quota/v1/generic" _ "k8s.io/kubernetes/pkg/util/reflector/prometheus" // for reflector metric registration _ "k8s.io/kubernetes/pkg/util/workqueue/prometheus" // for workqueue metric registration 
resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" @@ -52,7 +52,7 @@ type Evaluator interface { type quotaEvaluator struct { quotaAccessor QuotaAccessor // lockAcquisitionFunc acquires any required locks and returns a cleanup method to defer - lockAcquisitionFunc func([]api.ResourceQuota) func() + lockAcquisitionFunc func([]corev1.ResourceQuota) func() ignoredResources map[schema.GroupResource]struct{} @@ -111,7 +111,7 @@ func newAdmissionWaiter(a admission.Attributes) *admissionWaiter { // NewQuotaEvaluator configures an admission controller that can enforce quota constraints // using the provided registry. The registry must have the capability to handle group/kinds that // are persisted by the server this admission controller is intercepting -func NewQuotaEvaluator(quotaAccessor QuotaAccessor, ignoredResources map[schema.GroupResource]struct{}, quotaRegistry quota.Registry, lockAcquisitionFunc func([]api.ResourceQuota) func(), config *resourcequotaapi.Configuration, workers int, stopCh <-chan struct{}) Evaluator { +func NewQuotaEvaluator(quotaAccessor QuotaAccessor, ignoredResources map[schema.GroupResource]struct{}, quotaRegistry quota.Registry, lockAcquisitionFunc func([]corev1.ResourceQuota) func(), config *resourcequotaapi.Configuration, workers int, stopCh <-chan struct{}) Evaluator { // if we get a nil config, just create an empty default. if config == nil { config = &resourcequotaapi.Configuration{} @@ -214,7 +214,7 @@ func (e *quotaEvaluator) checkAttributes(ns string, admissionAttributes []*admis // updates failed on conflict errors and we have retries left, re-get the failed quota from our cache for the latest version // and recurse into this method with the subset. It's safe for us to evaluate ONLY the subset, because the other quota // documents for these waiters have already been evaluated. Step 1, will mark all the ones that should already have succeeded. 
-func (e *quotaEvaluator) checkQuotas(quotas []api.ResourceQuota, admissionAttributes []*admissionWaiter, remainingRetries int) { +func (e *quotaEvaluator) checkQuotas(quotas []corev1.ResourceQuota, admissionAttributes []*admissionWaiter, remainingRetries int) { // yet another copy to compare against originals to see if we actually have deltas originalQuotas, err := copyQuotas(quotas) if err != nil { @@ -264,7 +264,7 @@ func (e *quotaEvaluator) checkQuotas(quotas []api.ResourceQuota, admissionAttrib // 1. check to see if the quota changed. If not, skip. // 2. if the quota changed and the update passes, be happy // 3. if the quota changed and the update fails, add the original to a retry list - var updatedFailedQuotas []api.ResourceQuota + var updatedFailedQuotas []corev1.ResourceQuota var lastErr error for i := range quotas { newQuota := quotas[i] @@ -318,7 +318,7 @@ func (e *quotaEvaluator) checkQuotas(quotas []api.ResourceQuota, admissionAttrib // this logic goes through our cache to find the new version of all quotas that failed update. If something has been removed // it is skipped on this retry. After all, you removed it. 
- quotasToCheck := []api.ResourceQuota{} + quotasToCheck := []corev1.ResourceQuota{} for _, newQuota := range newQuotas { for _, oldQuota := range updatedFailedQuotas { if newQuota.Name == oldQuota.Name { @@ -330,8 +330,8 @@ func (e *quotaEvaluator) checkQuotas(quotas []api.ResourceQuota, admissionAttrib e.checkQuotas(quotasToCheck, admissionAttributes, remainingRetries-1) } -func copyQuotas(in []api.ResourceQuota) ([]api.ResourceQuota, error) { - out := make([]api.ResourceQuota, 0, len(in)) +func copyQuotas(in []corev1.ResourceQuota) ([]corev1.ResourceQuota, error) { + out := make([]corev1.ResourceQuota, 0, len(in)) for _, quota := range in { out = append(out, *quota.DeepCopy()) } @@ -355,8 +355,8 @@ func filterLimitedResourcesByGroupResource(input []resourcequotaapi.LimitedResou // limitedByDefault determines from the specified usage and limitedResources the set of resources names // that must be present in a covering quota. It returns empty set if it was unable to determine if // a resource was not limited by default. -func limitedByDefault(usage api.ResourceList, limitedResources []resourcequotaapi.LimitedResource) []api.ResourceName { - result := []api.ResourceName{} +func limitedByDefault(usage corev1.ResourceList, limitedResources []resourcequotaapi.LimitedResource) []corev1.ResourceName { + result := []corev1.ResourceName{} for _, limitedResource := range limitedResources { for k, v := range usage { // if a resource is consumed, we need to check if it matches on the limited resource list. 
@@ -374,13 +374,13 @@ func limitedByDefault(usage api.ResourceList, limitedResources []resourcequotaap return result } -func getMatchedLimitedScopes(evaluator quota.Evaluator, inputObject runtime.Object, limitedResources []resourcequotaapi.LimitedResource) ([]api.ScopedResourceSelectorRequirement, error) { - scopes := []api.ScopedResourceSelectorRequirement{} +func getMatchedLimitedScopes(evaluator quota.Evaluator, inputObject runtime.Object, limitedResources []resourcequotaapi.LimitedResource) ([]corev1.ScopedResourceSelectorRequirement, error) { + scopes := []corev1.ScopedResourceSelectorRequirement{} for _, limitedResource := range limitedResources { matched, err := evaluator.MatchingScopes(inputObject, limitedResource.MatchScopes) if err != nil { glog.Errorf("Error while matching limited Scopes: %v", err) - return []api.ScopedResourceSelectorRequirement{}, err + return []corev1.ScopedResourceSelectorRequirement{}, err } for _, scope := range matched { scopes = append(scopes, scope) @@ -391,7 +391,7 @@ func getMatchedLimitedScopes(evaluator quota.Evaluator, inputObject runtime.Obje // checkRequest verifies that the request does not exceed any quota constraint. it returns a copy of quotas not yet persisted // that capture what the usage would be if the request succeeded. It return an error if there is insufficient quota to satisfy the request -func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.Attributes) ([]api.ResourceQuota, error) { +func (e *quotaEvaluator) checkRequest(quotas []corev1.ResourceQuota, a admission.Attributes) ([]corev1.ResourceQuota, error) { evaluator := e.registry.Get(a.GetResource().GroupResource()) if evaluator == nil { return quotas, nil @@ -400,8 +400,8 @@ func (e *quotaEvaluator) checkRequest(quotas []api.ResourceQuota, a admission.At } // CheckRequest is a static version of quotaEvaluator.checkRequest, possible to be called from outside. 
-func CheckRequest(quotas []api.ResourceQuota, a admission.Attributes, evaluator quota.Evaluator, - limited []resourcequotaapi.LimitedResource) ([]api.ResourceQuota, error) { +func CheckRequest(quotas []corev1.ResourceQuota, a admission.Attributes, evaluator quota.Evaluator, + limited []resourcequotaapi.LimitedResource) ([]corev1.ResourceQuota, error) { if !evaluator.Handles(a) { return quotas, nil } @@ -416,7 +416,7 @@ func CheckRequest(quotas []api.ResourceQuota, a admission.Attributes, evaluator } // determine the set of resource names that must exist in a covering quota - limitedResourceNames := []api.ResourceName{} + limitedResourceNames := []corev1.ResourceName{} limitedResources := filterLimitedResourcesByGroupResource(limited, a.GetResource().GroupResource()) if len(limitedResources) > 0 { deltaUsage, err := evaluator.Usage(inputObject) @@ -436,7 +436,7 @@ func CheckRequest(quotas []api.ResourceQuota, a admission.Attributes, evaluator // this is needed to know if we have satisfied any constraints where consumption // was limited by default. 
restrictedResourcesSet := sets.String{} - restrictedScopes := []api.ScopedResourceSelectorRequirement{} + restrictedScopes := []corev1.ScopedResourceSelectorRequirement{} for i := range quotas { resourceQuota := quotas[i] scopeSelectors := getScopeSelectorsFromQuota(resourceQuota) @@ -571,12 +571,12 @@ func CheckRequest(quotas []api.ResourceQuota, a admission.Attributes, evaluator return outQuotas, nil } -func getScopeSelectorsFromQuota(quota api.ResourceQuota) []api.ScopedResourceSelectorRequirement { - selectors := []api.ScopedResourceSelectorRequirement{} +func getScopeSelectorsFromQuota(quota corev1.ResourceQuota) []corev1.ScopedResourceSelectorRequirement { + selectors := []corev1.ScopedResourceSelectorRequirement{} for _, scope := range quota.Spec.Scopes { - selectors = append(selectors, api.ScopedResourceSelectorRequirement{ + selectors = append(selectors, corev1.ScopedResourceSelectorRequirement{ ScopeName: scope, - Operator: api.ScopeSelectorOpExists}) + Operator: corev1.ScopeSelectorOpExists}) } if quota.Spec.ScopeSelector != nil { for _, scopeSelector := range quota.Spec.ScopeSelector.MatchExpressions { @@ -680,7 +680,7 @@ func (e *quotaEvaluator) getWork() (string, []*admissionWaiter, bool) { // prettyPrint formats a resource list for usage in errors // it outputs resources sorted in increasing order -func prettyPrint(item api.ResourceList) string { +func prettyPrint(item corev1.ResourceList) string { parts := []string{} keys := []string{} for key := range item { @@ -688,14 +688,14 @@ func prettyPrint(item api.ResourceList) string { } sort.Strings(keys) for _, key := range keys { - value := item[api.ResourceName(key)] + value := item[corev1.ResourceName(key)] constraint := key + "=" + value.String() parts = append(parts, constraint) } return strings.Join(parts, ",") } -func prettyPrintResourceNames(a []api.ResourceName) string { +func prettyPrintResourceNames(a []corev1.ResourceName) string { values := []string{} for _, value := range a { values = 
append(values, string(value)) @@ -705,7 +705,7 @@ func prettyPrintResourceNames(a []api.ResourceName) string { } // hasUsageStats returns true if for each hard constraint there is a value for its current usage -func hasUsageStats(resourceQuota *api.ResourceQuota) bool { +func hasUsageStats(resourceQuota *corev1.ResourceQuota) bool { for resourceName := range resourceQuota.Status.Hard { if _, found := resourceQuota.Status.Used[resourceName]; !found { return false diff --git a/plugin/pkg/admission/resourcequota/resource_access.go b/plugin/pkg/admission/resourcequota/resource_access.go index c7e12d6c8e0..f703d478b33 100644 --- a/plugin/pkg/admission/resourcequota/resource_access.go +++ b/plugin/pkg/admission/resourcequota/resource_access.go @@ -20,14 +20,14 @@ import ( "fmt" "time" - lru "github.com/hashicorp/golang-lru" + "github.com/hashicorp/golang-lru" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apiserver/pkg/storage/etcd" - api "k8s.io/kubernetes/pkg/apis/core" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - corelisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" ) // QuotaAccessor abstracts the get/set logic from the rest of the Evaluator. This could be a test stub, a straight passthrough, @@ -35,17 +35,17 @@ import ( type QuotaAccessor interface { // UpdateQuotaStatus is called to persist final status. This method should write to persistent storage. // An error indicates that write didn't complete successfully. 
- UpdateQuotaStatus(newQuota *api.ResourceQuota) error + UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error // GetQuotas gets all possible quotas for a given namespace - GetQuotas(namespace string) ([]api.ResourceQuota, error) + GetQuotas(namespace string) ([]corev1.ResourceQuota, error) } type quotaAccessor struct { - client clientset.Interface + client kubernetes.Interface // lister can list/get quota objects from a shared informer's cache - lister corelisters.ResourceQuotaLister + lister corev1listers.ResourceQuotaLister // liveLookups holds the last few live lookups we've done to help ammortize cost on repeated lookup failures. // This lets us handle the case of latent caches, by looking up actual results for a namespace on cache miss/no results. @@ -77,8 +77,8 @@ func newQuotaAccessor() (*quotaAccessor, error) { }, nil } -func (e *quotaAccessor) UpdateQuotaStatus(newQuota *api.ResourceQuota) error { - updatedQuota, err := e.client.Core().ResourceQuotas(newQuota.Namespace).UpdateStatus(newQuota) +func (e *quotaAccessor) UpdateQuotaStatus(newQuota *corev1.ResourceQuota) error { + updatedQuota, err := e.client.CoreV1().ResourceQuotas(newQuota.Namespace).UpdateStatus(newQuota) if err != nil { return err } @@ -93,13 +93,13 @@ var etcdVersioner = etcd.APIObjectVersioner{} // checkCache compares the passed quota against the value in the look-aside cache and returns the newer // if the cache is out of date, it deletes the stale entry. 
This only works because of etcd resourceVersions // being monotonically increasing integers -func (e *quotaAccessor) checkCache(quota *api.ResourceQuota) *api.ResourceQuota { +func (e *quotaAccessor) checkCache(quota *corev1.ResourceQuota) *corev1.ResourceQuota { key := quota.Namespace + "/" + quota.Name uncastCachedQuota, ok := e.updatedQuotas.Get(key) if !ok { return quota } - cachedQuota := uncastCachedQuota.(*api.ResourceQuota) + cachedQuota := uncastCachedQuota.(*corev1.ResourceQuota) if etcdVersioner.CompareResourceVersion(quota, cachedQuota) >= 0 { e.updatedQuotas.Remove(key) @@ -108,7 +108,7 @@ func (e *quotaAccessor) checkCache(quota *api.ResourceQuota) *api.ResourceQuota return cachedQuota } -func (e *quotaAccessor) GetQuotas(namespace string) ([]api.ResourceQuota, error) { +func (e *quotaAccessor) GetQuotas(namespace string) ([]corev1.ResourceQuota, error) { // determine if there are any quotas in this namespace // if there are no quotas, we don't need to do anything items, err := e.lister.ResourceQuotas(namespace).List(labels.Everything()) @@ -142,7 +142,7 @@ func (e *quotaAccessor) GetQuotas(namespace string) ([]api.ResourceQuota, error) } } - resourceQuotas := []api.ResourceQuota{} + resourceQuotas := []corev1.ResourceQuota{} for i := range items { quota := items[i] quota = e.checkCache(quota) diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index bf9c2dfda5e..33b3e610685 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -402,7 +402,7 @@ func ClusterRoles() []rbacv1.ClusterRole { eventsRule(), rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(), rbacv1helpers.NewRule("delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(), - rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("endpoints", "namespaces", 
"secrets", "serviceaccounts").RuleOrDie(), + rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("endpoints", "namespaces", "secrets", "serviceaccounts", "configmaps").RuleOrDie(), rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(), // Needed to check API access. These creates are non-mutating rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(), diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml index 894b46a98f5..1df60152a91 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml @@ -640,6 +640,7 @@ items: - apiGroups: - "" resources: + - configmaps - endpoints - namespaces - secrets diff --git a/staging/README.md b/staging/README.md index 98d47d18b57..e56a44c8280 100644 --- a/staging/README.md +++ b/staging/README.md @@ -6,15 +6,22 @@ top-level k8s.io repositories. 
Repositories currently staged here: -- [`k8s.io/apiextensions-apiserver`](https://github.com/kubernetes/apiextensions-apiserver) - [`k8s.io/api`](https://github.com/kubernetes/api) +- [`k8s.io/apiextensions-apiserver`](https://github.com/kubernetes/apiextensions-apiserver) - [`k8s.io/apimachinery`](https://github.com/kubernetes/apimachinery) - [`k8s.io/apiserver`](https://github.com/kubernetes/apiserver) +- [`k8s.io/cli-runtime`](https://github.com/kubernetes/cli-runtime) - [`k8s.io/client-go`](https://github.com/kubernetes/client-go) -- [`k8s.io/kube-aggregator`](https://github.com/kubernetes/kube-aggregator) - [`k8s.io/code-generator`](https://github.com/kubernetes/code-generator) +- [`k8s.io/csi-api`](https://github.com/kubernetes/csi-api) +- [`k8s.io/kube-aggregator`](https://github.com/kubernetes/kube-aggregator) +- [`k8s.io/kube-controller-manager`](https://github.com/kubernetes/kube-controller-manager) +- [`k8s.io/kube-proxy`](https://github.com/kubernetes/kube-proxy) +- [`k8s.io/kube-scheduler`](https://github.com/kubernetes/kube-scheduler) +- [`k8s.io/kubelet`](https://github.com/kubernetes/kubelet) - [`k8s.io/metrics`](https://github.com/kubernetes/metrics) - [`k8s.io/sample-apiserver`](https://github.com/kubernetes/sample-apiserver) +- [`k8s.io/sample-cli-plugin`](https://github.com/kubernetes/sample-cli-plugin) - [`k8s.io/sample-controller`](https://github.com/kubernetes/sample-controller) The code in the staging/ directory is authoritative, i.e. the only copy of the @@ -39,3 +46,53 @@ import ( Once the change-over to external repositories is complete, these repositories will actually be vendored from `k8s.io/`. + +## Creating a new repository in staging + +### Adding the staging repository in `kubernetes/kubernetes`: + +1. 
Send an email to the SIG Architecture +[mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) +and the mailing list of the SIG which would own the repo requesting approval +for creating the staging repository. + +2. Once approval has been granted, create the new staging repository. + +3. Add a symlink to the staging repo in `vendor/k8s.io`. + +4. Update [`hack/import-restrictions.yaml`](https://github.com/kubernetes/kubernetes/blob/master/hack/import-restrictions.yaml) +to add the list of other staging repos that this new repo can import. + +5. Add all mandatory template files mentioned in +https://github.com/kubernetes/kubernetes-template-project. + +6. Make sure that the `.github/PULL_REQUEST_TEMPLATE.md` and +`CONTRIBUTING.md` files mention that PRs are not directly accepted to the repo. + +### Creating the published repository + +1. Create an [issue](https://github.com/kubernetes/org/issues/new?template=repo-create.md) +in the `kubernetes/org` repo to request creation of the respective published +repository in the Kubernetes org. The published repository **must** have an +initial empty commit. It also needs specific access rules and branch settings. +See [#kubernetes/org#58](https://github.com/kubernetes/org/issues/58) +for an example. + +2. Setup branch protection and enable access to the `stage-bots` team +by adding the repo in +[`prow/config.yaml`](https://github.com/kubernetes/test-infra/blob/master/prow/config.yaml). + +3. Once the repository has been created in the Kubernetes org, +update the publishing-bot to publish the staging repository by updating: + + - [`kubernetes-rules-configmap.yaml`](https://github.com/kubernetes/publishing-bot/blob/master/configs/kubernetes-rules-configmap.yaml): + Make sure that the list of dependencies reflects the staging repos in the `Godeps.json` file. 
+ + - [`fetch-all-latest-and-push.sh`](https://github.com/kubernetes/publishing-bot/blob/master/hack/fetch-all-latest-and-push.sh): + Add the staging repo in the list of repos to be published. + +4. Add the staging and published repositories as a subproject for the +SIG that owns the repos in +[`sigs.yaml`](https://github.com/kubernetes/community/blob/master/sigs.yaml). + +5. Add the repo to the list of staging repos in this `README.md` file. diff --git a/staging/src/BUILD b/staging/src/BUILD index 31e4f9ca529..e9cf566b9ab 100644 --- a/staging/src/BUILD +++ b/staging/src/BUILD @@ -58,6 +58,7 @@ filegroup( "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:all-srcs", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:all-srcs", "//staging/src/k8s.io/apimachinery/pkg/util/validation:all-srcs", + "//staging/src/k8s.io/apimachinery/pkg/util/version:all-srcs", "//staging/src/k8s.io/apimachinery/pkg/util/wait:all-srcs", "//staging/src/k8s.io/apimachinery/pkg/util/waitgroup:all-srcs", "//staging/src/k8s.io/apimachinery/pkg/util/webhook:all-srcs", @@ -206,6 +207,7 @@ filegroup( "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:all-srcs", "//staging/src/k8s.io/csi-api/pkg/client/informers/externalversions:all-srcs", "//staging/src/k8s.io/csi-api/pkg/client/listers/csi/v1alpha1:all-srcs", + "//staging/src/k8s.io/csi-api/pkg/crd:all-srcs", "//staging/src/k8s.io/kube-aggregator:all-srcs", "//staging/src/k8s.io/kube-controller-manager/config/v1alpha1:all-srcs", "//staging/src/k8s.io/kube-proxy/config/v1alpha1:all-srcs", diff --git a/staging/src/k8s.io/api/Godeps/Godeps.json b/staging/src/k8s.io/api/Godeps/Godeps.json index 6d345b23828..7fa23517871 100644 --- a/staging/src/k8s.io/api/Godeps/Godeps.json +++ b/staging/src/k8s.io/api/Godeps/Godeps.json @@ -44,7 +44,7 @@ }, { "ImportPath": "github.com/modern-go/reflect2", - "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" + "Rev": "94122c33edd36123c84d5368cfb2b69df93a0ec8" }, { "ImportPath": 
"github.com/pmezard/go-difflib/difflib", diff --git a/staging/src/k8s.io/api/admission/v1beta1/doc.go b/staging/src/k8s.io/api/admission/v1beta1/doc.go index a26d4d45a45..f5135f0f308 100644 --- a/staging/src/k8s.io/api/admission/v1beta1/doc.go +++ b/staging/src/k8s.io/api/admission/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=false // +groupName=admission.k8s.io + package v1beta1 // import "k8s.io/api/admission/v1beta1" diff --git a/staging/src/k8s.io/api/admissionregistration/v1alpha1/doc.go b/staging/src/k8s.io/api/admissionregistration/v1alpha1/doc.go index 8a5d1fbbb6e..d29913cf52c 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1alpha1/doc.go +++ b/staging/src/k8s.io/api/admissionregistration/v1alpha1/doc.go @@ -16,10 +16,10 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:openapi-gen=true +// +groupName=admissionregistration.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration // InitializerConfiguration and validatingWebhookConfiguration is for the // new dynamic admission controller configuration. -// +groupName=admissionregistration.k8s.io package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1" diff --git a/staging/src/k8s.io/api/admissionregistration/v1beta1/doc.go b/staging/src/k8s.io/api/admissionregistration/v1beta1/doc.go index afbb3d6d3ad..2b29efaca4a 100644 --- a/staging/src/k8s.io/api/admissionregistration/v1beta1/doc.go +++ b/staging/src/k8s.io/api/admissionregistration/v1beta1/doc.go @@ -16,10 +16,10 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:openapi-gen=true +// +groupName=admissionregistration.k8s.io // Package v1beta1 is the v1beta1 version of the API. 
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration // InitializerConfiguration and validatingWebhookConfiguration is for the // new dynamic admission controller configuration. -// +groupName=admissionregistration.k8s.io package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1" diff --git a/staging/src/k8s.io/api/authentication/v1/doc.go b/staging/src/k8s.io/api/authentication/v1/doc.go index 2d2ed2ee821..193f154abe9 100644 --- a/staging/src/k8s.io/api/authentication/v1/doc.go +++ b/staging/src/k8s.io/api/authentication/v1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true + package v1 // import "k8s.io/api/authentication/v1" diff --git a/staging/src/k8s.io/api/authentication/v1beta1/doc.go b/staging/src/k8s.io/api/authentication/v1beta1/doc.go index e0de315d40d..919f3c42fdd 100644 --- a/staging/src/k8s.io/api/authentication/v1beta1/doc.go +++ b/staging/src/k8s.io/api/authentication/v1beta1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true + package v1beta1 // import "k8s.io/api/authentication/v1beta1" diff --git a/staging/src/k8s.io/api/authorization/v1/doc.go b/staging/src/k8s.io/api/authorization/v1/doc.go index c06b798df8c..c63ac28cfa7 100644 --- a/staging/src/k8s.io/api/authorization/v1/doc.go +++ b/staging/src/k8s.io/api/authorization/v1/doc.go @@ -18,4 +18,5 @@ limitations under the License. 
// +k8s:openapi-gen=true // +groupName=authorization.k8s.io + package v1 // import "k8s.io/api/authorization/v1" diff --git a/staging/src/k8s.io/api/authorization/v1beta1/doc.go b/staging/src/k8s.io/api/authorization/v1beta1/doc.go index ea4f802e289..324f293a17c 100644 --- a/staging/src/k8s.io/api/authorization/v1beta1/doc.go +++ b/staging/src/k8s.io/api/authorization/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=authorization.k8s.io + package v1beta1 // import "k8s.io/api/authorization/v1beta1" diff --git a/staging/src/k8s.io/api/certificates/v1beta1/doc.go b/staging/src/k8s.io/api/certificates/v1beta1/doc.go index fb23aadb0e9..8473b640fa5 100644 --- a/staging/src/k8s.io/api/certificates/v1beta1/doc.go +++ b/staging/src/k8s.io/api/certificates/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=certificates.k8s.io + package v1beta1 // import "k8s.io/api/certificates/v1beta1" diff --git a/staging/src/k8s.io/api/coordination/v1beta1/doc.go b/staging/src/k8s.io/api/coordination/v1beta1/doc.go index fecb513fcfa..bc95fd17d4d 100644 --- a/staging/src/k8s.io/api/coordination/v1beta1/doc.go +++ b/staging/src/k8s.io/api/coordination/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. 
// +k8s:openapi-gen=true // +groupName=coordination.k8s.io + package v1beta1 // import "k8s.io/api/coordination/v1beta1" diff --git a/staging/src/k8s.io/api/core/v1/generated.pb.go b/staging/src/k8s.io/api/core/v1/generated.pb.go index b569ea84de1..c9ce92523fd 100644 --- a/staging/src/k8s.io/api/core/v1/generated.pb.go +++ b/staging/src/k8s.io/api/core/v1/generated.pb.go @@ -7684,6 +7684,18 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) i += copy(dAtA[i:], *m.RuntimeClassName) } + if m.EnableServiceLinks != nil { + dAtA[i] = 0xf0 + i++ + dAtA[i] = 0x1 + i++ + if *m.EnableServiceLinks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } return i, nil } @@ -13315,6 +13327,9 @@ func (m *PodSpec) Size() (n int) { l = len(*m.RuntimeClassName) n += 2 + l + sovGenerated(uint64(l)) } + if m.EnableServiceLinks != nil { + n += 3 + } return n } @@ -16325,6 +16340,7 @@ func (this *PodSpec) String() string { `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, `ReadinessGates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ReadinessGates), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + `,`, `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, + `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`, `}`, }, "") return s @@ -39509,6 +39525,27 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.RuntimeClassName = &s iNdEx = postIndex + case 30: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EnableServiceLinks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.EnableServiceLinks = &b default: iNdEx 
= preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -51346,804 +51383,806 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 12780 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6b, 0x6c, 0x24, 0x47, - 0x7a, 0xd8, 0xf5, 0xcc, 0x90, 0x9c, 0xf9, 0xf8, 0xae, 0x7d, 0x88, 0x4b, 0x69, 0x77, 0x56, 0xad, - 0xbb, 0xd5, 0xea, 0x24, 0x91, 0xa7, 0x95, 0x74, 0x92, 0x4f, 0x3a, 0xd9, 0x24, 0x87, 0xdc, 0x1d, - 0xed, 0x92, 0x3b, 0xaa, 0xe1, 0xee, 0xde, 0xc9, 0xba, 0xf3, 0x35, 0x67, 0x8a, 0x64, 0x8b, 0xc3, - 0xee, 0x51, 0x77, 0x0f, 0x77, 0xa9, 0xd8, 0x40, 0x72, 0x8e, 0x9d, 0x5c, 0x6c, 0x04, 0x87, 0xd8, - 0xc8, 0xc3, 0x36, 0x1c, 0xc0, 0x71, 0x60, 0x3b, 0x4e, 0x82, 0x38, 0x76, 0x6c, 0xc7, 0x67, 0x27, - 0x8e, 0x9d, 0x1f, 0x0e, 0x10, 0x5c, 0x9c, 0x00, 0xc1, 0x19, 0x30, 0xc2, 0xd8, 0x74, 0x1e, 0xf0, - 0x8f, 0x3c, 0x10, 0xe7, 0x47, 0xcc, 0x18, 0x71, 0x50, 0xcf, 0xae, 0xea, 0xe9, 0x9e, 0x19, 0xae, - 0xb8, 0x94, 0x7c, 0xb8, 0x7f, 0x33, 0xf5, 0x7d, 0xf5, 0x55, 0x75, 0x3d, 0xbf, 0xef, 0xab, 0xef, - 0x01, 0xaf, 0xed, 0xbc, 0x1a, 0xce, 0xb9, 0xfe, 0xfc, 0x4e, 0x67, 0x83, 0x04, 0x1e, 0x89, 0x48, - 0x38, 0xbf, 0x47, 0xbc, 0xa6, 0x1f, 0xcc, 0x0b, 0x80, 0xd3, 0x76, 0xe7, 0x1b, 0x7e, 0x40, 0xe6, - 0xf7, 0x5e, 0x98, 0xdf, 0x22, 0x1e, 0x09, 0x9c, 0x88, 0x34, 0xe7, 0xda, 0x81, 0x1f, 0xf9, 0x08, - 0x71, 0x9c, 0x39, 0xa7, 0xed, 0xce, 0x51, 0x9c, 0xb9, 0xbd, 0x17, 0x66, 0x9f, 0xdf, 0x72, 0xa3, - 0xed, 0xce, 0xc6, 0x5c, 0xc3, 0xdf, 0x9d, 0xdf, 0xf2, 0xb7, 0xfc, 0x79, 0x86, 0xba, 0xd1, 0xd9, - 0x64, 0xff, 0xd8, 0x1f, 0xf6, 0x8b, 0x93, 0x98, 0x7d, 0x29, 0x6e, 0x66, 0xd7, 0x69, 0x6c, 0xbb, - 0x1e, 0x09, 0xf6, 0xe7, 0xdb, 0x3b, 0x5b, 0xac, 0xdd, 0x80, 0x84, 0x7e, 0x27, 0x68, 0x90, 0x64, - 0xc3, 0x3d, 0x6b, 0x85, 0xf3, 0xbb, 0x24, 0x72, 0x52, 0xba, 0x3b, 0x3b, 0x9f, 0x55, 0x2b, 0xe8, - 0x78, 0x91, 0xbb, 0xdb, 0xdd, 0xcc, 0xa7, 0xfb, 0x55, 0x08, 0x1b, 0xdb, 0x64, 0xd7, 0xe9, 0xaa, - 0xf7, 0x62, 0x56, 0xbd, 0x4e, 0xe4, 0xb6, 0xe6, 
0x5d, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc9, 0xfe, - 0x86, 0x05, 0x97, 0x17, 0xee, 0xd5, 0x97, 0x5b, 0x4e, 0x18, 0xb9, 0x8d, 0xc5, 0x96, 0xdf, 0xd8, - 0xa9, 0x47, 0x7e, 0x40, 0xee, 0xfa, 0xad, 0xce, 0x2e, 0xa9, 0xb3, 0x81, 0x40, 0xcf, 0x41, 0x71, - 0x8f, 0xfd, 0xaf, 0x56, 0x66, 0xac, 0xcb, 0xd6, 0xd5, 0xd2, 0xe2, 0xd4, 0x6f, 0x1d, 0x94, 0x3f, - 0x76, 0x78, 0x50, 0x2e, 0xde, 0x15, 0xe5, 0x58, 0x61, 0xa0, 0x2b, 0x30, 0xbc, 0x19, 0xae, 0xef, - 0xb7, 0xc9, 0x4c, 0x8e, 0xe1, 0x4e, 0x08, 0xdc, 0xe1, 0x95, 0x3a, 0x2d, 0xc5, 0x02, 0x8a, 0xe6, - 0xa1, 0xd4, 0x76, 0x82, 0xc8, 0x8d, 0x5c, 0xdf, 0x9b, 0xc9, 0x5f, 0xb6, 0xae, 0x0e, 0x2d, 0x4e, - 0x0b, 0xd4, 0x52, 0x4d, 0x02, 0x70, 0x8c, 0x43, 0xbb, 0x11, 0x10, 0xa7, 0x79, 0xdb, 0x6b, 0xed, - 0xcf, 0x14, 0x2e, 0x5b, 0x57, 0x8b, 0x71, 0x37, 0xb0, 0x28, 0xc7, 0x0a, 0xc3, 0xfe, 0x91, 0x1c, - 0x14, 0x17, 0x36, 0x37, 0x5d, 0xcf, 0x8d, 0xf6, 0xd1, 0x5d, 0x18, 0xf3, 0xfc, 0x26, 0x91, 0xff, - 0xd9, 0x57, 0x8c, 0x5e, 0xbb, 0x3c, 0xd7, 0xbd, 0x94, 0xe6, 0xd6, 0x34, 0xbc, 0xc5, 0xa9, 0xc3, - 0x83, 0xf2, 0x98, 0x5e, 0x82, 0x0d, 0x3a, 0x08, 0xc3, 0x68, 0xdb, 0x6f, 0x2a, 0xb2, 0x39, 0x46, - 0xb6, 0x9c, 0x46, 0xb6, 0x16, 0xa3, 0x2d, 0x4e, 0x1e, 0x1e, 0x94, 0x47, 0xb5, 0x02, 0xac, 0x13, - 0x41, 0x1b, 0x30, 0x49, 0xff, 0x7a, 0x91, 0xab, 0xe8, 0xe6, 0x19, 0xdd, 0xa7, 0xb2, 0xe8, 0x6a, - 0xa8, 0x8b, 0x67, 0x0e, 0x0f, 0xca, 0x93, 0x89, 0x42, 0x9c, 0x24, 0x68, 0xbf, 0x0f, 0x13, 0x0b, - 0x51, 0xe4, 0x34, 0xb6, 0x49, 0x93, 0xcf, 0x20, 0x7a, 0x09, 0x0a, 0x9e, 0xb3, 0x4b, 0xc4, 0xfc, - 0x5e, 0x16, 0x03, 0x5b, 0x58, 0x73, 0x76, 0xc9, 0xd1, 0x41, 0x79, 0xea, 0x8e, 0xe7, 0xbe, 0xd7, - 0x11, 0xab, 0x82, 0x96, 0x61, 0x86, 0x8d, 0xae, 0x01, 0x34, 0xc9, 0x9e, 0xdb, 0x20, 0x35, 0x27, - 0xda, 0x16, 0xf3, 0x8d, 0x44, 0x5d, 0xa8, 0x28, 0x08, 0xd6, 0xb0, 0xec, 0x07, 0x50, 0x5a, 0xd8, - 0xf3, 0xdd, 0x66, 0xcd, 0x6f, 0x86, 0x68, 0x07, 0x26, 0xdb, 0x01, 0xd9, 0x24, 0x81, 0x2a, 0x9a, - 0xb1, 0x2e, 0xe7, 0xaf, 0x8e, 0x5e, 0xbb, 0x9a, 0xfa, 0xb1, 0x26, 0xea, 0xb2, 0x17, 
0x05, 0xfb, - 0x8b, 0x8f, 0x89, 0xf6, 0x26, 0x13, 0x50, 0x9c, 0xa4, 0x6c, 0xff, 0xcb, 0x1c, 0x9c, 0x5b, 0x78, - 0xbf, 0x13, 0x90, 0x8a, 0x1b, 0xee, 0x24, 0x57, 0x78, 0xd3, 0x0d, 0x77, 0xd6, 0xe2, 0x11, 0x50, - 0x4b, 0xab, 0x22, 0xca, 0xb1, 0xc2, 0x40, 0xcf, 0xc3, 0x08, 0xfd, 0x7d, 0x07, 0x57, 0xc5, 0x27, - 0x9f, 0x11, 0xc8, 0xa3, 0x15, 0x27, 0x72, 0x2a, 0x1c, 0x84, 0x25, 0x0e, 0x5a, 0x85, 0xd1, 0x06, - 0xdb, 0x90, 0x5b, 0xab, 0x7e, 0x93, 0xb0, 0xc9, 0x2c, 0x2d, 0x3e, 0x4b, 0xd1, 0x97, 0xe2, 0xe2, - 0xa3, 0x83, 0xf2, 0x0c, 0xef, 0x9b, 0x20, 0xa1, 0xc1, 0xb0, 0x5e, 0x1f, 0xd9, 0x6a, 0x7f, 0x15, - 0x18, 0x25, 0x48, 0xd9, 0x5b, 0x57, 0xb5, 0xad, 0x32, 0xc4, 0xb6, 0xca, 0x58, 0xfa, 0x36, 0x41, - 0x2f, 0x40, 0x61, 0xc7, 0xf5, 0x9a, 0x33, 0xc3, 0x8c, 0xd6, 0x45, 0x3a, 0xe7, 0x37, 0x5d, 0xaf, - 0x79, 0x74, 0x50, 0x9e, 0x36, 0xba, 0x43, 0x0b, 0x31, 0x43, 0xb5, 0xff, 0xc8, 0x82, 0x32, 0x83, - 0xad, 0xb8, 0x2d, 0x52, 0x23, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, 0x40, 0xaf, 0x01, 0x84, - 0xa4, 0x11, 0x90, 0x48, 0x1b, 0x52, 0xb5, 0x30, 0xea, 0x0a, 0x82, 0x35, 0x2c, 0x7a, 0x20, 0x84, - 0xdb, 0x4e, 0xc0, 0xd6, 0x97, 0x18, 0x58, 0x75, 0x20, 0xd4, 0x25, 0x00, 0xc7, 0x38, 0xc6, 0x81, - 0x90, 0xef, 0x77, 0x20, 0xa0, 0xcf, 0xc2, 0x64, 0xdc, 0x58, 0xd8, 0x76, 0x1a, 0x72, 0x00, 0xd9, - 0x96, 0xa9, 0x9b, 0x20, 0x9c, 0xc4, 0xb5, 0xff, 0x9e, 0x25, 0x16, 0x0f, 0xfd, 0xea, 0x8f, 0xf8, - 0xb7, 0xda, 0xbf, 0x6c, 0xc1, 0xc8, 0xa2, 0xeb, 0x35, 0x5d, 0x6f, 0x0b, 0x7d, 0x09, 0x8a, 0xf4, - 0x6e, 0x6a, 0x3a, 0x91, 0x23, 0xce, 0xbd, 0x4f, 0x69, 0x7b, 0x4b, 0x5d, 0x15, 0x73, 0xed, 0x9d, - 0x2d, 0x5a, 0x10, 0xce, 0x51, 0x6c, 0xba, 0xdb, 0x6e, 0x6f, 0xbc, 0x4b, 0x1a, 0xd1, 0x2a, 0x89, - 0x9c, 0xf8, 0x73, 0xe2, 0x32, 0xac, 0xa8, 0xa2, 0x9b, 0x30, 0x1c, 0x39, 0xc1, 0x16, 0x89, 0xc4, - 0x01, 0x98, 0x7a, 0x50, 0xf1, 0x9a, 0x98, 0xee, 0x48, 0xe2, 0x35, 0x48, 0x7c, 0x2d, 0xac, 0xb3, - 0xaa, 0x58, 0x90, 0xb0, 0xff, 0xca, 0x30, 0x5c, 0x58, 0xaa, 0x57, 0x33, 0xd6, 0xd5, 0x15, 0x18, - 0x6e, 0x06, 0xee, 0x1e, 
0x09, 0xc4, 0x38, 0x2b, 0x2a, 0x15, 0x56, 0x8a, 0x05, 0x14, 0xbd, 0x0a, - 0x63, 0xfc, 0x42, 0xba, 0xe1, 0x78, 0xcd, 0x96, 0x1c, 0xe2, 0xb3, 0x02, 0x7b, 0xec, 0xae, 0x06, - 0xc3, 0x06, 0xe6, 0x31, 0x17, 0xd5, 0x95, 0xc4, 0x66, 0xcc, 0xba, 0xec, 0xbe, 0x62, 0xc1, 0x14, - 0x6f, 0x66, 0x21, 0x8a, 0x02, 0x77, 0xa3, 0x13, 0x91, 0x70, 0x66, 0x88, 0x9d, 0x74, 0x4b, 0x69, - 0xa3, 0x95, 0x39, 0x02, 0x73, 0x77, 0x13, 0x54, 0xf8, 0x21, 0x38, 0x23, 0xda, 0x9d, 0x4a, 0x82, - 0x71, 0x57, 0xb3, 0xe8, 0x7b, 0x2d, 0x98, 0x6d, 0xf8, 0x5e, 0x14, 0xf8, 0xad, 0x16, 0x09, 0x6a, - 0x9d, 0x8d, 0x96, 0x1b, 0x6e, 0xf3, 0x75, 0x8a, 0xc9, 0x26, 0x3b, 0x09, 0x32, 0xe6, 0x50, 0x21, - 0x89, 0x39, 0xbc, 0x74, 0x78, 0x50, 0x9e, 0x5d, 0xca, 0x24, 0x85, 0x7b, 0x34, 0x83, 0x76, 0x00, - 0xd1, 0xab, 0xb4, 0x1e, 0x39, 0x5b, 0x24, 0x6e, 0x7c, 0x64, 0xf0, 0xc6, 0xcf, 0x1f, 0x1e, 0x94, - 0xd1, 0x5a, 0x17, 0x09, 0x9c, 0x42, 0x16, 0xbd, 0x07, 0x67, 0x69, 0x69, 0xd7, 0xb7, 0x16, 0x07, - 0x6f, 0x6e, 0xe6, 0xf0, 0xa0, 0x7c, 0x76, 0x2d, 0x85, 0x08, 0x4e, 0x25, 0x3d, 0xbb, 0x04, 0xe7, - 0x52, 0xa7, 0x0a, 0x4d, 0x41, 0x7e, 0x87, 0x70, 0x16, 0xa4, 0x84, 0xe9, 0x4f, 0x74, 0x16, 0x86, - 0xf6, 0x9c, 0x56, 0x47, 0xac, 0x52, 0xcc, 0xff, 0x7c, 0x26, 0xf7, 0xaa, 0x65, 0x37, 0x60, 0x6c, - 0xc9, 0x69, 0x3b, 0x1b, 0x6e, 0xcb, 0x8d, 0x5c, 0x12, 0xa2, 0xa7, 0x21, 0xef, 0x34, 0x9b, 0xec, - 0x8a, 0x2c, 0x2d, 0x9e, 0x3b, 0x3c, 0x28, 0xe7, 0x17, 0x9a, 0xf4, 0xac, 0x06, 0x85, 0xb5, 0x8f, - 0x29, 0x06, 0xfa, 0x24, 0x14, 0x9a, 0x81, 0xdf, 0x9e, 0xc9, 0x31, 0x4c, 0x3a, 0x54, 0x85, 0x4a, - 0xe0, 0xb7, 0x13, 0xa8, 0x0c, 0xc7, 0xfe, 0xf5, 0x1c, 0x3c, 0xb1, 0x44, 0xda, 0xdb, 0x2b, 0xf5, - 0x8c, 0x4d, 0x77, 0x15, 0x8a, 0xbb, 0xbe, 0xe7, 0x46, 0x7e, 0x10, 0x8a, 0xa6, 0xd9, 0x6d, 0xb2, - 0x2a, 0xca, 0xb0, 0x82, 0xa2, 0xcb, 0x50, 0x68, 0xc7, 0x9c, 0xc0, 0x98, 0xe4, 0x22, 0x18, 0x0f, - 0xc0, 0x20, 0x14, 0xa3, 0x13, 0x92, 0x40, 0xdc, 0x82, 0x0a, 0xe3, 0x4e, 0x48, 0x02, 0xcc, 0x20, - 0xf1, 0x71, 0x4a, 0x0f, 0x5a, 0xb1, 0xad, 0x12, 0xc7, 0x29, 
0x85, 0x60, 0x0d, 0x0b, 0xd5, 0xa0, - 0x14, 0xaa, 0x49, 0x1d, 0x1a, 0x7c, 0x52, 0xc7, 0xd9, 0x79, 0xab, 0x66, 0x32, 0x26, 0x62, 0x1c, - 0x03, 0xc3, 0x7d, 0xcf, 0xdb, 0xaf, 0xe5, 0x00, 0xf1, 0x21, 0xfc, 0x33, 0x36, 0x70, 0x77, 0xba, - 0x07, 0x2e, 0x95, 0xf3, 0xba, 0xe5, 0x37, 0x9c, 0x56, 0xf2, 0x08, 0x3f, 0xa9, 0xd1, 0xfb, 0xdf, - 0x16, 0x3c, 0xb1, 0xe4, 0x7a, 0x4d, 0x12, 0x64, 0x2c, 0xc0, 0x47, 0x23, 0x80, 0x1c, 0xef, 0xa4, - 0x37, 0x96, 0x58, 0xe1, 0x04, 0x96, 0x98, 0xfd, 0x3f, 0x2c, 0x40, 0xfc, 0xb3, 0x3f, 0x72, 0x1f, - 0x7b, 0xa7, 0xfb, 0x63, 0x4f, 0x60, 0x59, 0xd8, 0xb7, 0x60, 0x62, 0xa9, 0xe5, 0x12, 0x2f, 0xaa, - 0xd6, 0x96, 0x7c, 0x6f, 0xd3, 0xdd, 0x42, 0x9f, 0x81, 0x09, 0x2a, 0xd3, 0xfa, 0x9d, 0xa8, 0x4e, - 0x1a, 0xbe, 0xc7, 0xd8, 0x7f, 0x2a, 0x09, 0xa2, 0xc3, 0x83, 0xf2, 0xc4, 0xba, 0x01, 0xc1, 0x09, - 0x4c, 0xfb, 0x77, 0xe9, 0xf8, 0xf9, 0xbb, 0x6d, 0xdf, 0x23, 0x5e, 0xb4, 0xe4, 0x7b, 0x4d, 0x2e, - 0x26, 0x7e, 0x06, 0x0a, 0x11, 0x1d, 0x0f, 0x3e, 0x76, 0x57, 0xe4, 0x46, 0xa1, 0xa3, 0x70, 0x74, - 0x50, 0x3e, 0xdf, 0x5d, 0x83, 0x8d, 0x13, 0xab, 0x83, 0xbe, 0x0d, 0x86, 0xc3, 0xc8, 0x89, 0x3a, - 0xa1, 0x18, 0xcd, 0x27, 0xe5, 0x68, 0xd6, 0x59, 0xe9, 0xd1, 0x41, 0x79, 0x52, 0x55, 0xe3, 0x45, - 0x58, 0x54, 0x40, 0xcf, 0xc0, 0xc8, 0x2e, 0x09, 0x43, 0x67, 0x4b, 0x72, 0xf8, 0x93, 0xa2, 0xee, - 0xc8, 0x2a, 0x2f, 0xc6, 0x12, 0x8e, 0x9e, 0x82, 0x21, 0x12, 0x04, 0x7e, 0x20, 0xf6, 0xe8, 0xb8, - 0x40, 0x1c, 0x5a, 0xa6, 0x85, 0x98, 0xc3, 0xec, 0x7f, 0x63, 0xc1, 0xa4, 0xea, 0x2b, 0x6f, 0xeb, - 0x14, 0x58, 0xb9, 0xb7, 0x01, 0x1a, 0xf2, 0x03, 0x43, 0x76, 0x7b, 0x8c, 0x5e, 0xbb, 0x92, 0xca, - 0xa0, 0x74, 0x0d, 0x63, 0x4c, 0x59, 0x15, 0x85, 0x58, 0xa3, 0x66, 0xff, 0x9a, 0x05, 0x67, 0x12, - 0x5f, 0x74, 0xcb, 0x0d, 0x23, 0xf4, 0x4e, 0xd7, 0x57, 0xcd, 0x0d, 0xf6, 0x55, 0xb4, 0x36, 0xfb, - 0x26, 0xb5, 0x94, 0x65, 0x89, 0xf6, 0x45, 0x37, 0x60, 0xc8, 0x8d, 0xc8, 0xae, 0xfc, 0x98, 0xa7, - 0x7a, 0x7e, 0x0c, 0xef, 0x55, 0x3c, 0x23, 0x55, 0x5a, 0x13, 0x73, 0x02, 0xf6, 0x0f, 0xe5, 0xa1, - 
0xc4, 0x97, 0xed, 0xaa, 0xd3, 0x3e, 0x85, 0xb9, 0xa8, 0x42, 0x81, 0x51, 0xe7, 0x1d, 0x7f, 0x3a, - 0xbd, 0xe3, 0xa2, 0x3b, 0x73, 0x54, 0x4e, 0xe3, 0xac, 0xa0, 0xba, 0x1a, 0x68, 0x11, 0x66, 0x24, - 0x90, 0x03, 0xb0, 0xe1, 0x7a, 0x4e, 0xb0, 0x4f, 0xcb, 0x66, 0xf2, 0x8c, 0xe0, 0xf3, 0xbd, 0x09, - 0x2e, 0x2a, 0x7c, 0x4e, 0x56, 0xf5, 0x35, 0x06, 0x60, 0x8d, 0xe8, 0xec, 0x2b, 0x50, 0x52, 0xc8, - 0xc7, 0xe1, 0x71, 0x66, 0x3f, 0x0b, 0x93, 0x89, 0xb6, 0xfa, 0x55, 0x1f, 0xd3, 0x59, 0xa4, 0x5f, - 0x61, 0xa7, 0x80, 0xe8, 0xf5, 0xb2, 0xb7, 0x27, 0x4e, 0xd1, 0xf7, 0xe1, 0x6c, 0x2b, 0xe5, 0x70, - 0x12, 0x53, 0x35, 0xf8, 0x61, 0xf6, 0x84, 0xf8, 0xec, 0xb3, 0x69, 0x50, 0x9c, 0xda, 0x06, 0xbd, - 0xf6, 0xfd, 0x36, 0x5d, 0xf3, 0x4e, 0x8b, 0xf5, 0x57, 0x48, 0xdf, 0xb7, 0x45, 0x19, 0x56, 0x50, - 0x7a, 0x84, 0x9d, 0x55, 0x9d, 0xbf, 0x49, 0xf6, 0xeb, 0xa4, 0x45, 0x1a, 0x91, 0x1f, 0x7c, 0xa8, - 0xdd, 0xbf, 0xc8, 0x47, 0x9f, 0x9f, 0x80, 0xa3, 0x82, 0x40, 0xfe, 0x26, 0xd9, 0xe7, 0x53, 0xa1, - 0x7f, 0x5d, 0xbe, 0xe7, 0xd7, 0xfd, 0x9c, 0x05, 0xe3, 0xea, 0xeb, 0x4e, 0x61, 0xab, 0x2f, 0x9a, - 0x5b, 0xfd, 0x62, 0xcf, 0x05, 0x9e, 0xb1, 0xc9, 0xbf, 0x96, 0x83, 0x0b, 0x0a, 0x87, 0xb2, 0xfb, - 0xfc, 0x8f, 0x58, 0x55, 0xf3, 0x50, 0xf2, 0x94, 0xf6, 0xc0, 0x32, 0xc5, 0xf6, 0x58, 0x77, 0x10, - 0xe3, 0x50, 0xae, 0xcd, 0x8b, 0x45, 0xfc, 0x31, 0x5d, 0xad, 0x26, 0x54, 0x68, 0x8b, 0x90, 0xef, - 0xb8, 0x4d, 0x71, 0x67, 0x7c, 0x4a, 0x8e, 0xf6, 0x9d, 0x6a, 0xe5, 0xe8, 0xa0, 0xfc, 0x64, 0x96, - 0x4a, 0x97, 0x5e, 0x56, 0xe1, 0xdc, 0x9d, 0x6a, 0x05, 0xd3, 0xca, 0x68, 0x01, 0x26, 0xa5, 0xd6, - 0xfa, 0x2e, 0xe5, 0xa0, 0x7c, 0x4f, 0x5c, 0x2d, 0x4a, 0x37, 0x86, 0x4d, 0x30, 0x4e, 0xe2, 0xa3, - 0x0a, 0x4c, 0xed, 0x74, 0x36, 0x48, 0x8b, 0x44, 0xfc, 0x83, 0x6f, 0x12, 0xae, 0x39, 0x2a, 0xc5, - 0xa2, 0xe5, 0xcd, 0x04, 0x1c, 0x77, 0xd5, 0xb0, 0xff, 0x94, 0x1d, 0xf1, 0x62, 0xf4, 0x6a, 0x81, - 0x4f, 0x17, 0x16, 0xa5, 0xfe, 0x61, 0x2e, 0xe7, 0x41, 0x56, 0xc5, 0x4d, 0xb2, 0xbf, 0xee, 0x53, - 0x66, 0x3b, 0x7d, 0x55, 0x18, 0x6b, 
0xbe, 0xd0, 0x73, 0xcd, 0xff, 0x42, 0x0e, 0xce, 0xa9, 0x11, - 0x30, 0xf8, 0xba, 0x3f, 0xeb, 0x63, 0xf0, 0x02, 0x8c, 0x36, 0xc9, 0xa6, 0xd3, 0x69, 0x45, 0x4a, - 0x8d, 0x39, 0xc4, 0x55, 0xd9, 0x95, 0xb8, 0x18, 0xeb, 0x38, 0xc7, 0x18, 0xb6, 0x9f, 0x1c, 0x65, - 0x77, 0x6b, 0xe4, 0xd0, 0x35, 0xae, 0x76, 0x8d, 0x95, 0xb9, 0x6b, 0x9e, 0x82, 0x21, 0x77, 0x97, - 0xf2, 0x5a, 0x39, 0x93, 0x85, 0xaa, 0xd2, 0x42, 0xcc, 0x61, 0xe8, 0x13, 0x30, 0xd2, 0xf0, 0x77, - 0x77, 0x1d, 0xaf, 0xc9, 0xae, 0xbc, 0xd2, 0xe2, 0x28, 0x65, 0xc7, 0x96, 0x78, 0x11, 0x96, 0x30, - 0xf4, 0x04, 0x14, 0x9c, 0x60, 0x2b, 0x9c, 0x29, 0x30, 0x9c, 0x22, 0x6d, 0x69, 0x21, 0xd8, 0x0a, - 0x31, 0x2b, 0xa5, 0x52, 0xd5, 0x7d, 0x3f, 0xd8, 0x71, 0xbd, 0xad, 0x8a, 0x1b, 0x88, 0x2d, 0xa1, - 0xee, 0xc2, 0x7b, 0x0a, 0x82, 0x35, 0x2c, 0xb4, 0x02, 0x43, 0x6d, 0x3f, 0x88, 0xc2, 0x99, 0x61, - 0x36, 0xdc, 0x4f, 0x66, 0x1c, 0x44, 0xfc, 0x6b, 0x6b, 0x7e, 0x10, 0xc5, 0x1f, 0x40, 0xff, 0x85, - 0x98, 0x57, 0x47, 0xdf, 0x06, 0x79, 0xe2, 0xed, 0xcd, 0x8c, 0x30, 0x2a, 0xb3, 0x69, 0x54, 0x96, - 0xbd, 0xbd, 0xbb, 0x4e, 0x10, 0x9f, 0xd2, 0xcb, 0xde, 0x1e, 0xa6, 0x75, 0xd0, 0xe7, 0xa1, 0x24, - 0xb7, 0x78, 0x28, 0xd4, 0x1c, 0xa9, 0x4b, 0x4c, 0x1e, 0x0c, 0x98, 0xbc, 0xd7, 0x71, 0x03, 0xb2, - 0x4b, 0xbc, 0x28, 0x8c, 0xcf, 0x34, 0x09, 0x0d, 0x71, 0x4c, 0x0d, 0x7d, 0x5e, 0xea, 0xd6, 0x56, - 0xfd, 0x8e, 0x17, 0x85, 0x33, 0x25, 0xd6, 0xbd, 0xd4, 0x57, 0x8f, 0xbb, 0x31, 0x5e, 0x52, 0xf9, - 0xc6, 0x2b, 0x63, 0x83, 0x14, 0xc2, 0x30, 0xde, 0x72, 0xf7, 0x88, 0x47, 0xc2, 0xb0, 0x16, 0xf8, - 0x1b, 0x64, 0x06, 0x58, 0xcf, 0x2f, 0xa4, 0x3f, 0x06, 0xf8, 0x1b, 0x64, 0x71, 0xfa, 0xf0, 0xa0, - 0x3c, 0x7e, 0x4b, 0xaf, 0x83, 0x4d, 0x12, 0xe8, 0x0e, 0x4c, 0x50, 0xb9, 0xc6, 0x8d, 0x89, 0x8e, - 0xf6, 0x23, 0xca, 0xa4, 0x0f, 0x6c, 0x54, 0xc2, 0x09, 0x22, 0xe8, 0x4d, 0x28, 0xb5, 0xdc, 0x4d, - 0xd2, 0xd8, 0x6f, 0xb4, 0xc8, 0xcc, 0x18, 0xa3, 0x98, 0xba, 0xad, 0x6e, 0x49, 0x24, 0x2e, 0x17, - 0xa9, 0xbf, 0x38, 0xae, 0x8e, 0xee, 0xc2, 0xf9, 0x88, 0x04, 0xbb, 0xae, 
0xe7, 0xd0, 0xed, 0x20, - 0xe4, 0x05, 0xf6, 0xa4, 0x32, 0xce, 0xd6, 0xdb, 0x25, 0x31, 0x74, 0xe7, 0xd7, 0x53, 0xb1, 0x70, - 0x46, 0x6d, 0x74, 0x1b, 0x26, 0xd9, 0x4e, 0xa8, 0x75, 0x5a, 0xad, 0x9a, 0xdf, 0x72, 0x1b, 0xfb, - 0x33, 0x13, 0x8c, 0xe0, 0x27, 0xe4, 0xbd, 0x50, 0x35, 0xc1, 0x47, 0x07, 0x65, 0x88, 0xff, 0xe1, - 0x64, 0x6d, 0xb4, 0xc1, 0x74, 0xe8, 0x9d, 0xc0, 0x8d, 0xf6, 0xe9, 0xfa, 0x25, 0x0f, 0xa2, 0x99, - 0xc9, 0x9e, 0xa2, 0xb0, 0x8e, 0xaa, 0x14, 0xed, 0x7a, 0x21, 0x4e, 0x12, 0xa4, 0x5b, 0x3b, 0x8c, - 0x9a, 0xae, 0x37, 0x33, 0xc5, 0x4e, 0x0c, 0xb5, 0x33, 0xea, 0xb4, 0x10, 0x73, 0x18, 0xd3, 0x9f, - 0xd3, 0x1f, 0xb7, 0xe9, 0x09, 0x3a, 0xcd, 0x10, 0x63, 0xfd, 0xb9, 0x04, 0xe0, 0x18, 0x87, 0x32, - 0x35, 0x51, 0xb4, 0x3f, 0x83, 0x18, 0xaa, 0xda, 0x2e, 0xeb, 0xeb, 0x9f, 0xc7, 0xb4, 0x1c, 0xdd, - 0x82, 0x11, 0xe2, 0xed, 0xad, 0x04, 0xfe, 0xee, 0xcc, 0x99, 0xec, 0x3d, 0xbb, 0xcc, 0x51, 0xf8, - 0x81, 0x1e, 0x0b, 0x78, 0xa2, 0x18, 0x4b, 0x12, 0xe8, 0x01, 0xcc, 0xa4, 0xcc, 0x08, 0x9f, 0x80, - 0xb3, 0x6c, 0x02, 0x5e, 0x17, 0x75, 0x67, 0xd6, 0x33, 0xf0, 0x8e, 0x7a, 0xc0, 0x70, 0x26, 0x75, - 0xf4, 0x05, 0x18, 0xe7, 0x1b, 0x8a, 0x3f, 0xbe, 0x85, 0x33, 0xe7, 0xd8, 0xd7, 0x5c, 0xce, 0xde, - 0x9c, 0x1c, 0x71, 0xf1, 0x9c, 0xe8, 0xd0, 0xb8, 0x5e, 0x1a, 0x62, 0x93, 0x9a, 0xbd, 0x01, 0x13, - 0xea, 0xdc, 0x62, 0x4b, 0x07, 0x95, 0x61, 0x88, 0x71, 0x3b, 0x42, 0xbf, 0x55, 0xa2, 0x33, 0xc5, - 0x38, 0x21, 0xcc, 0xcb, 0xd9, 0x4c, 0xb9, 0xef, 0x93, 0xc5, 0xfd, 0x88, 0x70, 0xa9, 0x3a, 0xaf, - 0xcd, 0x94, 0x04, 0xe0, 0x18, 0xc7, 0xfe, 0x7f, 0x9c, 0x6b, 0x8c, 0x0f, 0xc7, 0x01, 0xae, 0x83, - 0xe7, 0xa0, 0xb8, 0xed, 0x87, 0x11, 0xc5, 0x66, 0x6d, 0x0c, 0xc5, 0x7c, 0xe2, 0x0d, 0x51, 0x8e, - 0x15, 0x06, 0x7a, 0x0d, 0xc6, 0x1b, 0x7a, 0x03, 0xe2, 0x2e, 0x53, 0x43, 0x60, 0xb4, 0x8e, 0x4d, - 0x5c, 0xf4, 0x2a, 0x14, 0xd9, 0xd3, 0x79, 0xc3, 0x6f, 0x09, 0x26, 0x4b, 0x5e, 0xc8, 0xc5, 0x9a, - 0x28, 0x3f, 0xd2, 0x7e, 0x63, 0x85, 0x8d, 0xae, 0xc0, 0x30, 0xed, 0x42, 0xb5, 0x26, 0x6e, 0x11, - 0xa5, 0xaa, 
0xb9, 0xc1, 0x4a, 0xb1, 0x80, 0xda, 0x7f, 0x2d, 0xa7, 0x8d, 0x32, 0x95, 0x48, 0x09, - 0xaa, 0xc1, 0xc8, 0x7d, 0xc7, 0x8d, 0x5c, 0x6f, 0x4b, 0xb0, 0x0b, 0xcf, 0xf4, 0xbc, 0x52, 0x58, - 0xa5, 0x7b, 0xbc, 0x02, 0xbf, 0xf4, 0xc4, 0x1f, 0x2c, 0xc9, 0x50, 0x8a, 0x41, 0xc7, 0xf3, 0x28, - 0xc5, 0xdc, 0xa0, 0x14, 0x31, 0xaf, 0xc0, 0x29, 0x8a, 0x3f, 0x58, 0x92, 0x41, 0xef, 0x00, 0xc8, - 0x65, 0x49, 0x9a, 0xe2, 0xc9, 0xfa, 0xb9, 0xfe, 0x44, 0xd7, 0x55, 0x9d, 0xc5, 0x09, 0x7a, 0xa5, - 0xc6, 0xff, 0xb1, 0x46, 0xcf, 0x8e, 0x18, 0x5b, 0xd5, 0xdd, 0x19, 0xf4, 0x9d, 0xf4, 0x24, 0x70, - 0x82, 0x88, 0x34, 0x17, 0x22, 0x31, 0x38, 0x9f, 0x1c, 0x4c, 0xa6, 0x58, 0x77, 0x77, 0x89, 0x7e, - 0x6a, 0x08, 0x22, 0x38, 0xa6, 0x67, 0xff, 0x52, 0x1e, 0x66, 0xb2, 0xba, 0x4b, 0x17, 0x1d, 0x79, - 0xe0, 0x46, 0x4b, 0x94, 0x1b, 0xb2, 0xcc, 0x45, 0xb7, 0x2c, 0xca, 0xb1, 0xc2, 0xa0, 0xb3, 0x1f, - 0xba, 0x5b, 0x52, 0x24, 0x1c, 0x8a, 0x67, 0xbf, 0xce, 0x4a, 0xb1, 0x80, 0x52, 0xbc, 0x80, 0x38, - 0xa1, 0xb0, 0x89, 0xd0, 0x56, 0x09, 0x66, 0xa5, 0x58, 0x40, 0x75, 0x7d, 0x53, 0xa1, 0x8f, 0xbe, - 0xc9, 0x18, 0xa2, 0xa1, 0x93, 0x1d, 0x22, 0xf4, 0x45, 0x80, 0x4d, 0xd7, 0x73, 0xc3, 0x6d, 0x46, - 0x7d, 0xf8, 0xd8, 0xd4, 0x15, 0x2f, 0xb5, 0xa2, 0xa8, 0x60, 0x8d, 0x22, 0x7a, 0x19, 0x46, 0xd5, - 0x06, 0xac, 0x56, 0xd8, 0x03, 0x91, 0xf6, 0xe0, 0x1e, 0x9f, 0x46, 0x15, 0xac, 0xe3, 0xd9, 0xef, - 0x26, 0xd7, 0x8b, 0xd8, 0x01, 0xda, 0xf8, 0x5a, 0x83, 0x8e, 0x6f, 0xae, 0xf7, 0xf8, 0xda, 0xbf, - 0x91, 0x87, 0x49, 0xa3, 0xb1, 0x4e, 0x38, 0xc0, 0x99, 0x75, 0x9d, 0xde, 0x73, 0x4e, 0x44, 0xc4, - 0xfe, 0xb3, 0xfb, 0x6f, 0x15, 0xfd, 0x2e, 0xa4, 0x3b, 0x80, 0xd7, 0x47, 0x5f, 0x84, 0x52, 0xcb, - 0x09, 0x99, 0xee, 0x8a, 0x88, 0x7d, 0x37, 0x08, 0xb1, 0x58, 0x8e, 0x70, 0xc2, 0x48, 0xbb, 0x6a, - 0x38, 0xed, 0x98, 0x24, 0xbd, 0x90, 0x29, 0xef, 0x23, 0x8d, 0x6e, 0x54, 0x27, 0x28, 0x83, 0xb4, - 0x8f, 0x39, 0x0c, 0xbd, 0x0a, 0x63, 0x01, 0x61, 0xab, 0x62, 0x89, 0xb2, 0x72, 0x6c, 0x99, 0x0d, - 0xc5, 0x3c, 0x1f, 0xd6, 0x60, 0xd8, 0xc0, 0x8c, 
0x59, 0xf9, 0xe1, 0x1e, 0xac, 0xfc, 0x33, 0x30, - 0xc2, 0x7e, 0xa8, 0x15, 0xa0, 0x66, 0xa3, 0xca, 0x8b, 0xb1, 0x84, 0x27, 0x17, 0x4c, 0x71, 0xc0, - 0x05, 0xf3, 0x49, 0x98, 0xa8, 0x38, 0x64, 0xd7, 0xf7, 0x96, 0xbd, 0x66, 0xdb, 0x77, 0xbd, 0x08, - 0xcd, 0x40, 0x81, 0xdd, 0x0e, 0x7c, 0x6f, 0x17, 0x28, 0x05, 0x5c, 0xa0, 0x8c, 0xb9, 0xbd, 0x05, - 0xe7, 0x2a, 0xfe, 0x7d, 0xef, 0xbe, 0x13, 0x34, 0x17, 0x6a, 0x55, 0x4d, 0xce, 0x5d, 0x93, 0x72, - 0x16, 0x37, 0x62, 0x49, 0x3d, 0x53, 0xb5, 0x9a, 0xfc, 0xae, 0x5d, 0x71, 0x5b, 0x24, 0x43, 0x1b, - 0xf1, 0x37, 0x72, 0x46, 0x4b, 0x31, 0xbe, 0x7a, 0x30, 0xb2, 0x32, 0x1f, 0x8c, 0xde, 0x82, 0xe2, - 0xa6, 0x4b, 0x5a, 0x4d, 0x4c, 0x36, 0xc5, 0x12, 0x7b, 0x3a, 0xfb, 0x5d, 0x7e, 0x85, 0x62, 0x4a, - 0xed, 0x13, 0x97, 0xd2, 0x56, 0x44, 0x65, 0xac, 0xc8, 0xa0, 0x1d, 0x98, 0x92, 0x62, 0x80, 0x84, - 0x8a, 0x05, 0xf7, 0x4c, 0x2f, 0xd9, 0xc2, 0x24, 0x7e, 0xf6, 0xf0, 0xa0, 0x3c, 0x85, 0x13, 0x64, - 0x70, 0x17, 0x61, 0x2a, 0x96, 0xed, 0xd2, 0xa3, 0xb5, 0xc0, 0x86, 0x9f, 0x89, 0x65, 0x4c, 0xc2, - 0x64, 0xa5, 0xf6, 0x8f, 0x59, 0xf0, 0x58, 0xd7, 0xc8, 0x08, 0x49, 0xfb, 0x84, 0x67, 0x21, 0x29, - 0xf9, 0xe6, 0xfa, 0x4b, 0xbe, 0xf6, 0xdf, 0xb7, 0xe0, 0xec, 0xf2, 0x6e, 0x3b, 0xda, 0xaf, 0xb8, - 0xe6, 0xeb, 0xce, 0x2b, 0x30, 0xbc, 0x4b, 0x9a, 0x6e, 0x67, 0x57, 0xcc, 0x5c, 0x59, 0x1e, 0x3f, - 0xab, 0xac, 0xf4, 0xe8, 0xa0, 0x3c, 0x5e, 0x8f, 0xfc, 0xc0, 0xd9, 0x22, 0xbc, 0x00, 0x0b, 0x74, - 0x76, 0x88, 0xbb, 0xef, 0x93, 0x5b, 0xee, 0xae, 0x2b, 0xed, 0x2c, 0x7a, 0xea, 0xce, 0xe6, 0xe4, - 0x80, 0xce, 0xbd, 0xd5, 0x71, 0xbc, 0xc8, 0x8d, 0xf6, 0xc5, 0xc3, 0x8c, 0x24, 0x82, 0x63, 0x7a, - 0xf6, 0x37, 0x2c, 0x98, 0x94, 0xeb, 0x7e, 0xa1, 0xd9, 0x0c, 0x48, 0x18, 0xa2, 0x59, 0xc8, 0xb9, - 0x6d, 0xd1, 0x4b, 0x10, 0xbd, 0xcc, 0x55, 0x6b, 0x38, 0xe7, 0xb6, 0x51, 0x0d, 0x4a, 0xdc, 0x5c, - 0x23, 0x5e, 0x5c, 0x03, 0x19, 0x7d, 0xb0, 0x1e, 0xac, 0xcb, 0x9a, 0x38, 0x26, 0x22, 0x39, 0x38, - 0x76, 0x66, 0xe6, 0xcd, 0x57, 0xaf, 0x1b, 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x15, 0x8a, 
0x9e, 0xdf, - 0xe4, 0xd6, 0x33, 0xfc, 0xf6, 0x63, 0x4b, 0x76, 0x4d, 0x94, 0x61, 0x05, 0xb5, 0x7f, 0xd0, 0x82, - 0x31, 0xf9, 0x65, 0x03, 0x32, 0x93, 0x74, 0x6b, 0xc5, 0x8c, 0x64, 0xbc, 0xb5, 0x28, 0x33, 0xc8, - 0x20, 0x06, 0x0f, 0x98, 0x3f, 0x0e, 0x0f, 0x68, 0xff, 0x68, 0x0e, 0x26, 0x64, 0x77, 0xea, 0x9d, - 0x8d, 0x90, 0x44, 0x68, 0x1d, 0x4a, 0x0e, 0x1f, 0x72, 0x22, 0x57, 0xec, 0x53, 0xe9, 0xc2, 0x87, - 0x31, 0x3f, 0xf1, 0xb5, 0xbc, 0x20, 0x6b, 0xe3, 0x98, 0x10, 0x6a, 0xc1, 0xb4, 0xe7, 0x47, 0xec, - 0x88, 0x56, 0xf0, 0x5e, 0x4f, 0x20, 0x49, 0xea, 0x17, 0x04, 0xf5, 0xe9, 0xb5, 0x24, 0x15, 0xdc, - 0x4d, 0x18, 0x2d, 0x4b, 0x85, 0x47, 0x3e, 0x5b, 0xdc, 0xd0, 0x67, 0x21, 0x5d, 0xdf, 0x61, 0xff, - 0xaa, 0x05, 0x25, 0x89, 0x76, 0x1a, 0xaf, 0x5d, 0xab, 0x30, 0x12, 0xb2, 0x49, 0x90, 0x43, 0x63, - 0xf7, 0xea, 0x38, 0x9f, 0xaf, 0xf8, 0xe6, 0xe1, 0xff, 0x43, 0x2c, 0x69, 0x30, 0x7d, 0xb7, 0xea, - 0xfe, 0x47, 0x44, 0xdf, 0xad, 0xfa, 0x93, 0x71, 0xc3, 0xfc, 0x57, 0xd6, 0x67, 0x4d, 0xac, 0xa5, - 0x0c, 0x52, 0x3b, 0x20, 0x9b, 0xee, 0x83, 0x24, 0x83, 0x54, 0x63, 0xa5, 0x58, 0x40, 0xd1, 0x3b, - 0x30, 0xd6, 0x90, 0x8a, 0xce, 0xf8, 0x18, 0xb8, 0xd2, 0x53, 0xe9, 0xae, 0xde, 0x67, 0xb8, 0x65, - 0xed, 0x92, 0x56, 0x1f, 0x1b, 0xd4, 0xcc, 0xe7, 0xf6, 0x7c, 0xbf, 0xe7, 0xf6, 0x98, 0x6e, 0xf6, - 0xe3, 0xf3, 0x8f, 0x5b, 0x30, 0xcc, 0xd5, 0x65, 0x83, 0xe9, 0x17, 0xb5, 0xe7, 0xaa, 0x78, 0xec, - 0xee, 0xd2, 0x42, 0xf1, 0xfc, 0x84, 0x56, 0xa1, 0xc4, 0x7e, 0x30, 0xb5, 0x41, 0x3e, 0xdb, 0xa4, - 0x98, 0xb7, 0xaa, 0x77, 0xf0, 0xae, 0xac, 0x86, 0x63, 0x0a, 0xf6, 0x0f, 0xe7, 0xe9, 0x51, 0x15, - 0xa3, 0x1a, 0x37, 0xb8, 0xf5, 0xe8, 0x6e, 0xf0, 0xdc, 0xa3, 0xba, 0xc1, 0xb7, 0x60, 0xb2, 0xa1, - 0x3d, 0x6e, 0xc5, 0x33, 0x79, 0xb5, 0xe7, 0x22, 0xd1, 0xde, 0xc1, 0xb8, 0xca, 0x68, 0xc9, 0x24, - 0x82, 0x93, 0x54, 0xd1, 0x77, 0xc2, 0x18, 0x9f, 0x67, 0xd1, 0x0a, 0xb7, 0x58, 0xf8, 0x44, 0xf6, - 0x7a, 0xd1, 0x9b, 0x60, 0x2b, 0xb1, 0xae, 0x55, 0xc7, 0x06, 0x31, 0xfb, 0x97, 0x8a, 0x30, 0xb4, - 0xbc, 0x47, 0xbc, 0xe8, 
0x14, 0x0e, 0xa4, 0x06, 0x4c, 0xb8, 0xde, 0x9e, 0xdf, 0xda, 0x23, 0x4d, - 0x0e, 0x3f, 0xce, 0xe5, 0x7a, 0x5e, 0x90, 0x9e, 0xa8, 0x1a, 0x24, 0x70, 0x82, 0xe4, 0xa3, 0x90, - 0x30, 0xaf, 0xc3, 0x30, 0x9f, 0x7b, 0x21, 0x5e, 0xa6, 0x2a, 0x83, 0xd9, 0x20, 0x8a, 0x5d, 0x10, - 0x4b, 0xbf, 0x5c, 0xfb, 0x2c, 0xaa, 0xa3, 0x77, 0x61, 0x62, 0xd3, 0x0d, 0xc2, 0x88, 0x8a, 0x86, - 0x61, 0xe4, 0xec, 0xb6, 0x1f, 0x42, 0xa2, 0x54, 0xe3, 0xb0, 0x62, 0x50, 0xc2, 0x09, 0xca, 0x68, - 0x0b, 0xc6, 0xa9, 0x90, 0x13, 0x37, 0x35, 0x72, 0xec, 0xa6, 0x94, 0xca, 0xe8, 0x96, 0x4e, 0x08, - 0x9b, 0x74, 0xe9, 0x61, 0xd2, 0x60, 0x42, 0x51, 0x91, 0x71, 0x14, 0xea, 0x30, 0xe1, 0xd2, 0x10, - 0x87, 0xd1, 0x33, 0x89, 0x99, 0xad, 0x94, 0xcc, 0x33, 0x49, 0x33, 0x4e, 0xf9, 0x12, 0x94, 0x08, - 0x1d, 0x42, 0x4a, 0x58, 0x28, 0xc6, 0xe7, 0x07, 0xeb, 0xeb, 0xaa, 0xdb, 0x08, 0x7c, 0x53, 0x96, - 0x5f, 0x96, 0x94, 0x70, 0x4c, 0x14, 0x2d, 0xc1, 0x70, 0x48, 0x02, 0x97, 0x84, 0x42, 0x45, 0xde, - 0x63, 0x1a, 0x19, 0x1a, 0xb7, 0x3d, 0xe7, 0xbf, 0xb1, 0xa8, 0x4a, 0x97, 0x97, 0xc3, 0xa4, 0x21, - 0xa6, 0x15, 0xd7, 0x96, 0xd7, 0x02, 0x2b, 0xc5, 0x02, 0x8a, 0xde, 0x84, 0x91, 0x80, 0xb4, 0x98, - 0xb2, 0x68, 0x7c, 0xf0, 0x45, 0xce, 0x75, 0x4f, 0xbc, 0x1e, 0x96, 0x04, 0xd0, 0x4d, 0x40, 0x01, - 0xa1, 0x3c, 0x84, 0xeb, 0x6d, 0x29, 0x63, 0x0e, 0xa1, 0xeb, 0x7e, 0x5c, 0xb4, 0x7f, 0x06, 0xc7, - 0x18, 0xd2, 0x2a, 0x15, 0xa7, 0x54, 0x43, 0xd7, 0x61, 0x5a, 0x95, 0x56, 0xbd, 0x30, 0x72, 0xbc, - 0x06, 0x61, 0x6a, 0xee, 0x52, 0xcc, 0x15, 0xe1, 0x24, 0x02, 0xee, 0xae, 0x63, 0xff, 0x0c, 0x65, - 0x67, 0xe8, 0x68, 0x9d, 0x02, 0x2f, 0xf0, 0x86, 0xc9, 0x0b, 0x5c, 0xc8, 0x9c, 0xb9, 0x0c, 0x3e, - 0xe0, 0xd0, 0x82, 0x51, 0x6d, 0x66, 0xe3, 0x35, 0x6b, 0xf5, 0x58, 0xb3, 0x1d, 0x98, 0xa2, 0x2b, - 0xfd, 0xf6, 0x46, 0x48, 0x82, 0x3d, 0xd2, 0x64, 0x0b, 0x33, 0xf7, 0x70, 0x0b, 0x53, 0xbd, 0x32, - 0xdf, 0x4a, 0x10, 0xc4, 0x5d, 0x4d, 0xa0, 0x57, 0xa4, 0xe6, 0x24, 0x6f, 0x18, 0x69, 0x71, 0xad, - 0xc8, 0xd1, 0x41, 0x79, 0x4a, 0xfb, 0x10, 0x5d, 0x53, 0x62, 
0x7f, 0x49, 0x7e, 0xa3, 0x7a, 0xcd, - 0x6f, 0xa8, 0xc5, 0x92, 0x78, 0xcd, 0x57, 0xcb, 0x01, 0xc7, 0x38, 0x74, 0x8f, 0x52, 0x11, 0x24, - 0xf9, 0x9a, 0x4f, 0x05, 0x14, 0xcc, 0x20, 0xf6, 0x8b, 0x00, 0xcb, 0x0f, 0x48, 0x83, 0x2f, 0x75, - 0xfd, 0x01, 0xd2, 0xca, 0x7e, 0x80, 0xb4, 0xff, 0x9d, 0x05, 0x13, 0x2b, 0x4b, 0x86, 0x98, 0x38, - 0x07, 0xc0, 0x65, 0xa3, 0x7b, 0xf7, 0xd6, 0xa4, 0x6e, 0x9d, 0xab, 0x47, 0x55, 0x29, 0xd6, 0x30, - 0xd0, 0x05, 0xc8, 0xb7, 0x3a, 0x9e, 0x10, 0x59, 0x46, 0x0e, 0x0f, 0xca, 0xf9, 0x5b, 0x1d, 0x0f, - 0xd3, 0x32, 0xcd, 0x42, 0x30, 0x3f, 0xb0, 0x85, 0x60, 0x5f, 0xf7, 0x2a, 0x54, 0x86, 0xa1, 0xfb, - 0xf7, 0xdd, 0x26, 0x37, 0x62, 0x17, 0x7a, 0xff, 0x7b, 0xf7, 0xaa, 0x95, 0x10, 0xf3, 0x72, 0xfb, - 0xab, 0x79, 0x98, 0x5d, 0x69, 0x91, 0x07, 0x1f, 0xd0, 0x90, 0x7f, 0x50, 0xfb, 0xc6, 0xe3, 0xf1, - 0x8b, 0xc7, 0xb5, 0x61, 0xed, 0x3f, 0x1e, 0x9b, 0x30, 0xc2, 0x1f, 0xb3, 0xa5, 0x59, 0xff, 0x6b, - 0x69, 0xad, 0x67, 0x0f, 0xc8, 0x1c, 0x7f, 0x14, 0x17, 0xe6, 0xfc, 0xea, 0xa6, 0x15, 0xa5, 0x58, - 0x12, 0x9f, 0xfd, 0x0c, 0x8c, 0xe9, 0x98, 0xc7, 0xb2, 0x26, 0xff, 0x0b, 0x79, 0x98, 0xa2, 0x3d, - 0x78, 0xa4, 0x13, 0x71, 0xa7, 0x7b, 0x22, 0x4e, 0xda, 0xa2, 0xb8, 0xff, 0x6c, 0xbc, 0x93, 0x9c, - 0x8d, 0x17, 0xb2, 0x66, 0xe3, 0xb4, 0xe7, 0xe0, 0x7b, 0x2d, 0x38, 0xb3, 0xd2, 0xf2, 0x1b, 0x3b, - 0x09, 0xab, 0xdf, 0x97, 0x61, 0x94, 0x9e, 0xe3, 0xa1, 0xe1, 0x45, 0x64, 0xf8, 0x95, 0x09, 0x10, - 0xd6, 0xf1, 0xb4, 0x6a, 0x77, 0xee, 0x54, 0x2b, 0x69, 0xee, 0x68, 0x02, 0x84, 0x75, 0x3c, 0xfb, - 0xeb, 0x16, 0x5c, 0xbc, 0xbe, 0xb4, 0x1c, 0x2f, 0xc5, 0x2e, 0x8f, 0x38, 0x2a, 0x05, 0x36, 0xb5, - 0xae, 0xc4, 0x52, 0x60, 0x85, 0xf5, 0x42, 0x40, 0x3f, 0x2a, 0xde, 0x9e, 0x3f, 0x6d, 0xc1, 0x99, - 0xeb, 0x6e, 0x44, 0xaf, 0xe5, 0xa4, 0x6f, 0x16, 0xbd, 0x97, 0x43, 0x37, 0xf2, 0x83, 0xfd, 0xa4, - 0x6f, 0x16, 0x56, 0x10, 0xac, 0x61, 0xf1, 0x96, 0xf7, 0x5c, 0x66, 0x46, 0x95, 0x33, 0x55, 0x51, - 0x58, 0x94, 0x63, 0x85, 0x41, 0x3f, 0xac, 0xe9, 0x06, 0x4c, 0x94, 0xd8, 0x17, 0x27, 0xac, 0xfa, - 
0xb0, 0x8a, 0x04, 0xe0, 0x18, 0xc7, 0xfe, 0x31, 0x0b, 0xce, 0x5d, 0x6f, 0x75, 0xc2, 0x88, 0x04, - 0x9b, 0xa1, 0xd1, 0xd9, 0x17, 0xa1, 0x44, 0xa4, 0xb8, 0x2e, 0xfa, 0xaa, 0x18, 0x4c, 0x25, 0xc7, - 0x73, 0xc7, 0x30, 0x85, 0x37, 0x80, 0xe7, 0xc0, 0xf1, 0x5c, 0xc7, 0x7e, 0x3e, 0x07, 0xe3, 0x37, - 0xd6, 0xd7, 0x6b, 0xd7, 0x49, 0x24, 0x6e, 0xb1, 0xfe, 0xaa, 0x66, 0xac, 0x69, 0xcc, 0x7a, 0x09, - 0x45, 0x9d, 0xc8, 0x6d, 0xcd, 0x71, 0x4f, 0xe4, 0xb9, 0xaa, 0x17, 0xdd, 0x0e, 0xea, 0x51, 0xe0, - 0x7a, 0x5b, 0xa9, 0x3a, 0x36, 0x79, 0xd7, 0xe6, 0xb3, 0xee, 0x5a, 0xf4, 0x22, 0x0c, 0x33, 0x57, - 0x68, 0x29, 0x9e, 0x3c, 0xae, 0x64, 0x0a, 0x56, 0x7a, 0x74, 0x50, 0x2e, 0xdd, 0xc1, 0x55, 0xfe, - 0x07, 0x0b, 0x54, 0x74, 0x07, 0x46, 0xb7, 0xa3, 0xa8, 0x7d, 0x83, 0x38, 0x4d, 0x12, 0xc8, 0xd3, - 0xe1, 0x52, 0xda, 0xe9, 0x40, 0x07, 0x81, 0xa3, 0xc5, 0x1b, 0x2a, 0x2e, 0x0b, 0xb1, 0x4e, 0xc7, - 0xae, 0x03, 0xc4, 0xb0, 0x13, 0xd2, 0x2f, 0xd8, 0x7f, 0x60, 0xc1, 0x08, 0xf7, 0x4a, 0x0b, 0xd0, - 0xeb, 0x50, 0x20, 0x0f, 0x48, 0x43, 0x70, 0x8e, 0xa9, 0x1d, 0x8e, 0x19, 0x0f, 0xae, 0x2d, 0xa7, - 0xff, 0x31, 0xab, 0x85, 0x6e, 0xc0, 0x08, 0xed, 0xed, 0x75, 0xe5, 0xa2, 0xf7, 0x64, 0xd6, 0x17, - 0xab, 0x69, 0xe7, 0xbc, 0x8a, 0x28, 0xc2, 0xb2, 0x3a, 0xd3, 0xfc, 0x36, 0xda, 0x75, 0x7a, 0x80, - 0x45, 0xbd, 0xee, 0xd9, 0xf5, 0xa5, 0x1a, 0x47, 0x12, 0xd4, 0xb8, 0xe6, 0x57, 0x16, 0xe2, 0x98, - 0x88, 0xbd, 0x0e, 0x25, 0x3a, 0xa9, 0x0b, 0x2d, 0xd7, 0xe9, 0xad, 0x74, 0x7e, 0x16, 0x4a, 0x52, - 0x01, 0x1c, 0x0a, 0xc7, 0x26, 0x46, 0x55, 0xea, 0x87, 0x43, 0x1c, 0xc3, 0xed, 0x4d, 0x38, 0xcb, - 0x5e, 0xfe, 0x9d, 0x68, 0xdb, 0xd8, 0x63, 0xfd, 0x17, 0xf3, 0x73, 0x42, 0x10, 0xe3, 0x33, 0x33, - 0xa3, 0xf9, 0x0e, 0x8c, 0x49, 0x8a, 0xb1, 0x50, 0x66, 0xff, 0x61, 0x01, 0x1e, 0xaf, 0xd6, 0xb3, - 0x1d, 0x16, 0x5f, 0x85, 0x31, 0xce, 0xa6, 0xd1, 0xa5, 0xed, 0xb4, 0x44, 0xbb, 0xea, 0x5d, 0x6c, - 0x5d, 0x83, 0x61, 0x03, 0x13, 0x5d, 0x84, 0xbc, 0xfb, 0x9e, 0x97, 0x34, 0xc3, 0xad, 0xbe, 0xb5, - 0x86, 0x69, 0x39, 0x05, 0x53, 0x8e, 
0x8f, 0x1f, 0xa5, 0x0a, 0xac, 0xb8, 0xbe, 0x37, 0x60, 0xc2, - 0x0d, 0x1b, 0xa1, 0x5b, 0xf5, 0xe8, 0x39, 0x13, 0x3b, 0xbb, 0xc6, 0x4a, 0x02, 0xda, 0x69, 0x05, - 0xc5, 0x09, 0x6c, 0xed, 0x5c, 0x1f, 0x1a, 0x98, 0x6b, 0xec, 0xeb, 0xe9, 0x43, 0x19, 0xe2, 0x36, - 0xfb, 0xba, 0x90, 0x19, 0xb5, 0x09, 0x86, 0x98, 0x7f, 0x70, 0x88, 0x25, 0x8c, 0x4a, 0x60, 0x8d, - 0x6d, 0xa7, 0xbd, 0xd0, 0x89, 0xb6, 0x2b, 0x6e, 0xd8, 0xf0, 0xf7, 0x48, 0xb0, 0xcf, 0x84, 0xe7, - 0x62, 0x2c, 0x81, 0x29, 0xc0, 0xd2, 0x8d, 0x85, 0x1a, 0xc5, 0xc4, 0xdd, 0x75, 0x4c, 0xae, 0x10, - 0x4e, 0x82, 0x2b, 0x5c, 0x80, 0x49, 0xd9, 0x4c, 0x9d, 0x84, 0xec, 0x8e, 0x18, 0x65, 0x1d, 0x53, - 0xa6, 0xb6, 0xa2, 0x58, 0x75, 0x2b, 0x89, 0x8f, 0x5e, 0x81, 0x71, 0xd7, 0x73, 0x23, 0xd7, 0x89, - 0xfc, 0x80, 0xdd, 0xb0, 0x5c, 0x4e, 0x66, 0x96, 0x6c, 0x55, 0x1d, 0x80, 0x4d, 0x3c, 0xfb, 0x3f, - 0x15, 0x60, 0x9a, 0x4d, 0xdb, 0xb7, 0x56, 0xd8, 0x47, 0x66, 0x85, 0xdd, 0xe9, 0x5e, 0x61, 0x27, - 0xc1, 0xee, 0x7e, 0x98, 0xcb, 0xec, 0x5d, 0x28, 0x29, 0x5b, 0x60, 0xe9, 0x0c, 0x60, 0x65, 0x38, - 0x03, 0xf4, 0xe7, 0x3e, 0xe4, 0x33, 0x6e, 0x3e, 0xf5, 0x19, 0xf7, 0x6f, 0x59, 0x10, 0x9b, 0x44, - 0xa2, 0x1b, 0x50, 0x6a, 0xfb, 0xcc, 0xec, 0x20, 0x90, 0xb6, 0x3c, 0x8f, 0xa7, 0x5e, 0x54, 0xfc, - 0x52, 0xe4, 0xe3, 0x57, 0x93, 0x35, 0x70, 0x5c, 0x19, 0x2d, 0xc2, 0x48, 0x3b, 0x20, 0xf5, 0x88, - 0xb9, 0xc0, 0xf6, 0xa5, 0xc3, 0xd7, 0x08, 0xc7, 0xc7, 0xb2, 0xa2, 0xfd, 0x0b, 0x16, 0x00, 0x7f, - 0x29, 0x75, 0xbc, 0x2d, 0x72, 0x0a, 0xda, 0xdf, 0x0a, 0x14, 0xc2, 0x36, 0x69, 0xf4, 0x32, 0x08, - 0x89, 0xfb, 0x53, 0x6f, 0x93, 0x46, 0x3c, 0xe0, 0xf4, 0x1f, 0x66, 0xb5, 0xed, 0xef, 0x03, 0x98, - 0x88, 0xd1, 0xaa, 0x11, 0xd9, 0x45, 0xcf, 0x1b, 0x2e, 0x71, 0x17, 0x12, 0x2e, 0x71, 0x25, 0x86, - 0xad, 0x29, 0x1a, 0xdf, 0x85, 0xfc, 0xae, 0xf3, 0x40, 0x68, 0x92, 0x9e, 0xed, 0xdd, 0x0d, 0x4a, - 0x7f, 0x6e, 0xd5, 0x79, 0xc0, 0x65, 0xa6, 0x67, 0xe5, 0x02, 0x59, 0x75, 0x1e, 0x1c, 0x71, 0xb3, - 0x0f, 0x76, 0x48, 0xdd, 0x72, 0xc3, 0xe8, 0xcb, 0xff, 0x31, 0xfe, 0xcf, 
0x96, 0x1d, 0x6d, 0x84, - 0xb5, 0xe5, 0x7a, 0xe2, 0xdd, 0x70, 0xa0, 0xb6, 0x5c, 0x2f, 0xd9, 0x96, 0xeb, 0x0d, 0xd0, 0x96, - 0xeb, 0xa1, 0xf7, 0x61, 0x44, 0xbc, 0xd1, 0x33, 0x5b, 0x6f, 0x53, 0x4b, 0x95, 0xd5, 0x9e, 0x78, - 0xe2, 0xe7, 0x6d, 0xce, 0x4b, 0x99, 0x50, 0x94, 0xf6, 0x6d, 0x57, 0x36, 0x88, 0xfe, 0xba, 0x05, - 0x13, 0xe2, 0x37, 0x26, 0xef, 0x75, 0x48, 0x18, 0x09, 0xde, 0xf3, 0xd3, 0x83, 0xf7, 0x41, 0x54, - 0xe4, 0x5d, 0xf9, 0xb4, 0x3c, 0x66, 0x4d, 0x60, 0xdf, 0x1e, 0x25, 0x7a, 0x81, 0xfe, 0xa1, 0x05, - 0x67, 0x77, 0x9d, 0x07, 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xc8, 0xf5, 0x85, 0xed, 0xfa, 0xeb, 0x83, - 0x4d, 0x7f, 0x57, 0x75, 0xde, 0x49, 0x69, 0xe6, 0x7a, 0x36, 0x0d, 0xa5, 0x6f, 0x57, 0x53, 0xfb, - 0x35, 0xbb, 0x09, 0x45, 0xb9, 0xde, 0x52, 0x24, 0xef, 0x8a, 0xce, 0x58, 0x1f, 0xdb, 0x44, 0x42, - 0xf7, 0x4b, 0xa3, 0xed, 0x88, 0xb5, 0xf6, 0x48, 0xdb, 0x79, 0x17, 0xc6, 0xf4, 0x35, 0xf6, 0x48, - 0xdb, 0x7a, 0x0f, 0xce, 0xa4, 0xac, 0xa5, 0x47, 0xda, 0xe4, 0x7d, 0xb8, 0x90, 0xb9, 0x3e, 0x1e, - 0x65, 0xc3, 0xf6, 0xcf, 0x5b, 0xfa, 0x39, 0x78, 0x0a, 0x2a, 0xf8, 0x25, 0x53, 0x05, 0x7f, 0xa9, - 0xf7, 0xce, 0xc9, 0xd0, 0xc3, 0xbf, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8, 0x4d, 0x18, 0x6e, 0xd1, - 0x12, 0x69, 0x1c, 0x62, 0xf7, 0xdf, 0x91, 0x31, 0x2f, 0xc5, 0xca, 0x43, 0x2c, 0x28, 0xd8, 0xbf, - 0x6c, 0x41, 0xe1, 0x14, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x7c, 0x26, 0x69, 0x11, 0xd2, 0x6c, 0x0e, - 0x3b, 0xf7, 0x97, 0x1f, 0x44, 0xc4, 0x0b, 0x99, 0xa8, 0x98, 0x3a, 0x30, 0xdf, 0x05, 0x67, 0x6e, - 0xf9, 0x4e, 0x73, 0xd1, 0x69, 0x39, 0x5e, 0x83, 0x04, 0x55, 0x6f, 0xab, 0xaf, 0x95, 0x92, 0x6e, - 0x53, 0x94, 0xeb, 0x67, 0x53, 0x64, 0x6f, 0x03, 0xd2, 0x1b, 0x10, 0x76, 0x9c, 0x18, 0x46, 0x5c, - 0xde, 0x94, 0x18, 0xfe, 0xa7, 0xd3, 0xb9, 0xbb, 0xae, 0x9e, 0x69, 0x16, 0x8a, 0xbc, 0x00, 0x4b, - 0x42, 0xf6, 0xab, 0x90, 0xea, 0xbb, 0xd5, 0x5f, 0x6d, 0x60, 0x7f, 0x1e, 0xa6, 0x59, 0xcd, 0x63, - 0x8a, 0xb4, 0x76, 0x42, 0x49, 0x97, 0x12, 0x32, 0xca, 0xfe, 0x8a, 0x05, 0x93, 0x6b, 0x89, 0xf8, - 0x15, 0x57, 
0xd8, 0x7b, 0x60, 0x8a, 0x6e, 0xb8, 0xce, 0x4a, 0xb1, 0x80, 0x9e, 0xb8, 0x0e, 0xea, - 0x4f, 0x2d, 0x88, 0xdd, 0x29, 0x4f, 0x81, 0xf1, 0x5a, 0x32, 0x18, 0xaf, 0x54, 0xdd, 0x88, 0xea, - 0x4e, 0x16, 0xdf, 0x85, 0x6e, 0xaa, 0xd8, 0x01, 0x3d, 0xd4, 0x22, 0x31, 0x19, 0xee, 0x69, 0x3e, - 0x61, 0x06, 0x18, 0x90, 0xd1, 0x04, 0x98, 0x29, 0x91, 0xc2, 0xfd, 0x88, 0x98, 0x12, 0xa9, 0xfe, - 0x64, 0xec, 0xd0, 0x9a, 0xd6, 0x65, 0x76, 0x72, 0x7d, 0x3b, 0x33, 0x0d, 0x77, 0x5a, 0xee, 0xfb, - 0x44, 0x05, 0x40, 0x29, 0x0b, 0x53, 0x6f, 0x51, 0x7a, 0x74, 0x50, 0x1e, 0x57, 0xff, 0x78, 0x94, - 0xac, 0xb8, 0x8a, 0x7d, 0x03, 0x26, 0x13, 0x03, 0x86, 0x5e, 0x86, 0xa1, 0xf6, 0xb6, 0x13, 0x92, - 0x84, 0xf9, 0xe4, 0x50, 0x8d, 0x16, 0x1e, 0x1d, 0x94, 0x27, 0x54, 0x05, 0x56, 0x82, 0x39, 0xb6, - 0xfd, 0x3f, 0x2d, 0x28, 0xac, 0xf9, 0xcd, 0xd3, 0x58, 0x4c, 0x6f, 0x18, 0x8b, 0xe9, 0x89, 0xac, - 0x18, 0x83, 0x99, 0xeb, 0x68, 0x25, 0xb1, 0x8e, 0x2e, 0x65, 0x52, 0xe8, 0xbd, 0x84, 0x76, 0x61, - 0x94, 0x45, 0x2e, 0x14, 0xe6, 0x9c, 0x2f, 0x1a, 0x32, 0x40, 0x39, 0x21, 0x03, 0x4c, 0x6a, 0xa8, - 0x9a, 0x24, 0xf0, 0x0c, 0x8c, 0x08, 0x93, 0xc2, 0xa4, 0x11, 0xbc, 0xc0, 0xc5, 0x12, 0x6e, 0xff, - 0x78, 0x1e, 0x8c, 0x48, 0x89, 0xe8, 0x57, 0x2d, 0x98, 0x0b, 0xb8, 0x57, 0x61, 0xb3, 0xd2, 0x09, - 0x5c, 0x6f, 0xab, 0xde, 0xd8, 0x26, 0xcd, 0x4e, 0xcb, 0xf5, 0xb6, 0xaa, 0x5b, 0x9e, 0xaf, 0x8a, - 0x97, 0x1f, 0x90, 0x46, 0x87, 0xbd, 0x0b, 0xf4, 0x09, 0xcb, 0xa8, 0x4c, 0x76, 0xae, 0x1d, 0x1e, - 0x94, 0xe7, 0xf0, 0xb1, 0x68, 0xe3, 0x63, 0xf6, 0x05, 0x7d, 0xdd, 0x82, 0x79, 0x1e, 0x40, 0x70, - 0xf0, 0xfe, 0xf7, 0x90, 0x98, 0x6a, 0x92, 0x54, 0x4c, 0x64, 0x9d, 0x04, 0xbb, 0x8b, 0xaf, 0x88, - 0x01, 0x9d, 0xaf, 0x1d, 0xaf, 0x2d, 0x7c, 0xdc, 0xce, 0xd9, 0xff, 0x22, 0x0f, 0xe3, 0xc2, 0xa1, - 0x5d, 0x44, 0x4a, 0x79, 0xd9, 0x58, 0x12, 0x4f, 0x26, 0x96, 0xc4, 0xb4, 0x81, 0x7c, 0x32, 0x41, - 0x52, 0x42, 0x98, 0x6e, 0x39, 0x61, 0x74, 0x83, 0x38, 0x41, 0xb4, 0x41, 0x1c, 0x6e, 0xca, 0x92, - 0x3f, 0xb6, 0xd9, 0x8d, 0x52, 0xd1, 0xdc, 0x4a, 
0x12, 0xc3, 0xdd, 0xf4, 0xd1, 0x1e, 0x20, 0x66, - 0x8f, 0x13, 0x38, 0x5e, 0xc8, 0xbf, 0xc5, 0x15, 0x6f, 0x06, 0xc7, 0x6b, 0x75, 0x56, 0xb4, 0x8a, - 0x6e, 0x75, 0x51, 0xc3, 0x29, 0x2d, 0x68, 0x76, 0x56, 0x43, 0x83, 0xda, 0x59, 0x0d, 0xf7, 0xf1, - 0x34, 0xf1, 0x60, 0xaa, 0x2b, 0x26, 0xc1, 0xdb, 0x50, 0x52, 0xf6, 0x70, 0xe2, 0xd0, 0xe9, 0x1d, - 0xda, 0x23, 0x49, 0x81, 0xab, 0x51, 0x62, 0x5b, 0xcc, 0x98, 0x9c, 0xfd, 0x8f, 0x72, 0x46, 0x83, - 0x7c, 0x12, 0xd7, 0xa0, 0xe8, 0x84, 0xa1, 0xbb, 0xe5, 0x91, 0xa6, 0xd8, 0xb1, 0x1f, 0xcf, 0xda, - 0xb1, 0x46, 0x33, 0xcc, 0x26, 0x71, 0x41, 0xd4, 0xc4, 0x8a, 0x06, 0xba, 0xc1, 0x0d, 0x86, 0xf6, - 0x24, 0xcf, 0x3f, 0x18, 0x35, 0x90, 0x26, 0x45, 0x7b, 0x04, 0x8b, 0xfa, 0xe8, 0x0b, 0xdc, 0xa2, - 0xeb, 0xa6, 0xe7, 0xdf, 0xf7, 0xae, 0xfb, 0xbe, 0xf4, 0x42, 0x1b, 0x8c, 0xe0, 0xb4, 0xb4, 0xe3, - 0x52, 0xd5, 0xb1, 0x49, 0x6d, 0xb0, 0xb8, 0x3d, 0xdf, 0x0d, 0x67, 0x28, 0x69, 0xd3, 0x97, 0x24, - 0x44, 0x04, 0x26, 0x45, 0xb4, 0x04, 0x59, 0x26, 0xc6, 0x2e, 0x95, 0x9d, 0x37, 0x6b, 0xc7, 0x4a, - 0xbf, 0x9b, 0x26, 0x09, 0x9c, 0xa4, 0x69, 0xff, 0x94, 0x05, 0xcc, 0x0a, 0xfe, 0x14, 0x58, 0x86, - 0xcf, 0x9a, 0x2c, 0xc3, 0x4c, 0xd6, 0x20, 0x67, 0x70, 0x0b, 0x2f, 0xf1, 0x95, 0x55, 0x0b, 0xfc, - 0x07, 0xfb, 0xe2, 0x35, 0xbd, 0x3f, 0x27, 0x6b, 0xff, 0x5f, 0x8b, 0x1f, 0x62, 0xca, 0x31, 0x1d, - 0x7d, 0x0f, 0x14, 0x1b, 0x4e, 0xdb, 0x69, 0xf0, 0xb0, 0xbe, 0x99, 0x5a, 0x1d, 0xa3, 0xd2, 0xdc, - 0x92, 0xa8, 0xc1, 0xb5, 0x14, 0x32, 0xea, 0x46, 0x51, 0x16, 0xf7, 0xd5, 0x4c, 0xa8, 0x26, 0x67, - 0x77, 0x60, 0xdc, 0x20, 0xf6, 0x48, 0x45, 0xda, 0xef, 0xe1, 0x57, 0xac, 0x8a, 0x12, 0xb3, 0x0b, - 0xd3, 0x9e, 0xf6, 0x9f, 0x5e, 0x28, 0x52, 0x4c, 0xf9, 0x78, 0xbf, 0x4b, 0x94, 0xdd, 0x3e, 0x9a, - 0x95, 0x7f, 0x82, 0x0c, 0xee, 0xa6, 0x6c, 0xff, 0x84, 0x05, 0x8f, 0xe9, 0x88, 0x5a, 0xcc, 0x80, - 0x7e, 0x7a, 0xe2, 0x0a, 0x14, 0xfd, 0x36, 0x09, 0x9c, 0xc8, 0x0f, 0xc4, 0xad, 0x71, 0x55, 0x0e, - 0xfa, 0x6d, 0x51, 0x7e, 0x24, 0xe2, 0x2b, 0x4a, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0x54, 
0x8e, 0x61, - 0x83, 0x11, 0x8a, 0x78, 0x0e, 0xec, 0x0c, 0x60, 0x4f, 0xa6, 0x21, 0x16, 0x10, 0xfb, 0x0f, 0x2d, - 0xbe, 0xb0, 0xf4, 0xae, 0xa3, 0xf7, 0x60, 0x6a, 0xd7, 0x89, 0x1a, 0xdb, 0xcb, 0x0f, 0xda, 0x01, - 0x57, 0x8f, 0xcb, 0x71, 0x7a, 0xb6, 0xdf, 0x38, 0x69, 0x1f, 0x19, 0x1b, 0xa9, 0xad, 0x26, 0x88, - 0xe1, 0x2e, 0xf2, 0x68, 0x03, 0x46, 0x59, 0x19, 0xb3, 0x86, 0x0e, 0x7b, 0xb1, 0x06, 0x59, 0xad, - 0xa9, 0x57, 0xe7, 0xd5, 0x98, 0x0e, 0xd6, 0x89, 0xda, 0x5f, 0xce, 0xf3, 0xdd, 0xce, 0xb8, 0xed, - 0x67, 0x60, 0xa4, 0xed, 0x37, 0x97, 0xaa, 0x15, 0x2c, 0x66, 0x41, 0x5d, 0x23, 0x35, 0x5e, 0x8c, - 0x25, 0x1c, 0xbd, 0x06, 0x40, 0x1e, 0x44, 0x24, 0xf0, 0x9c, 0x96, 0x32, 0x1a, 0x51, 0x66, 0x92, - 0x15, 0x7f, 0xcd, 0x8f, 0xee, 0x84, 0xe4, 0xbb, 0x96, 0x15, 0x0a, 0xd6, 0xd0, 0xd1, 0x35, 0x80, - 0x76, 0xe0, 0xef, 0xb9, 0x4d, 0xe6, 0x5e, 0x97, 0x37, 0x4d, 0x2a, 0x6a, 0x0a, 0x82, 0x35, 0x2c, - 0xf4, 0x1a, 0x8c, 0x77, 0xbc, 0x90, 0x73, 0x28, 0xce, 0x86, 0x88, 0x4e, 0x58, 0x8c, 0xad, 0x1b, - 0xee, 0xe8, 0x40, 0x6c, 0xe2, 0xa2, 0x05, 0x18, 0x8e, 0x1c, 0x66, 0x13, 0x31, 0x94, 0x6d, 0xdb, - 0xb8, 0x4e, 0x31, 0xf4, 0xa0, 0xb2, 0xb4, 0x02, 0x16, 0x15, 0xd1, 0xdb, 0xd2, 0x57, 0x81, 0x9f, - 0xf5, 0xc2, 0xa8, 0x78, 0xb0, 0x7b, 0x41, 0xf3, 0x54, 0x10, 0xc6, 0xca, 0x06, 0x2d, 0xfb, 0xeb, - 0x25, 0x80, 0x98, 0x1d, 0x47, 0xef, 0x77, 0x9d, 0x47, 0xcf, 0xf5, 0x66, 0xe0, 0x4f, 0xee, 0x30, - 0x42, 0xdf, 0x6f, 0xc1, 0xa8, 0xd3, 0x6a, 0xf9, 0x0d, 0x27, 0x62, 0xa3, 0x9c, 0xeb, 0x7d, 0x1e, - 0x8a, 0xf6, 0x17, 0xe2, 0x1a, 0xbc, 0x0b, 0x2f, 0xca, 0x85, 0xa7, 0x41, 0xfa, 0xf6, 0x42, 0x6f, - 0x18, 0x7d, 0x4a, 0x4a, 0x69, 0x7c, 0x79, 0xcc, 0x26, 0xa5, 0xb4, 0x12, 0x3b, 0xfa, 0x35, 0x01, - 0x0d, 0xdd, 0x31, 0x02, 0xcf, 0x15, 0xb2, 0x63, 0x30, 0x18, 0x5c, 0x69, 0xbf, 0x98, 0x73, 0xa8, - 0xa6, 0x3b, 0x57, 0x0d, 0x65, 0x07, 0x2a, 0xd1, 0xc4, 0x9f, 0x3e, 0x8e, 0x55, 0xef, 0xc2, 0x64, - 0xd3, 0xbc, 0xdb, 0xc5, 0x6a, 0x7a, 0x3a, 0x8b, 0x6e, 0x82, 0x15, 0x88, 0x6f, 0xf3, 0x04, 0x00, - 0x27, 0x09, 0xa3, 0x1a, 
0x77, 0x73, 0xab, 0x7a, 0x9b, 0xbe, 0x30, 0x4e, 0xb7, 0x33, 0xe7, 0x72, - 0x3f, 0x8c, 0xc8, 0x2e, 0xc5, 0x8c, 0x2f, 0xed, 0x35, 0x51, 0x17, 0x2b, 0x2a, 0xe8, 0x4d, 0x18, - 0x66, 0x7e, 0xb2, 0xe1, 0x4c, 0x31, 0x5b, 0x99, 0x68, 0x86, 0x78, 0x88, 0x37, 0x15, 0xfb, 0x1b, - 0x62, 0x41, 0x01, 0xdd, 0x90, 0x71, 0x60, 0xc2, 0xaa, 0x77, 0x27, 0x24, 0x2c, 0x0e, 0x4c, 0x69, - 0xf1, 0xe3, 0x71, 0x88, 0x17, 0x5e, 0x9e, 0x1a, 0x3e, 0xde, 0xa8, 0x49, 0x99, 0x23, 0xf1, 0x5f, - 0x46, 0xa5, 0x9f, 0x81, 0xec, 0xee, 0x99, 0x91, 0xeb, 0xe3, 0xe1, 0xbc, 0x6b, 0x92, 0xc0, 0x49, - 0x9a, 0x94, 0xd1, 0xe4, 0x3b, 0x57, 0x98, 0xb7, 0xf7, 0xdb, 0xff, 0x5c, 0xbe, 0x66, 0x97, 0x0c, - 0x2f, 0xc1, 0xa2, 0xfe, 0xa9, 0xde, 0xfa, 0xb3, 0x1e, 0x4c, 0x25, 0xb7, 0xe8, 0x23, 0xe5, 0x32, - 0xfe, 0xa0, 0x00, 0x13, 0xe6, 0x92, 0x42, 0xf3, 0x50, 0x12, 0x44, 0x54, 0x50, 0x52, 0xb5, 0x4b, - 0x56, 0x25, 0x00, 0xc7, 0x38, 0x2c, 0x16, 0x2d, 0xab, 0xae, 0x99, 0x25, 0xc6, 0xb1, 0x68, 0x15, - 0x04, 0x6b, 0x58, 0x54, 0x5e, 0xda, 0xf0, 0xfd, 0x48, 0x5d, 0x2a, 0x6a, 0xdd, 0x2d, 0xb2, 0x52, - 0x2c, 0xa0, 0xf4, 0x32, 0xd9, 0x21, 0x81, 0x47, 0x5a, 0x66, 0xac, 0x33, 0x75, 0x99, 0xdc, 0xd4, - 0x81, 0xd8, 0xc4, 0xa5, 0xb7, 0xa4, 0x1f, 0xb2, 0x85, 0x2c, 0xa4, 0xb2, 0xd8, 0xcc, 0xb3, 0xce, - 0x3d, 0xce, 0x25, 0x1c, 0x7d, 0x1e, 0x1e, 0x53, 0x0e, 0xe2, 0x98, 0x2b, 0xaa, 0x65, 0x8b, 0xc3, - 0x86, 0x12, 0xe5, 0xb1, 0xa5, 0x74, 0x34, 0x9c, 0x55, 0x1f, 0xbd, 0x01, 0x13, 0x82, 0x73, 0x97, - 0x14, 0x47, 0x4c, 0xdb, 0x89, 0x9b, 0x06, 0x14, 0x27, 0xb0, 0x65, 0xb4, 0x36, 0xc6, 0x3c, 0x4b, - 0x0a, 0xc5, 0xee, 0x68, 0x6d, 0x3a, 0x1c, 0x77, 0xd5, 0x40, 0x0b, 0x30, 0xc9, 0x59, 0x2b, 0xd7, - 0xdb, 0xe2, 0x73, 0x22, 0xbc, 0x4f, 0xd4, 0x96, 0xba, 0x6d, 0x82, 0x71, 0x12, 0x1f, 0xbd, 0x0a, - 0x63, 0x4e, 0xd0, 0xd8, 0x76, 0x23, 0xd2, 0x88, 0x3a, 0x01, 0x77, 0x4b, 0xd1, 0x8c, 0x4f, 0x16, - 0x34, 0x18, 0x36, 0x30, 0xed, 0xf7, 0xe1, 0x4c, 0x8a, 0xe3, 0x1a, 0x5d, 0x38, 0x4e, 0xdb, 0x95, - 0xdf, 0x94, 0x30, 0xd8, 0x5c, 0xa8, 0x55, 0xe5, 0xd7, 0x68, 
0x58, 0x74, 0x75, 0x32, 0x07, 0x37, - 0x2d, 0x09, 0x85, 0x5a, 0x9d, 0x2b, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0x7f, 0xe5, 0x60, 0x32, 0x45, - 0xf9, 0xce, 0x12, 0x21, 0x24, 0x64, 0x8f, 0x38, 0xef, 0x81, 0x19, 0xfc, 0x2f, 0x77, 0x8c, 0xe0, - 0x7f, 0xf9, 0x7e, 0xc1, 0xff, 0x0a, 0x1f, 0x24, 0xf8, 0x9f, 0x39, 0x62, 0x43, 0x03, 0x8d, 0x58, - 0x4a, 0xc0, 0xc0, 0xe1, 0x63, 0x06, 0x0c, 0x34, 0x06, 0x7d, 0x64, 0x80, 0x41, 0xff, 0xe1, 0x1c, - 0x4c, 0x25, 0x8d, 0xe4, 0x4e, 0x41, 0x1d, 0xfb, 0xa6, 0xa1, 0x8e, 0x4d, 0x4f, 0x2b, 0x92, 0x34, - 0xdd, 0xcb, 0x52, 0xcd, 0xe2, 0x84, 0x6a, 0xf6, 0x93, 0x03, 0x51, 0xeb, 0xad, 0xa6, 0xfd, 0x3b, - 0x39, 0x38, 0x97, 0xac, 0xb2, 0xd4, 0x72, 0xdc, 0xdd, 0x53, 0x18, 0x9b, 0xdb, 0xc6, 0xd8, 0x3c, - 0x3f, 0xc8, 0xd7, 0xb0, 0xae, 0x65, 0x0e, 0xd0, 0xbd, 0xc4, 0x00, 0xcd, 0x0f, 0x4e, 0xb2, 0xf7, - 0x28, 0x7d, 0x23, 0x0f, 0x97, 0x52, 0xeb, 0xc5, 0xda, 0xcc, 0x15, 0x43, 0x9b, 0x79, 0x2d, 0xa1, - 0xcd, 0xb4, 0x7b, 0xd7, 0x3e, 0x19, 0xf5, 0xa6, 0xf0, 0x28, 0x64, 0x01, 0xe2, 0x1e, 0x52, 0xb5, - 0x69, 0x78, 0x14, 0x2a, 0x42, 0xd8, 0xa4, 0xfb, 0xcd, 0xa4, 0xd2, 0xfc, 0x57, 0x16, 0x5c, 0x48, - 0x9d, 0x9b, 0x53, 0x50, 0x61, 0xad, 0x99, 0x2a, 0xac, 0x67, 0x06, 0x5e, 0xad, 0x19, 0x3a, 0xad, - 0xdf, 0x2c, 0x64, 0x7c, 0x0b, 0x13, 0xd0, 0x6f, 0xc3, 0xa8, 0xd3, 0x68, 0x90, 0x30, 0x5c, 0xf5, - 0x9b, 0x2a, 0x60, 0xda, 0xf3, 0x4c, 0xce, 0x8a, 0x8b, 0x8f, 0x0e, 0xca, 0xb3, 0x49, 0x12, 0x31, - 0x18, 0xeb, 0x14, 0xcc, 0x18, 0x8f, 0xb9, 0x13, 0x8d, 0xf1, 0x78, 0x0d, 0x60, 0x4f, 0x71, 0xeb, - 0x49, 0x21, 0x5f, 0xe3, 0xe3, 0x35, 0x2c, 0xf4, 0x05, 0x28, 0x86, 0xe2, 0x1a, 0x17, 0x4b, 0xf1, - 0xc5, 0x01, 0xe7, 0xca, 0xd9, 0x20, 0x2d, 0xd3, 0x75, 0x5d, 0xe9, 0x43, 0x14, 0x49, 0xf4, 0x1d, - 0x30, 0x15, 0xf2, 0xc8, 0x28, 0x4b, 0x2d, 0x27, 0x64, 0x7e, 0x10, 0x62, 0x15, 0x32, 0x7f, 0xf4, - 0x7a, 0x02, 0x86, 0xbb, 0xb0, 0xd1, 0x8a, 0xfc, 0x28, 0x16, 0xc6, 0x85, 0x2f, 0xcc, 0x2b, 0xf1, - 0x07, 0x89, 0x34, 0x4c, 0x67, 0x93, 0xc3, 0xcf, 0x06, 0x5e, 0xab, 0x89, 0xbe, 0x00, 0x40, 0x97, - 
0x8f, 0xd0, 0x25, 0x8c, 0x64, 0x1f, 0x9e, 0xf4, 0x54, 0x69, 0xa6, 0x5a, 0x7e, 0x32, 0x5f, 0xbe, - 0x8a, 0x22, 0x82, 0x35, 0x82, 0xf6, 0x0f, 0x17, 0xe0, 0xf1, 0x1e, 0x67, 0x24, 0x5a, 0x30, 0x9f, - 0x40, 0x9f, 0x4d, 0x0a, 0xd7, 0xb3, 0xa9, 0x95, 0x0d, 0x69, 0x3b, 0xb1, 0x14, 0x73, 0x1f, 0x78, - 0x29, 0xfe, 0x80, 0xa5, 0xa9, 0x3d, 0xb8, 0x31, 0xdf, 0x67, 0x8f, 0x79, 0xf6, 0x9f, 0xa0, 0x1e, - 0x64, 0x33, 0x45, 0x99, 0x70, 0x6d, 0xe0, 0xee, 0x0c, 0xac, 0x5d, 0x38, 0x5d, 0xe5, 0xef, 0x97, - 0x2d, 0x78, 0x32, 0xb5, 0xbf, 0x86, 0xc9, 0xc6, 0x3c, 0x94, 0x1a, 0xb4, 0x50, 0x73, 0xdd, 0x8a, - 0x7d, 0x5a, 0x25, 0x00, 0xc7, 0x38, 0x86, 0x65, 0x46, 0xae, 0xaf, 0x65, 0xc6, 0x3f, 0xb7, 0xa0, - 0x6b, 0x7f, 0x9c, 0xc2, 0x41, 0x5d, 0x35, 0x0f, 0xea, 0x8f, 0x0f, 0x32, 0x97, 0x19, 0x67, 0xf4, - 0x7f, 0x9e, 0x84, 0xf3, 0x19, 0xbe, 0x1a, 0x7b, 0x30, 0xbd, 0xd5, 0x20, 0xa6, 0x53, 0x9c, 0xf8, - 0x98, 0x54, 0xff, 0xc1, 0x9e, 0x1e, 0x74, 0x2c, 0x3d, 0xcf, 0x74, 0x17, 0x0a, 0xee, 0x6e, 0x02, - 0x7d, 0xd9, 0x82, 0xb3, 0xce, 0xfd, 0xb0, 0x2b, 0x09, 0xa3, 0x58, 0x33, 0x2f, 0xa5, 0x2a, 0x41, - 0xfa, 0x24, 0x6d, 0xe4, 0xf9, 0x8a, 0xd2, 0xb0, 0x70, 0x6a, 0x5b, 0x08, 0x8b, 0x10, 0x9a, 0x94, - 0x9d, 0xef, 0xe1, 0xb6, 0x99, 0xe6, 0x54, 0xc3, 0x8f, 0x6c, 0x09, 0xc1, 0x8a, 0x0e, 0xba, 0x0b, - 0xa5, 0x2d, 0xe9, 0xe9, 0x26, 0xae, 0x84, 0xd4, 0x3b, 0x36, 0xd5, 0x1d, 0x8e, 0x3f, 0x4b, 0x2a, - 0x10, 0x8e, 0x49, 0xa1, 0x37, 0x20, 0xef, 0x6d, 0x86, 0xbd, 0x12, 0xfd, 0x24, 0x2c, 0x99, 0xb8, - 0x4b, 0xf4, 0xda, 0x4a, 0x1d, 0xd3, 0x8a, 0xe8, 0x06, 0xe4, 0x83, 0x8d, 0xa6, 0xd0, 0xdb, 0xa5, - 0x9e, 0xdc, 0x78, 0xb1, 0x92, 0xbe, 0x48, 0x38, 0x25, 0xbc, 0x58, 0xc1, 0x94, 0x04, 0xaa, 0xc1, - 0x10, 0x73, 0x6b, 0x10, 0xb7, 0x40, 0x2a, 0xbf, 0xdb, 0xc3, 0x3d, 0x88, 0xfb, 0x4d, 0x33, 0x04, - 0xcc, 0x09, 0xa1, 0x75, 0x18, 0x6e, 0xb0, 0xa4, 0x30, 0x22, 0x6a, 0xf3, 0xa7, 0x52, 0x35, 0x74, - 0x3d, 0xb2, 0xe5, 0x08, 0x85, 0x15, 0xc3, 0xc0, 0x82, 0x16, 0xa3, 0x4a, 0xda, 0xdb, 0x9b, 0x21, - 0x93, 0xf0, 0xb3, 0xa8, 0xf6, 0x48, 
0x02, 0x25, 0xa8, 0x32, 0x0c, 0x2c, 0x68, 0xa1, 0xcf, 0x40, - 0x6e, 0xb3, 0x21, 0xbc, 0x1e, 0x52, 0x55, 0x75, 0xa6, 0x57, 0xfb, 0xe2, 0xf0, 0xe1, 0x41, 0x39, - 0xb7, 0xb2, 0x84, 0x73, 0x9b, 0x0d, 0xb4, 0x06, 0x23, 0x9b, 0xdc, 0x0f, 0x56, 0x68, 0xe3, 0x9e, - 0x4e, 0x77, 0xd1, 0xed, 0x72, 0x95, 0xe5, 0xd6, 0xfa, 0x02, 0x80, 0x25, 0x11, 0x16, 0x87, 0x52, - 0xf9, 0xf3, 0x8a, 0x80, 0xcc, 0x73, 0xc7, 0xf3, 0xc1, 0xe6, 0xb7, 0x72, 0xec, 0x15, 0x8c, 0x35, - 0x8a, 0xe8, 0x4b, 0x50, 0x72, 0x64, 0xfa, 0x3f, 0x11, 0xb0, 0xe2, 0xc5, 0xd4, 0x8d, 0xd9, 0x3b, - 0x33, 0x22, 0x5f, 0xd5, 0x0a, 0x09, 0xc7, 0x44, 0xd1, 0x0e, 0x8c, 0xef, 0x85, 0xed, 0x6d, 0x22, - 0x37, 0x32, 0x8b, 0x5f, 0x91, 0x71, 0x71, 0xdd, 0x15, 0x88, 0x6e, 0x10, 0x75, 0x9c, 0x56, 0xd7, - 0xd9, 0xc3, 0xde, 0xb2, 0xef, 0xea, 0xc4, 0xb0, 0x49, 0x9b, 0x0e, 0xff, 0x7b, 0x1d, 0x7f, 0x63, - 0x3f, 0x22, 0x22, 0x82, 0x73, 0xea, 0xf0, 0xbf, 0xc5, 0x51, 0xba, 0x87, 0x5f, 0x00, 0xb0, 0x24, - 0x42, 0xb7, 0xba, 0x23, 0x53, 0x6b, 0xb2, 0xc8, 0xcd, 0x19, 0x5b, 0x3d, 0x35, 0xff, 0xa6, 0x36, - 0x28, 0xec, 0x8c, 0x8c, 0x49, 0xb1, 0xb3, 0xb1, 0xbd, 0xed, 0x47, 0xbe, 0x97, 0x38, 0x97, 0xa7, - 0xb3, 0xcf, 0xc6, 0x5a, 0x0a, 0x7e, 0xf7, 0xd9, 0x98, 0x86, 0x85, 0x53, 0xdb, 0x42, 0x4d, 0x98, - 0x68, 0xfb, 0x41, 0x74, 0xdf, 0x0f, 0xe4, 0xfa, 0x42, 0x3d, 0xb4, 0x09, 0x06, 0xa6, 0x68, 0x91, - 0x45, 0x14, 0x37, 0x21, 0x38, 0x41, 0x13, 0x7d, 0x0e, 0x46, 0xc2, 0x86, 0xd3, 0x22, 0xd5, 0xdb, - 0x33, 0x67, 0xb2, 0x2f, 0x9d, 0x3a, 0x47, 0xc9, 0x58, 0x5d, 0x6c, 0x72, 0x04, 0x0a, 0x96, 0xe4, - 0xd0, 0x0a, 0x0c, 0xb1, 0xb4, 0x00, 0x2c, 0xf8, 0x74, 0x46, 0x60, 0xa4, 0x2e, 0xbb, 0x52, 0x7e, - 0x36, 0xb1, 0x62, 0xcc, 0xab, 0xd3, 0x3d, 0x20, 0x98, 0x6a, 0x3f, 0x9c, 0x39, 0x97, 0xbd, 0x07, - 0x04, 0x2f, 0x7e, 0xbb, 0xde, 0x6b, 0x0f, 0x28, 0x24, 0x1c, 0x13, 0xa5, 0x27, 0x33, 0x3d, 0x4d, - 0xcf, 0xf7, 0x30, 0x63, 0xc9, 0x3c, 0x4b, 0xd9, 0xc9, 0x4c, 0x4f, 0x52, 0x4a, 0xc2, 0xfe, 0xbd, - 0x91, 0x6e, 0x4e, 0x85, 0x89, 0x61, 0x7f, 0xd1, 0xea, 0x7a, 0xa1, 0xfb, 
0xf4, 0xa0, 0x5a, 0xa1, - 0x13, 0xe4, 0x51, 0xbf, 0x6c, 0xc1, 0xf9, 0x76, 0xea, 0x87, 0x88, 0x6b, 0x7f, 0x30, 0xe5, 0x12, - 0xff, 0x74, 0x15, 0x20, 0x3e, 0x1d, 0x8e, 0x33, 0x5a, 0x4a, 0xca, 0x01, 0xf9, 0x0f, 0x2c, 0x07, - 0xac, 0x42, 0x91, 0xb1, 0x96, 0x7d, 0x92, 0xa4, 0x25, 0xc5, 0x21, 0xc6, 0x40, 0x2c, 0x89, 0x8a, - 0x58, 0x91, 0x40, 0x3f, 0x68, 0xc1, 0xc5, 0x64, 0xd7, 0x31, 0x61, 0x60, 0x11, 0x4e, 0x9d, 0x4b, - 0x80, 0x2b, 0xe2, 0xfb, 0x2f, 0xd6, 0x7a, 0x21, 0x1f, 0xf5, 0x43, 0xc0, 0xbd, 0x1b, 0x43, 0x95, - 0x14, 0x11, 0x74, 0xd8, 0x54, 0xbb, 0x0f, 0x20, 0x86, 0xbe, 0x04, 0x63, 0xbb, 0x7e, 0xc7, 0x8b, - 0x84, 0xd5, 0x8b, 0xf0, 0x53, 0x64, 0xcf, 0xcc, 0xab, 0x5a, 0x39, 0x36, 0xb0, 0x12, 0xc2, 0x6b, - 0xf1, 0xa1, 0x85, 0xd7, 0x77, 0x12, 0xa9, 0xb0, 0x4b, 0xd9, 0x61, 0xfb, 0x84, 0x9c, 0x7f, 0x8c, - 0x84, 0xd8, 0xa7, 0x2b, 0x11, 0xfd, 0x8c, 0x95, 0xc2, 0xca, 0x73, 0x19, 0xf9, 0x75, 0x53, 0x46, - 0xbe, 0x92, 0x94, 0x91, 0xbb, 0x54, 0xae, 0x86, 0x78, 0x3c, 0x78, 0xec, 0xe7, 0x41, 0x83, 0xa9, - 0xd9, 0x2d, 0xb8, 0xdc, 0xef, 0x5a, 0x62, 0xe6, 0x4f, 0x4d, 0xf5, 0xc0, 0x16, 0x9b, 0x3f, 0x35, - 0xab, 0x15, 0xcc, 0x20, 0x83, 0x46, 0xdb, 0xb0, 0xff, 0x9b, 0x05, 0xf9, 0x9a, 0xdf, 0x3c, 0x05, - 0x15, 0xf2, 0x67, 0x0d, 0x15, 0xf2, 0xe3, 0x19, 0x29, 0xca, 0x33, 0x15, 0xc6, 0xcb, 0x09, 0x85, - 0xf1, 0xc5, 0x2c, 0x02, 0xbd, 0xd5, 0xc3, 0x3f, 0x99, 0x07, 0x3d, 0xa1, 0x3a, 0xfa, 0xcd, 0x87, - 0xb1, 0x3d, 0xce, 0xf7, 0xca, 0xb1, 0x2e, 0x28, 0x33, 0xab, 0x29, 0xe9, 0x7a, 0xf7, 0x67, 0xcc, - 0x04, 0xf9, 0x1e, 0x71, 0xb7, 0xb6, 0x23, 0xd2, 0x4c, 0x7e, 0xce, 0xe9, 0x99, 0x20, 0xff, 0x17, - 0x0b, 0x26, 0x13, 0xad, 0xa3, 0x16, 0x8c, 0xb7, 0x74, 0xfd, 0x9f, 0x58, 0xa7, 0x0f, 0xa5, 0x3a, - 0x14, 0x26, 0x9c, 0x5a, 0x11, 0x36, 0x89, 0xa3, 0x39, 0x00, 0xf5, 0x3e, 0x27, 0xf5, 0x5e, 0x8c, - 0xeb, 0x57, 0x0f, 0x78, 0x21, 0xd6, 0x30, 0xd0, 0xcb, 0x30, 0x1a, 0xf9, 0x6d, 0xbf, 0xe5, 0x6f, - 0xed, 0xdf, 0x24, 0x32, 0xbe, 0x8b, 0x32, 0xcc, 0x5a, 0x8f, 0x41, 0x58, 0xc7, 0xb3, 0x7f, 0x3a, - 0x0f, 0xc9, 
0x24, 0xfc, 0xdf, 0x5a, 0x93, 0x1f, 0xcd, 0x35, 0xf9, 0x0d, 0x0b, 0xa6, 0x68, 0xeb, - 0xcc, 0x48, 0x44, 0x5e, 0xb6, 0x2a, 0x07, 0x8d, 0xd5, 0x23, 0x07, 0xcd, 0x15, 0x7a, 0x76, 0x35, - 0xfd, 0x4e, 0x24, 0xf4, 0x66, 0xda, 0xe1, 0x44, 0x4b, 0xb1, 0x80, 0x0a, 0x3c, 0x12, 0x04, 0xc2, - 0xf3, 0x49, 0xc7, 0x23, 0x41, 0x80, 0x05, 0x54, 0xa6, 0xa8, 0x29, 0x64, 0xa4, 0xa8, 0x61, 0xd1, - 0xea, 0x84, 0x39, 0x81, 0x60, 0x7b, 0xb4, 0x68, 0x75, 0xd2, 0xce, 0x20, 0xc6, 0xb1, 0x7f, 0x3e, - 0x0f, 0x63, 0x35, 0xbf, 0x19, 0xbf, 0x90, 0xbd, 0x64, 0xbc, 0x90, 0x5d, 0x4e, 0xbc, 0x90, 0x4d, - 0xe9, 0xb8, 0xdf, 0x7a, 0x0f, 0xfb, 0xb0, 0xde, 0xc3, 0xfe, 0x99, 0xc5, 0x66, 0xad, 0xb2, 0x56, - 0x17, 0x29, 0x72, 0x5f, 0x80, 0x51, 0x76, 0x20, 0x31, 0x57, 0x3b, 0xf9, 0x6c, 0xc4, 0xa2, 0xcf, - 0xaf, 0xc5, 0xc5, 0x58, 0xc7, 0x41, 0x57, 0xa1, 0x18, 0x12, 0x27, 0x68, 0x6c, 0xab, 0x33, 0x4e, - 0x3c, 0xaa, 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0xad, 0x38, 0x50, 0x5a, 0x3e, 0x3b, 0xd9, 0xab, 0xde, - 0x1f, 0xbe, 0x45, 0xb2, 0xa3, 0xa3, 0xd9, 0xf7, 0x00, 0x75, 0xe3, 0x0f, 0x10, 0x12, 0xa9, 0x6c, - 0x86, 0x44, 0x2a, 0x75, 0x85, 0x43, 0xfa, 0x13, 0x0b, 0x26, 0x6a, 0x7e, 0x93, 0x6e, 0xdd, 0x6f, - 0xa6, 0x7d, 0xaa, 0x47, 0x89, 0x1c, 0xee, 0x11, 0x25, 0xf2, 0xef, 0x5a, 0x30, 0x52, 0xf3, 0x9b, - 0xa7, 0xa0, 0x6d, 0x7f, 0xdd, 0xd4, 0xb6, 0x3f, 0x96, 0xb1, 0x24, 0x32, 0x14, 0xec, 0xbf, 0x98, - 0x87, 0x71, 0xda, 0x4f, 0x7f, 0x4b, 0xce, 0x92, 0x31, 0x22, 0xd6, 0x00, 0x23, 0x42, 0xd9, 0x5c, - 0xbf, 0xd5, 0xf2, 0xef, 0x27, 0x67, 0x6c, 0x85, 0x95, 0x62, 0x01, 0x45, 0xcf, 0x41, 0xb1, 0x1d, - 0x90, 0x3d, 0xd7, 0x17, 0xfc, 0xa3, 0xf6, 0x76, 0x51, 0x13, 0xe5, 0x58, 0x61, 0x50, 0xb9, 0x2b, - 0x74, 0xbd, 0x06, 0x91, 0x99, 0xa6, 0x0b, 0x2c, 0x19, 0x15, 0x0f, 0xff, 0xac, 0x95, 0x63, 0x03, - 0x0b, 0xdd, 0x83, 0x12, 0xfb, 0xcf, 0x4e, 0x94, 0xe3, 0x27, 0xcf, 0x11, 0x39, 0x17, 0x04, 0x01, - 0x1c, 0xd3, 0x42, 0xd7, 0x00, 0x22, 0x19, 0x22, 0x38, 0x14, 0x91, 0x6d, 0x14, 0xaf, 0xad, 0x82, - 0x07, 0x87, 0x58, 0xc3, 0x42, 0xcf, 0x42, 0x29, 
0x72, 0xdc, 0xd6, 0x2d, 0xd7, 0x23, 0x21, 0x53, - 0x39, 0xe7, 0x65, 0x4a, 0x05, 0x51, 0x88, 0x63, 0x38, 0xe5, 0x75, 0x98, 0xdb, 0x37, 0x4f, 0xbd, - 0x55, 0x64, 0xd8, 0x8c, 0xd7, 0xb9, 0xa5, 0x4a, 0xb1, 0x86, 0x61, 0xbf, 0x0a, 0xe7, 0x6a, 0x7e, - 0xb3, 0xe6, 0x07, 0xd1, 0x8a, 0x1f, 0xdc, 0x77, 0x82, 0xa6, 0x9c, 0xbf, 0xb2, 0x8c, 0xee, 0x4f, - 0xcf, 0x9e, 0x21, 0xbe, 0x33, 0x8d, 0xb8, 0xfd, 0x2f, 0x32, 0x6e, 0xe7, 0x98, 0xae, 0x1c, 0x0d, - 0x76, 0xef, 0xaa, 0x2c, 0x7b, 0xd7, 0x9d, 0x88, 0xa0, 0xdb, 0x2c, 0x33, 0x57, 0x7c, 0x05, 0x89, - 0xea, 0xcf, 0x68, 0x99, 0xb9, 0x62, 0x60, 0xea, 0x9d, 0x65, 0xd6, 0xb7, 0x7f, 0x2d, 0xcf, 0x4e, - 0xa3, 0x44, 0xd2, 0x39, 0xf4, 0x45, 0x98, 0x08, 0xc9, 0x2d, 0xd7, 0xeb, 0x3c, 0x90, 0x42, 0x78, - 0x0f, 0x67, 0x9c, 0xfa, 0xb2, 0x8e, 0xc9, 0x55, 0x79, 0x66, 0x19, 0x4e, 0x50, 0xa3, 0xf3, 0x14, - 0x74, 0xbc, 0x85, 0xf0, 0x4e, 0x48, 0x02, 0x91, 0xf4, 0x8c, 0xcd, 0x13, 0x96, 0x85, 0x38, 0x86, - 0xd3, 0x75, 0xc9, 0xfe, 0xac, 0xf9, 0x1e, 0xf6, 0xfd, 0x48, 0xae, 0x64, 0x96, 0x36, 0x47, 0x2b, - 0xc7, 0x06, 0x16, 0x5a, 0x01, 0x14, 0x76, 0xda, 0xed, 0x16, 0x7b, 0xce, 0x77, 0x5a, 0xd7, 0x03, - 0xbf, 0xd3, 0xe6, 0x6f, 0x9d, 0xf9, 0xc5, 0xf3, 0xf4, 0x0a, 0xab, 0x77, 0x41, 0x71, 0x4a, 0x0d, - 0x7a, 0xfa, 0x6c, 0x86, 0xec, 0x37, 0x5b, 0xdd, 0x79, 0xa1, 0x5e, 0xaf, 0xb3, 0x22, 0x2c, 0x61, - 0x74, 0x31, 0xb1, 0xe6, 0x39, 0xe6, 0x70, 0xbc, 0x98, 0xb0, 0x2a, 0xc5, 0x1a, 0x06, 0x5a, 0x86, - 0x91, 0x70, 0x3f, 0x6c, 0x44, 0x22, 0x0e, 0x53, 0x46, 0xfa, 0xca, 0x3a, 0x43, 0xd1, 0x52, 0x2a, - 0xf0, 0x2a, 0x58, 0xd6, 0xb5, 0xbf, 0x87, 0x5d, 0x86, 0x2c, 0x45, 0x56, 0xd4, 0x09, 0x08, 0xda, - 0x85, 0xf1, 0x36, 0x9b, 0x72, 0x11, 0xc0, 0x59, 0xcc, 0xdb, 0x4b, 0x03, 0x4a, 0xb5, 0xf7, 0xe9, - 0x41, 0xa3, 0xb4, 0x4e, 0x4c, 0x5c, 0xa8, 0xe9, 0xe4, 0xb0, 0x49, 0xdd, 0xfe, 0xd7, 0xd3, 0xec, - 0xcc, 0xad, 0x73, 0x51, 0x75, 0x44, 0x18, 0x14, 0x0b, 0xbe, 0x7c, 0x36, 0x5b, 0x67, 0x12, 0x7f, - 0x91, 0x30, 0x4a, 0xc6, 0xb2, 0x2e, 0x7a, 0x8b, 0xbd, 0x4d, 0xf3, 0x83, 0xae, 0x5f, 
0xa6, 0x62, - 0x8e, 0x65, 0x3c, 0x43, 0x8b, 0x8a, 0x58, 0x23, 0x82, 0x6e, 0xc1, 0xb8, 0xc8, 0xa8, 0x24, 0x94, - 0x62, 0x79, 0x43, 0xe9, 0x31, 0x8e, 0x75, 0xe0, 0x51, 0xb2, 0x00, 0x9b, 0x95, 0xd1, 0x16, 0x5c, - 0xd4, 0xd2, 0x0b, 0x5e, 0x0f, 0x1c, 0xf6, 0x5e, 0xe9, 0xb2, 0x4d, 0xa4, 0x9d, 0x9b, 0x4f, 0x1e, - 0x1e, 0x94, 0x2f, 0xae, 0xf7, 0x42, 0xc4, 0xbd, 0xe9, 0xa0, 0xdb, 0x70, 0x8e, 0xfb, 0xed, 0x55, - 0x88, 0xd3, 0x6c, 0xb9, 0x9e, 0x3a, 0x98, 0xf9, 0x3a, 0xbc, 0x70, 0x78, 0x50, 0x3e, 0xb7, 0x90, - 0x86, 0x80, 0xd3, 0xeb, 0xa1, 0xd7, 0xa1, 0xd4, 0xf4, 0x42, 0x31, 0x06, 0xc3, 0x46, 0xe6, 0xcc, - 0x52, 0x65, 0xad, 0xae, 0xbe, 0x3f, 0xfe, 0x83, 0xe3, 0x0a, 0x68, 0x8b, 0x2b, 0xc6, 0x94, 0x1c, - 0x3a, 0x92, 0x9d, 0x25, 0x5d, 0x2c, 0x09, 0xc3, 0x73, 0x87, 0x6b, 0x84, 0x95, 0xe5, 0xab, 0xe1, - 0xd4, 0x63, 0x10, 0x46, 0x6f, 0x02, 0xa2, 0x8c, 0x9a, 0xdb, 0x20, 0x0b, 0x0d, 0x16, 0x47, 0x9b, - 0xe9, 0x11, 0x8b, 0x86, 0xa7, 0x04, 0xaa, 0x77, 0x61, 0xe0, 0x94, 0x5a, 0xe8, 0x06, 0x3d, 0xc8, - 0xf4, 0x52, 0x61, 0xc1, 0x2b, 0x99, 0xfb, 0x99, 0x0a, 0x69, 0x07, 0xa4, 0xe1, 0x44, 0xa4, 0x69, - 0x52, 0xc4, 0x89, 0x7a, 0xf4, 0x2e, 0x55, 0x29, 0x75, 0xc0, 0x0c, 0x96, 0xd1, 0x9d, 0x56, 0x87, - 0xca, 0xc5, 0xdb, 0x7e, 0x18, 0xad, 0x91, 0xe8, 0xbe, 0x1f, 0xec, 0x88, 0xd8, 0x64, 0x71, 0x98, - 0xcc, 0x18, 0x84, 0x75, 0x3c, 0xca, 0x07, 0xb3, 0xc7, 0xe1, 0x6a, 0x85, 0xbd, 0xd0, 0x15, 0xe3, - 0x7d, 0x72, 0x83, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xd5, 0xda, 0x12, 0x7b, 0x6d, 0x4b, 0xa0, 0x56, - 0x6b, 0x4b, 0x58, 0xc2, 0x11, 0xe9, 0xce, 0x4a, 0x3a, 0x91, 0xad, 0xd5, 0xec, 0xbe, 0x0e, 0x06, - 0x4c, 0x4c, 0xea, 0xc1, 0x94, 0xca, 0x87, 0xca, 0x83, 0xb6, 0x85, 0x33, 0x93, 0x6c, 0x91, 0x0c, - 0x1e, 0xf1, 0x4d, 0xe9, 0x89, 0xab, 0x09, 0x4a, 0xb8, 0x8b, 0xb6, 0x11, 0xbe, 0x64, 0xaa, 0x6f, - 0x4a, 0xa4, 0x79, 0x28, 0x85, 0x9d, 0x8d, 0xa6, 0xbf, 0xeb, 0xb8, 0x1e, 0x7b, 0x1c, 0xd3, 0x98, - 0xac, 0xba, 0x04, 0xe0, 0x18, 0x07, 0xad, 0x40, 0xd1, 0x91, 0x4a, 0x60, 0x94, 0x1d, 0xab, 0x40, - 0xa9, 0x7e, 0xb9, 0xfb, 
0xae, 0x54, 0xfb, 0xaa, 0xba, 0xe8, 0x35, 0x18, 0x17, 0xde, 0x5a, 0x3c, - 0x82, 0x03, 0x7b, 0xbc, 0xd2, 0xcc, 0xf1, 0xeb, 0x3a, 0x10, 0x9b, 0xb8, 0xe8, 0x0b, 0x30, 0x41, - 0xa9, 0xc4, 0x07, 0xdb, 0xcc, 0xd9, 0x41, 0x4e, 0x44, 0x2d, 0xd5, 0x85, 0x5e, 0x19, 0x27, 0x88, - 0xa1, 0x26, 0x3c, 0xe1, 0x74, 0x22, 0x9f, 0x29, 0xd2, 0xcd, 0xf5, 0xbf, 0xee, 0xef, 0x10, 0x8f, - 0xbd, 0x61, 0x15, 0x17, 0x2f, 0x1f, 0x1e, 0x94, 0x9f, 0x58, 0xe8, 0x81, 0x87, 0x7b, 0x52, 0x41, - 0x77, 0x60, 0x34, 0xf2, 0x5b, 0xcc, 0x30, 0x9e, 0xb2, 0x12, 0xe7, 0xb3, 0xc3, 0xff, 0xac, 0x2b, - 0x34, 0x5d, 0x89, 0xa4, 0xaa, 0x62, 0x9d, 0x0e, 0x5a, 0xe7, 0x7b, 0x8c, 0x05, 0x46, 0x25, 0xe1, - 0xcc, 0x63, 0xd9, 0x03, 0xa3, 0xe2, 0xa7, 0x9a, 0x5b, 0x50, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x1d, - 0xa6, 0xdb, 0x81, 0xeb, 0xb3, 0x85, 0xad, 0x1e, 0x31, 0x66, 0xcc, 0xec, 0x06, 0xb5, 0x24, 0x02, - 0xee, 0xae, 0x43, 0x85, 0x4c, 0x59, 0x38, 0x73, 0x81, 0xa7, 0xca, 0xe2, 0x8c, 0x37, 0x2f, 0xc3, - 0x0a, 0x8a, 0x56, 0xd9, 0xb9, 0xcc, 0xc5, 0xc1, 0x99, 0xd9, 0xec, 0x18, 0x0f, 0xba, 0xd8, 0xc8, - 0xf9, 0x25, 0xf5, 0x17, 0xc7, 0x14, 0xe8, 0xbd, 0x11, 0x6e, 0x3b, 0x01, 0xa9, 0x05, 0x7e, 0x83, - 0xf0, 0xce, 0x70, 0x9b, 0xfc, 0xc7, 0x79, 0xfc, 0x46, 0x7a, 0x6f, 0xd4, 0xd3, 0x10, 0x70, 0x7a, - 0x3d, 0xd4, 0xd4, 0x32, 0x44, 0x53, 0x36, 0x34, 0x9c, 0x79, 0xa2, 0x87, 0x99, 0x51, 0x82, 0x67, - 0x8d, 0xd7, 0xa2, 0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x1d, 0x30, 0x25, 0xc2, 0x1d, 0xc5, 0xe3, - 0x7e, 0x31, 0xb6, 0x5f, 0xc4, 0x09, 0x18, 0xee, 0xc2, 0x9e, 0xfd, 0x76, 0x98, 0xee, 0xba, 0x71, - 0x8e, 0x15, 0x7c, 0xfc, 0x8f, 0x87, 0xa0, 0xa4, 0x94, 0xe9, 0x68, 0xde, 0x7c, 0x23, 0xb9, 0x90, - 0x7c, 0x23, 0x29, 0x52, 0x9e, 0x5e, 0x7f, 0x16, 0x59, 0x37, 0xcc, 0xea, 0x72, 0xd9, 0xa9, 0xbe, - 0x74, 0xae, 0xbc, 0xaf, 0x8b, 0x9e, 0xa6, 0x1b, 0xc9, 0x0f, 0xfc, 0xd8, 0x52, 0xe8, 0xa9, 0x6e, - 0x19, 0x30, 0xd3, 0x2e, 0x7a, 0x8a, 0x0a, 0x36, 0xcd, 0x6a, 0x2d, 0x99, 0x7a, 0xb2, 0x46, 0x0b, - 0x31, 0x87, 0x31, 0x01, 0x90, 0xb2, 0x47, 0x4c, 0x00, 0x1c, 
0x79, 0x48, 0x01, 0x50, 0x12, 0xc0, - 0x31, 0x2d, 0xd4, 0x82, 0xe9, 0x86, 0x99, 0x35, 0x54, 0xb9, 0xe5, 0x3d, 0xd5, 0x37, 0x7f, 0x67, - 0x47, 0x4b, 0xd1, 0xb6, 0x94, 0xa4, 0x82, 0xbb, 0x09, 0xa3, 0xd7, 0xa0, 0xf8, 0x9e, 0x1f, 0xb2, - 0xc5, 0x24, 0x78, 0x04, 0xe9, 0xbe, 0x54, 0x7c, 0xeb, 0x76, 0x9d, 0x95, 0x1f, 0x1d, 0x94, 0x47, - 0x6b, 0x7e, 0x53, 0xfe, 0xc5, 0xaa, 0x02, 0x7a, 0x00, 0xe7, 0x8c, 0x93, 0x55, 0x75, 0x17, 0x06, - 0xef, 0xee, 0x45, 0xd1, 0xdc, 0xb9, 0x6a, 0x1a, 0x25, 0x9c, 0xde, 0x00, 0x3d, 0xae, 0x3c, 0x5f, - 0x64, 0xdc, 0x95, 0x7c, 0x08, 0x63, 0x37, 0x4a, 0xba, 0xf3, 0x7a, 0x02, 0x01, 0x77, 0xd7, 0xb1, - 0x7f, 0x85, 0xbf, 0x3d, 0x08, 0x0d, 0x25, 0x09, 0x3b, 0xad, 0xd3, 0x48, 0xe8, 0xb4, 0x6c, 0x28, - 0x4f, 0x1f, 0xfa, 0x7d, 0xeb, 0x37, 0x2c, 0xf6, 0xbe, 0xb5, 0x4e, 0x76, 0xdb, 0x2d, 0x2a, 0x27, - 0x3f, 0xfa, 0x8e, 0xbf, 0x05, 0xc5, 0x48, 0xb4, 0xd6, 0x2b, 0x07, 0x95, 0xd6, 0x29, 0xf6, 0xc6, - 0xa7, 0x38, 0x14, 0x59, 0x8a, 0x15, 0x19, 0xfb, 0x9f, 0xf0, 0x19, 0x90, 0x90, 0x53, 0x50, 0x64, - 0x55, 0x4c, 0x45, 0x56, 0xb9, 0xcf, 0x17, 0x64, 0x28, 0xb4, 0xfe, 0xb1, 0xd9, 0x6f, 0x26, 0x0c, - 0x7e, 0xd4, 0x1f, 0x56, 0xed, 0x1f, 0xb1, 0xe0, 0x6c, 0x9a, 0x25, 0x12, 0xe5, 0x2a, 0xb9, 0x28, - 0xaa, 0x1e, 0x9a, 0xd5, 0x08, 0xde, 0x15, 0xe5, 0x58, 0x61, 0x0c, 0x9c, 0xde, 0xe1, 0x78, 0xf1, - 0xdd, 0x6e, 0xc3, 0x78, 0x2d, 0x20, 0xda, 0x1d, 0xf0, 0x06, 0xf7, 0x83, 0xe3, 0xfd, 0x79, 0xee, - 0xd8, 0x3e, 0x70, 0xf6, 0xcf, 0xe6, 0xe0, 0x2c, 0x7f, 0x29, 0x5a, 0xd8, 0xf3, 0xdd, 0x66, 0xcd, - 0x6f, 0x8a, 0xd4, 0x1c, 0x6f, 0xc3, 0x58, 0x5b, 0xd3, 0x1f, 0xf4, 0x8a, 0x30, 0xa5, 0xeb, 0x19, - 0x62, 0x39, 0x4e, 0x2f, 0xc5, 0x06, 0x2d, 0xd4, 0x84, 0x31, 0xb2, 0xe7, 0x36, 0xd4, 0x73, 0x43, - 0xee, 0xd8, 0x77, 0x83, 0x6a, 0x65, 0x59, 0xa3, 0x83, 0x0d, 0xaa, 0x8f, 0x20, 0x5b, 0x9b, 0xfd, - 0xa3, 0x16, 0x3c, 0x96, 0x11, 0x8f, 0x8a, 0x36, 0x77, 0x9f, 0xbd, 0xc9, 0x89, 0xc4, 0x4f, 0xaa, - 0x39, 0xfe, 0x52, 0x87, 0x05, 0x14, 0x7d, 0x0e, 0x80, 0xbf, 0xb4, 0x51, 0xb1, 0xa6, 0x5f, 0xe0, - 
0x1e, 0x23, 0xe6, 0x88, 0x16, 0x2b, 0x42, 0xd6, 0xc7, 0x1a, 0x2d, 0xfb, 0xa7, 0xf2, 0x30, 0xc4, - 0x5e, 0x76, 0xd0, 0x0a, 0x8c, 0x6c, 0xf3, 0x08, 0xcd, 0x83, 0x04, 0x83, 0x8e, 0xe5, 0x43, 0x5e, - 0x80, 0x65, 0x65, 0xb4, 0x0a, 0x67, 0x78, 0x84, 0xeb, 0x56, 0x85, 0xb4, 0x9c, 0x7d, 0xa9, 0x66, - 0xe0, 0xc9, 0x92, 0x54, 0xdc, 0x8b, 0x6a, 0x37, 0x0a, 0x4e, 0xab, 0x87, 0xde, 0x80, 0x09, 0xca, - 0x97, 0xf9, 0x9d, 0x48, 0x52, 0xe2, 0xb1, 0xad, 0x15, 0x23, 0xb8, 0x6e, 0x40, 0x71, 0x02, 0x9b, - 0x0a, 0x4c, 0xed, 0x2e, 0x85, 0xca, 0x50, 0x2c, 0x30, 0x99, 0x4a, 0x14, 0x13, 0x97, 0x99, 0x20, - 0x75, 0x98, 0xc1, 0xd5, 0xfa, 0x76, 0x40, 0xc2, 0x6d, 0xbf, 0xd5, 0x14, 0xb9, 0xb6, 0x63, 0x13, - 0xa4, 0x04, 0x1c, 0x77, 0xd5, 0xa0, 0x54, 0x36, 0x1d, 0xb7, 0xd5, 0x09, 0x48, 0x4c, 0x65, 0xd8, - 0xa4, 0xb2, 0x92, 0x80, 0xe3, 0xae, 0x1a, 0x74, 0x1d, 0x9d, 0x13, 0xc9, 0xaf, 0xa5, 0x37, 0xbe, - 0xb2, 0x2b, 0x1b, 0x91, 0x7e, 0x49, 0x3d, 0xc2, 0xd1, 0x08, 0xcb, 0x1b, 0x95, 0x3e, 0x5b, 0xd3, - 0x03, 0x0a, 0x8f, 0x24, 0x49, 0xe5, 0x61, 0x52, 0x30, 0xff, 0x9e, 0x05, 0x67, 0x52, 0xec, 0x57, - 0xf9, 0x51, 0xb5, 0xe5, 0x86, 0x91, 0x4a, 0x08, 0xa3, 0x1d, 0x55, 0xbc, 0x1c, 0x2b, 0x0c, 0xba, - 0x1f, 0xf8, 0x61, 0x98, 0x3c, 0x00, 0x85, 0x7d, 0x98, 0x80, 0x1e, 0xef, 0x00, 0x44, 0x97, 0xa1, - 0xd0, 0x09, 0x89, 0x0c, 0x24, 0xa5, 0xce, 0x6f, 0xa6, 0x19, 0x66, 0x10, 0xca, 0x9a, 0x6e, 0x29, - 0xa5, 0xac, 0xc6, 0x9a, 0x72, 0x4d, 0x2b, 0x87, 0xd9, 0x5f, 0xcd, 0xc3, 0x85, 0x4c, 0x4b, 0x75, - 0xda, 0xa5, 0x5d, 0xdf, 0x73, 0x23, 0x5f, 0xbd, 0x1a, 0xf2, 0x50, 0x26, 0xa4, 0xbd, 0xbd, 0x2a, - 0xca, 0xb1, 0xc2, 0x40, 0x57, 0x64, 0x1a, 0xf6, 0x64, 0xca, 0x9b, 0xc5, 0x8a, 0x91, 0x89, 0x7d, - 0xd0, 0x74, 0x62, 0x4f, 0x41, 0xa1, 0xed, 0xfb, 0xad, 0xe4, 0x61, 0x44, 0xbb, 0xeb, 0xfb, 0x2d, - 0xcc, 0x80, 0xe8, 0x13, 0x62, 0x1c, 0x12, 0xcf, 0x64, 0xd8, 0x69, 0xfa, 0xa1, 0x36, 0x18, 0xcf, - 0xc0, 0xc8, 0x0e, 0xd9, 0x0f, 0x5c, 0x6f, 0x2b, 0xf9, 0x7c, 0x7a, 0x93, 0x17, 0x63, 0x09, 0x37, - 0x33, 0x3e, 0x8c, 0x9c, 0x74, 0x1e, 
0xb0, 0x62, 0xdf, 0xab, 0xed, 0x07, 0xf2, 0x30, 0x89, 0x17, - 0x2b, 0xdf, 0x9a, 0x88, 0x3b, 0xdd, 0x13, 0x71, 0xd2, 0x79, 0xc0, 0xfa, 0xcf, 0xc6, 0x2f, 0x5a, - 0x30, 0xc9, 0xa2, 0x22, 0x8b, 0x00, 0x1a, 0xae, 0xef, 0x9d, 0x02, 0xeb, 0xf6, 0x14, 0x0c, 0x05, - 0xb4, 0xd1, 0x64, 0x72, 0x1f, 0xd6, 0x13, 0xcc, 0x61, 0xe8, 0x09, 0x28, 0xb0, 0x2e, 0xd0, 0xc9, - 0x1b, 0xe3, 0x79, 0x11, 0x2a, 0x4e, 0xe4, 0x60, 0x56, 0xca, 0xbc, 0xc2, 0x31, 0x69, 0xb7, 0x5c, - 0xde, 0xe9, 0xf8, 0x49, 0xe2, 0xa3, 0xe1, 0x15, 0x9e, 0xda, 0xb5, 0x0f, 0xe6, 0x15, 0x9e, 0x4e, - 0xb2, 0xb7, 0x58, 0xf4, 0xdf, 0x73, 0x70, 0x29, 0xb5, 0xde, 0xc0, 0x5e, 0xe1, 0xbd, 0x6b, 0x9f, - 0x8c, 0x15, 0x4c, 0xba, 0x71, 0x4a, 0xfe, 0x14, 0x8d, 0x53, 0x0a, 0x83, 0x72, 0x8e, 0x43, 0x03, - 0x38, 0x6b, 0xa7, 0x0e, 0xd9, 0x47, 0xc4, 0x59, 0x3b, 0xb5, 0x6f, 0x19, 0x62, 0xdd, 0x9f, 0xe6, - 0x32, 0xbe, 0x85, 0x09, 0x78, 0x57, 0xe9, 0x39, 0xc3, 0x80, 0xa1, 0xe0, 0x84, 0xc7, 0xf8, 0x19, - 0xc3, 0xcb, 0xb0, 0x82, 0x22, 0x57, 0x73, 0x7b, 0xce, 0x65, 0xa7, 0x7e, 0xcc, 0x6c, 0x6a, 0xce, - 0x7c, 0x41, 0x52, 0x43, 0x90, 0xe2, 0x02, 0xbd, 0xaa, 0x09, 0xe5, 0xf9, 0xc1, 0x85, 0xf2, 0xb1, - 0x74, 0x81, 0x1c, 0x2d, 0xc0, 0xe4, 0xae, 0xeb, 0xb1, 0x54, 0xfe, 0x26, 0x2b, 0xaa, 0xa2, 0x80, - 0xac, 0x9a, 0x60, 0x9c, 0xc4, 0x9f, 0x7d, 0x0d, 0xc6, 0x1f, 0x5e, 0x1d, 0xf9, 0x8d, 0x3c, 0x3c, - 0xde, 0x63, 0xdb, 0xf3, 0xb3, 0xde, 0x98, 0x03, 0xed, 0xac, 0xef, 0x9a, 0x87, 0x1a, 0x9c, 0xdd, - 0xec, 0xb4, 0x5a, 0xfb, 0xcc, 0xfe, 0x93, 0x34, 0x25, 0x86, 0xe0, 0x15, 0x9f, 0x90, 0x99, 0x28, - 0x56, 0x52, 0x70, 0x70, 0x6a, 0x4d, 0xf4, 0x26, 0x20, 0x5f, 0xe4, 0x9d, 0xbd, 0x4e, 0x3c, 0xa1, - 0x97, 0x67, 0x03, 0x9f, 0x8f, 0x37, 0xe3, 0xed, 0x2e, 0x0c, 0x9c, 0x52, 0x8b, 0x32, 0xfd, 0xf4, - 0x56, 0xda, 0x57, 0xdd, 0x4a, 0x30, 0xfd, 0x58, 0x07, 0x62, 0x13, 0x17, 0x5d, 0x87, 0x69, 0x67, - 0xcf, 0x71, 0x79, 0x74, 0x3c, 0x49, 0x80, 0x73, 0xfd, 0x4a, 0x09, 0xb6, 0x90, 0x44, 0xc0, 0xdd, - 0x75, 0x12, 0x8e, 0xd1, 0xc3, 0xd9, 0x8e, 0xd1, 0xbd, 0xcf, 0xc5, 0x7e, 
0x3a, 0x5d, 0xfb, 0x3f, - 0x58, 0xf4, 0xfa, 0x4a, 0xc9, 0x1d, 0x4f, 0xc7, 0x41, 0xe9, 0x26, 0x35, 0x1f, 0xe5, 0x73, 0x9a, - 0x85, 0x47, 0x0c, 0xc4, 0x26, 0x2e, 0x5f, 0x10, 0x61, 0xec, 0x24, 0x63, 0xb0, 0xee, 0x22, 0xc6, - 0x81, 0xc2, 0x40, 0x9f, 0x87, 0x91, 0xa6, 0xbb, 0xe7, 0x86, 0x7e, 0x20, 0x36, 0xcb, 0x31, 0x5d, - 0x0d, 0xe2, 0x73, 0xb0, 0xc2, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0x81, 0x1c, 0x8c, 0xcb, 0x16, 0xdf, - 0xea, 0xf8, 0x91, 0x73, 0x0a, 0xd7, 0xf2, 0x75, 0xe3, 0x5a, 0xfe, 0x44, 0xaf, 0x40, 0x0f, 0xac, - 0x4b, 0x99, 0xd7, 0xf1, 0xed, 0xc4, 0x75, 0xfc, 0x74, 0x7f, 0x52, 0xbd, 0xaf, 0xe1, 0x7f, 0x6a, - 0xc1, 0xb4, 0x81, 0x7f, 0x0a, 0xb7, 0xc1, 0x8a, 0x79, 0x1b, 0x3c, 0xd9, 0xf7, 0x1b, 0x32, 0x6e, - 0x81, 0xef, 0xcb, 0x27, 0xfa, 0xce, 0x4e, 0xff, 0xf7, 0xa0, 0xb0, 0xed, 0x04, 0xcd, 0x5e, 0x01, - 0x65, 0xbb, 0x2a, 0xcd, 0xdd, 0x70, 0x82, 0x26, 0x3f, 0xc3, 0x9f, 0x53, 0xd9, 0x2a, 0x9d, 0xa0, - 0xd9, 0xd7, 0x27, 0x8c, 0x35, 0x85, 0x5e, 0x85, 0xe1, 0xb0, 0xe1, 0xb7, 0x95, 0xc5, 0xe6, 0x65, - 0x9e, 0xc9, 0x92, 0x96, 0x1c, 0x1d, 0x94, 0x91, 0xd9, 0x1c, 0x2d, 0xc6, 0x02, 0x1f, 0xbd, 0x0d, - 0xe3, 0xec, 0x97, 0xb2, 0x5c, 0xc8, 0x67, 0xa7, 0x31, 0xa8, 0xeb, 0x88, 0xdc, 0x00, 0xc6, 0x28, - 0xc2, 0x26, 0xa9, 0xd9, 0x2d, 0x28, 0xa9, 0xcf, 0x7a, 0xa4, 0xbe, 0x3c, 0xff, 0x36, 0x0f, 0x67, - 0x52, 0xd6, 0x1c, 0x0a, 0x8d, 0x99, 0x78, 0x61, 0xc0, 0xa5, 0xfa, 0x01, 0xe7, 0x22, 0x64, 0xd2, - 0x50, 0x53, 0xac, 0xad, 0x81, 0x1b, 0xbd, 0x13, 0x92, 0x64, 0xa3, 0xb4, 0xa8, 0x7f, 0xa3, 0xb4, - 0xb1, 0x53, 0x1b, 0x6a, 0xda, 0x90, 0xea, 0xe9, 0x23, 0x9d, 0xd3, 0x3f, 0xca, 0xc3, 0xd9, 0xb4, - 0xd8, 0x33, 0xe8, 0xbb, 0x13, 0x29, 0x6d, 0x5e, 0x1a, 0x34, 0x6a, 0x0d, 0xcf, 0x73, 0x23, 0x12, - 0x34, 0xcf, 0x99, 0x49, 0x6e, 0xfa, 0x0e, 0xb3, 0x68, 0x93, 0x39, 0x80, 0x06, 0x3c, 0x15, 0x91, - 0x3c, 0x3e, 0x3e, 0x3d, 0x70, 0x07, 0x44, 0x0e, 0xa3, 0x30, 0xe1, 0x00, 0x2a, 0x8b, 0xfb, 0x3b, - 0x80, 0xca, 0x96, 0x67, 0x5d, 0x18, 0xd5, 0xbe, 0xe6, 0x91, 0xce, 0xf8, 0x0e, 0xbd, 0xad, 0xb4, - 0x7e, 0x3f, 
0xd2, 0x59, 0xff, 0x51, 0x0b, 0x12, 0xe6, 0x91, 0x4a, 0xdd, 0x65, 0x65, 0xaa, 0xbb, - 0x2e, 0x43, 0x21, 0xf0, 0x5b, 0x24, 0x99, 0x41, 0x06, 0xfb, 0x2d, 0x82, 0x19, 0x84, 0x62, 0x44, - 0xb1, 0xb2, 0x63, 0x4c, 0x17, 0xe4, 0x84, 0x88, 0xf6, 0x14, 0x0c, 0xb5, 0xc8, 0x1e, 0x69, 0x25, - 0xc3, 0xb3, 0xdf, 0xa2, 0x85, 0x98, 0xc3, 0xec, 0x5f, 0x2c, 0xc0, 0xc5, 0x9e, 0x2e, 0xd4, 0x54, - 0x1c, 0xda, 0x72, 0x22, 0x72, 0xdf, 0xd9, 0x4f, 0xc6, 0x51, 0xbe, 0xce, 0x8b, 0xb1, 0x84, 0x33, - 0x8b, 0x71, 0x1e, 0x37, 0x31, 0xa1, 0x1c, 0x14, 0xe1, 0x12, 0x05, 0xf4, 0x11, 0x24, 0xa7, 0xbf, - 0x06, 0x10, 0x86, 0xad, 0x65, 0x8f, 0x72, 0x77, 0x4d, 0x61, 0x8a, 0x1e, 0xc7, 0xd7, 0xac, 0xdf, - 0x12, 0x10, 0xac, 0x61, 0xa1, 0x0a, 0x4c, 0xb5, 0x03, 0x3f, 0xe2, 0xba, 0xd6, 0x0a, 0x37, 0x14, - 0x1a, 0x32, 0xbd, 0x57, 0x6b, 0x09, 0x38, 0xee, 0xaa, 0x81, 0x5e, 0x86, 0x51, 0xe1, 0xd1, 0x5a, - 0xf3, 0xfd, 0x96, 0x50, 0x03, 0x29, 0xb3, 0x93, 0x7a, 0x0c, 0xc2, 0x3a, 0x9e, 0x56, 0x8d, 0x29, - 0x70, 0x47, 0x52, 0xab, 0x71, 0x25, 0xae, 0x86, 0x97, 0x88, 0x43, 0x55, 0x1c, 0x28, 0x0e, 0x55, - 0xac, 0x18, 0x2b, 0x0d, 0xfc, 0x66, 0x05, 0x7d, 0x55, 0x49, 0x3f, 0x57, 0x80, 0x33, 0x62, 0xe1, - 0x3c, 0xea, 0xe5, 0xf2, 0x88, 0x52, 0xe8, 0x7f, 0x6b, 0xcd, 0x9c, 0xf6, 0x9a, 0xf9, 0x41, 0x0b, - 0x4c, 0xf6, 0x0a, 0xfd, 0xb9, 0xcc, 0x40, 0xf4, 0x2f, 0x67, 0xb2, 0x6b, 0x4d, 0x79, 0x81, 0x7c, - 0xc0, 0x90, 0xf4, 0xf6, 0xbf, 0xb7, 0xe0, 0xc9, 0xbe, 0x14, 0xd1, 0x32, 0x94, 0x18, 0x0f, 0xa8, - 0x49, 0x67, 0x4f, 0x2b, 0x43, 0x42, 0x09, 0xc8, 0x60, 0x49, 0xe3, 0x9a, 0x68, 0xb9, 0x2b, 0xe2, - 0xff, 0x33, 0x29, 0x11, 0xff, 0xcf, 0x19, 0xc3, 0xf3, 0x90, 0x21, 0xff, 0x7f, 0x25, 0x0f, 0xc3, - 0x7c, 0xc5, 0x9f, 0x82, 0x18, 0xb6, 0x22, 0xf4, 0xb6, 0x3d, 0x22, 0x51, 0xf1, 0xbe, 0xcc, 0x55, - 0x9c, 0xc8, 0xe1, 0x6c, 0x82, 0xba, 0xad, 0x62, 0x0d, 0x2f, 0x9a, 0x33, 0xee, 0xb3, 0xd9, 0x84, - 0x62, 0x12, 0x38, 0x0d, 0xed, 0x76, 0xfb, 0x22, 0x40, 0xc8, 0xb2, 0xe5, 0x53, 0x1a, 0x22, 0xa6, - 0xd9, 0x27, 0x7b, 0xb4, 0x5e, 0x57, 0xc8, 0xbc, 
0x0f, 0xf1, 0x4e, 0x57, 0x00, 0xac, 0x51, 0x9c, - 0x7d, 0x05, 0x4a, 0x0a, 0xb9, 0x9f, 0x16, 0x67, 0x4c, 0x67, 0x2e, 0x3e, 0x0b, 0x93, 0x89, 0xb6, - 0x8e, 0xa5, 0x04, 0xfa, 0x25, 0x0b, 0x26, 0x79, 0x97, 0x97, 0xbd, 0x3d, 0x71, 0xa6, 0xbe, 0x0f, - 0x67, 0x5b, 0x29, 0x67, 0x9b, 0x98, 0xd1, 0xc1, 0xcf, 0x42, 0xa5, 0xf4, 0x49, 0x83, 0xe2, 0xd4, - 0x36, 0xd0, 0x55, 0xba, 0x6e, 0xe9, 0xd9, 0xe5, 0xb4, 0x84, 0xf7, 0xd1, 0x18, 0x5f, 0xb3, 0xbc, - 0x0c, 0x2b, 0xa8, 0xfd, 0x3b, 0x16, 0x4c, 0xf3, 0x9e, 0xdf, 0x24, 0xfb, 0x6a, 0x87, 0x7f, 0x98, - 0x7d, 0x17, 0x49, 0x38, 0x72, 0x19, 0x49, 0x38, 0xf4, 0x4f, 0xcb, 0xf7, 0xfc, 0xb4, 0x9f, 0xb5, - 0x40, 0xac, 0xc0, 0x53, 0x10, 0xe5, 0xbf, 0xdd, 0x14, 0xe5, 0x67, 0xb3, 0x17, 0x75, 0x86, 0x0c, - 0xff, 0x27, 0x16, 0x4c, 0x71, 0x84, 0xf8, 0x2d, 0xf9, 0x43, 0x9d, 0x87, 0x41, 0xb2, 0xe9, 0xa9, - 0x14, 0xdb, 0xe9, 0x1f, 0x65, 0x4c, 0x56, 0xa1, 0xe7, 0x64, 0x35, 0xe5, 0x06, 0x3a, 0x46, 0x26, - 0xc9, 0x63, 0x07, 0xb3, 0xb6, 0xff, 0xd0, 0x02, 0xc4, 0x9b, 0x31, 0xd8, 0x1f, 0xca, 0x54, 0xb0, - 0x52, 0xed, 0xba, 0x88, 0x8f, 0x1a, 0x05, 0xc1, 0x1a, 0xd6, 0x89, 0x0c, 0x4f, 0xc2, 0x20, 0x20, - 0xdf, 0xdf, 0x20, 0xe0, 0x18, 0x23, 0xfa, 0x7f, 0x0a, 0x90, 0x74, 0x07, 0x40, 0x77, 0x61, 0xac, - 0xe1, 0xb4, 0x9d, 0x0d, 0xb7, 0xe5, 0x46, 0x2e, 0x09, 0x7b, 0x59, 0x12, 0x2d, 0x69, 0x78, 0xe2, - 0xa9, 0x57, 0x2b, 0xc1, 0x06, 0x1d, 0x34, 0x07, 0xd0, 0x0e, 0xdc, 0x3d, 0xb7, 0x45, 0xb6, 0x98, - 0xc6, 0x81, 0xf9, 0x3b, 0x72, 0xf3, 0x18, 0x59, 0x8a, 0x35, 0x8c, 0x14, 0xd7, 0xb5, 0xfc, 0xa3, - 0x73, 0x5d, 0x2b, 0x1c, 0xd3, 0x75, 0x6d, 0x68, 0x20, 0xd7, 0x35, 0x0c, 0xe7, 0x25, 0x8b, 0x44, - 0xff, 0xaf, 0xb8, 0x2d, 0x22, 0xf8, 0x62, 0xee, 0x05, 0x39, 0x7b, 0x78, 0x50, 0x3e, 0x8f, 0x53, - 0x31, 0x70, 0x46, 0x4d, 0xf4, 0x39, 0x98, 0x71, 0x5a, 0x2d, 0xff, 0xbe, 0x1a, 0xb5, 0xe5, 0xb0, - 0xe1, 0xb4, 0xb8, 0xc6, 0x7e, 0x84, 0x51, 0x7d, 0xe2, 0xf0, 0xa0, 0x3c, 0xb3, 0x90, 0x81, 0x83, - 0x33, 0x6b, 0x27, 0x3c, 0xdf, 0x8a, 0x7d, 0x3d, 0xdf, 0x5e, 0x87, 0x52, 0x3b, 0xf0, 
0x1b, 0xab, - 0x9a, 0x37, 0xce, 0x25, 0x96, 0xa7, 0x5e, 0x16, 0x1e, 0x1d, 0x94, 0xc7, 0xd5, 0x1f, 0x76, 0xc3, - 0xc7, 0x15, 0xec, 0x1d, 0x38, 0x53, 0x27, 0x81, 0xcb, 0x32, 0x60, 0x36, 0xe3, 0x0d, 0xbd, 0x0e, - 0xa5, 0x20, 0x71, 0x84, 0x0d, 0x14, 0x58, 0x49, 0x8b, 0xf2, 0x2b, 0x8f, 0xac, 0x98, 0x90, 0xfd, - 0xc7, 0x16, 0x8c, 0x08, 0x87, 0x86, 0x53, 0xe0, 0x9c, 0x16, 0x0c, 0x05, 0x76, 0x39, 0xfd, 0x98, - 0x67, 0x9d, 0xc9, 0x54, 0x5d, 0x57, 0x13, 0xaa, 0xeb, 0x27, 0x7b, 0x11, 0xe9, 0xad, 0xb4, 0xfe, - 0x9b, 0x79, 0x98, 0x30, 0x9d, 0x39, 0x4e, 0x61, 0x08, 0xd6, 0x60, 0x24, 0x14, 0x9e, 0x43, 0xb9, - 0x6c, 0xcb, 0xe9, 0xe4, 0x24, 0xc6, 0x66, 0x51, 0xc2, 0x57, 0x48, 0x12, 0x49, 0x75, 0x49, 0xca, - 0x3f, 0x42, 0x97, 0xa4, 0x7e, 0xfe, 0x34, 0x85, 0x93, 0xf0, 0xa7, 0xb1, 0xbf, 0xc6, 0xae, 0x1a, - 0xbd, 0xfc, 0x14, 0xb8, 0x90, 0xeb, 0xe6, 0xa5, 0x64, 0xf7, 0x58, 0x59, 0xa2, 0x53, 0x19, 0xdc, - 0xc8, 0x2f, 0x58, 0x70, 0x31, 0xe5, 0xab, 0x34, 0xd6, 0xe4, 0x39, 0x28, 0x3a, 0x9d, 0xa6, 0xab, - 0xf6, 0xb2, 0xf6, 0x8c, 0xb5, 0x20, 0xca, 0xb1, 0xc2, 0x40, 0x4b, 0x30, 0x4d, 0x1e, 0xb4, 0x5d, - 0xfe, 0x8e, 0xa8, 0xdb, 0x2e, 0xe6, 0x79, 0x88, 0xd9, 0xe5, 0x24, 0x10, 0x77, 0xe3, 0x2b, 0x77, - 0xec, 0x7c, 0xa6, 0x3b, 0xf6, 0x3f, 0xb0, 0x60, 0x54, 0x74, 0xfb, 0x14, 0x46, 0xfb, 0x3b, 0xcc, - 0xd1, 0x7e, 0xbc, 0xc7, 0x68, 0x67, 0x0c, 0xf3, 0xdf, 0xce, 0xa9, 0xfe, 0xd6, 0xfc, 0x20, 0x1a, - 0x80, 0xe5, 0x79, 0x15, 0x8a, 0xed, 0xc0, 0x8f, 0xfc, 0x86, 0xdf, 0x12, 0x1c, 0xcf, 0x13, 0x71, - 0xb4, 0x00, 0x5e, 0x7e, 0xa4, 0xfd, 0xc6, 0x0a, 0x9b, 0x8d, 0x9e, 0x1f, 0x44, 0x82, 0xcb, 0x88, - 0x47, 0xcf, 0x0f, 0x22, 0xcc, 0x20, 0xa8, 0x09, 0x10, 0x39, 0xc1, 0x16, 0x89, 0x68, 0x99, 0x08, - 0x3c, 0x92, 0x7d, 0x78, 0x74, 0x22, 0xb7, 0x35, 0xe7, 0x7a, 0x51, 0x18, 0x05, 0x73, 0x55, 0x2f, - 0xba, 0x1d, 0x70, 0x01, 0x4a, 0x73, 0xff, 0x57, 0xb4, 0xb0, 0x46, 0x57, 0xfa, 0x68, 0xb2, 0x36, - 0x86, 0xcc, 0x07, 0xf1, 0x35, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0x0a, 0xbb, 0x4a, 0xd8, 0x00, 0x1d, - 0xcf, 0x33, 0xff, 0xeb, 
0x45, 0x35, 0xb4, 0xec, 0x35, 0xac, 0xa2, 0xfb, 0xff, 0xf7, 0x3e, 0xb9, - 0x69, 0xc3, 0xba, 0x1f, 0x4d, 0x1c, 0x24, 0x00, 0x7d, 0x67, 0x97, 0x9d, 0xc4, 0xf3, 0x7d, 0xae, - 0x80, 0x63, 0x58, 0x46, 0xb0, 0xb0, 0xd7, 0x2c, 0x3c, 0x70, 0xb5, 0x26, 0x16, 0xb9, 0x16, 0xf6, - 0x5a, 0x00, 0x70, 0x8c, 0x83, 0xe6, 0x85, 0xf8, 0x5d, 0x30, 0x92, 0xdf, 0x49, 0xf1, 0x5b, 0x7e, - 0xbe, 0x26, 0x7f, 0xbf, 0x00, 0xa3, 0x2a, 0x09, 0x5e, 0x8d, 0xe7, 0x12, 0x13, 0x61, 0x58, 0x96, - 0xe3, 0x62, 0xac, 0xe3, 0xa0, 0x75, 0x98, 0x0c, 0xb9, 0xee, 0x45, 0x45, 0xdb, 0xe3, 0x3a, 0xac, - 0x4f, 0x4a, 0xfb, 0x8a, 0xba, 0x09, 0x3e, 0x62, 0x45, 0xfc, 0xe8, 0x90, 0x8e, 0x96, 0x49, 0x12, - 0xe8, 0x0d, 0x98, 0x68, 0xe9, 0xe9, 0xe6, 0x6b, 0x42, 0xc5, 0xa5, 0xcc, 0x8f, 0x8d, 0x64, 0xf4, - 0x35, 0x9c, 0xc0, 0xa6, 0x9c, 0x92, 0x5e, 0x22, 0x22, 0x44, 0x3a, 0xde, 0x16, 0x09, 0x45, 0x0a, - 0x2f, 0xc6, 0x29, 0xdd, 0xca, 0xc0, 0xc1, 0x99, 0xb5, 0xd1, 0xab, 0x30, 0x26, 0x3f, 0x5f, 0x73, - 0x23, 0x8e, 0x8d, 0xdc, 0x35, 0x18, 0x36, 0x30, 0xd1, 0x7d, 0x38, 0x27, 0xff, 0xaf, 0x07, 0xce, - 0xe6, 0xa6, 0xdb, 0x10, 0x5e, 0xdc, 0xdc, 0xd3, 0x67, 0x41, 0xba, 0x0e, 0x2d, 0xa7, 0x21, 0x1d, - 0x1d, 0x94, 0x2f, 0x8b, 0x51, 0x4b, 0x85, 0xb3, 0x49, 0x4c, 0xa7, 0x8f, 0x56, 0xe1, 0xcc, 0x36, - 0x71, 0x5a, 0xd1, 0xf6, 0xd2, 0x36, 0x69, 0xec, 0xc8, 0x4d, 0xc4, 0x9c, 0x93, 0x35, 0xd3, 0xf0, - 0x1b, 0xdd, 0x28, 0x38, 0xad, 0x1e, 0x7a, 0x07, 0x66, 0xda, 0x9d, 0x8d, 0x96, 0x1b, 0x6e, 0xaf, - 0xf9, 0x11, 0x33, 0xe9, 0x50, 0x39, 0xe4, 0x84, 0x17, 0xb3, 0x72, 0xcc, 0xae, 0x65, 0xe0, 0xe1, - 0x4c, 0x0a, 0xe8, 0x7d, 0x38, 0x97, 0x58, 0x0c, 0xc2, 0xa7, 0x72, 0x22, 0x3b, 0xde, 0x6e, 0x3d, - 0xad, 0x82, 0xf0, 0x91, 0x4c, 0x03, 0xe1, 0xf4, 0x26, 0x3e, 0x98, 0xa1, 0xcf, 0x7b, 0xb4, 0xb2, - 0xc6, 0x94, 0xa1, 0x2f, 0xc1, 0x98, 0xbe, 0x8a, 0xc4, 0x05, 0x73, 0x25, 0x9d, 0x67, 0xd1, 0x56, - 0x1b, 0x67, 0xe9, 0xd4, 0x8a, 0xd2, 0x61, 0xd8, 0xa0, 0x68, 0x13, 0x48, 0xff, 0x3e, 0x74, 0x0b, - 0x8a, 0x8d, 0x96, 0x4b, 0xbc, 0xa8, 0x5a, 0xeb, 0x15, 0xf4, 
0x63, 0x49, 0xe0, 0x88, 0x01, 0x13, - 0x01, 0x4a, 0x79, 0x19, 0x56, 0x14, 0xec, 0x5f, 0xcf, 0x41, 0xb9, 0x4f, 0xb4, 0xdb, 0x84, 0x3e, - 0xda, 0x1a, 0x48, 0x1f, 0xbd, 0x20, 0x33, 0xe2, 0xad, 0x25, 0x84, 0xf4, 0x44, 0xb6, 0xbb, 0x58, - 0x54, 0x4f, 0xe2, 0x0f, 0x6c, 0x1f, 0xac, 0xab, 0xb4, 0x0b, 0x7d, 0x2d, 0xd7, 0x8d, 0xa7, 0xac, - 0xa1, 0xc1, 0x05, 0x91, 0xcc, 0x67, 0x09, 0xfb, 0x6b, 0x39, 0x38, 0xa7, 0x86, 0xf0, 0x9b, 0x77, - 0xe0, 0xee, 0x74, 0x0f, 0xdc, 0x09, 0x3c, 0xea, 0xd8, 0xb7, 0x61, 0x98, 0x07, 0x4d, 0x19, 0x80, - 0x01, 0x7a, 0xca, 0x8c, 0xb0, 0xa5, 0xae, 0x69, 0x23, 0xca, 0xd6, 0x5f, 0xb2, 0x60, 0x72, 0x7d, - 0xa9, 0x56, 0xf7, 0x1b, 0x3b, 0x24, 0x5a, 0xe0, 0x0c, 0x2b, 0x16, 0xfc, 0x8f, 0xf5, 0x90, 0x7c, - 0x4d, 0x1a, 0xc7, 0x74, 0x19, 0x0a, 0xdb, 0x7e, 0x18, 0x25, 0x5f, 0x7c, 0x6f, 0xf8, 0x61, 0x84, - 0x19, 0xc4, 0xfe, 0x5d, 0x0b, 0x86, 0x58, 0x1e, 0xd7, 0x7e, 0xc9, 0x85, 0x07, 0xf9, 0x2e, 0xf4, - 0x32, 0x0c, 0x93, 0xcd, 0x4d, 0xd2, 0x88, 0xc4, 0xac, 0x4a, 0x77, 0xd4, 0xe1, 0x65, 0x56, 0x4a, - 0x2f, 0x7d, 0xd6, 0x18, 0xff, 0x8b, 0x05, 0x32, 0xba, 0x07, 0xa5, 0xc8, 0xdd, 0x25, 0x0b, 0xcd, - 0xa6, 0x78, 0x33, 0x7b, 0x08, 0xef, 0xdf, 0x75, 0x49, 0x00, 0xc7, 0xb4, 0xec, 0xaf, 0xe6, 0x00, - 0x62, 0xd7, 0xff, 0x7e, 0x9f, 0xb8, 0xd8, 0xf5, 0x9a, 0x72, 0x25, 0xe5, 0x35, 0x05, 0xc5, 0x04, - 0x53, 0x9e, 0x52, 0xd4, 0x30, 0xe5, 0x07, 0x1a, 0xa6, 0xc2, 0x71, 0x86, 0x69, 0x09, 0xa6, 0xe3, - 0xd0, 0x05, 0x66, 0x1c, 0x17, 0x26, 0xa4, 0xac, 0x27, 0x81, 0xb8, 0x1b, 0xdf, 0x26, 0x70, 0x59, - 0x46, 0xd4, 0x94, 0x77, 0x0d, 0x33, 0xc9, 0x3c, 0x46, 0x9e, 0xe9, 0xf8, 0xb9, 0x28, 0x97, 0xf9, - 0x5c, 0xf4, 0x13, 0x16, 0x9c, 0x4d, 0xb6, 0xc3, 0x7c, 0xdf, 0xbe, 0x62, 0xc1, 0x39, 0xf6, 0x68, - 0xc6, 0x5a, 0xed, 0x7e, 0xa2, 0x7b, 0x29, 0x3d, 0xa4, 0x43, 0xef, 0x1e, 0xc7, 0x7e, 0xcf, 0xab, - 0x69, 0xa4, 0x71, 0x7a, 0x8b, 0xf6, 0x57, 0x2c, 0xb8, 0x90, 0x99, 0x3e, 0x08, 0x5d, 0x85, 0xa2, - 0xd3, 0x76, 0xb9, 0x46, 0x4a, 0xec, 0x77, 0x26, 0x3d, 0xd6, 0xaa, 0x5c, 0x1f, 0xa5, 0xa0, 0x2a, - 
0xad, 0x61, 0x2e, 0x33, 0xad, 0x61, 0xdf, 0x2c, 0x85, 0xf6, 0xf7, 0x5b, 0x20, 0xdc, 0x9d, 0x06, - 0x38, 0x64, 0xde, 0x96, 0x59, 0x61, 0x8d, 0x60, 0xe6, 0x97, 0xb3, 0xfd, 0xbf, 0x44, 0x08, 0x73, - 0x75, 0xa9, 0x1b, 0x81, 0xcb, 0x0d, 0x5a, 0x76, 0x13, 0x04, 0xb4, 0x42, 0x98, 0xce, 0xaa, 0x7f, - 0x6f, 0xae, 0x01, 0x34, 0x19, 0xae, 0x96, 0x1b, 0x52, 0x5d, 0x21, 0x15, 0x05, 0xc1, 0x1a, 0x96, - 0xfd, 0x43, 0x39, 0x18, 0x95, 0xc1, 0xb3, 0x3b, 0xde, 0x20, 0x92, 0xe5, 0xb1, 0x72, 0xe8, 0xb0, - 0x64, 0xaa, 0x94, 0x70, 0x2d, 0x16, 0xc8, 0xe3, 0x64, 0xaa, 0x12, 0x80, 0x63, 0x1c, 0xf4, 0x0c, - 0x8c, 0x84, 0x9d, 0x0d, 0x86, 0x9e, 0x70, 0xe2, 0xa9, 0xf3, 0x62, 0x2c, 0xe1, 0xe8, 0x73, 0x30, - 0xc5, 0xeb, 0x05, 0x7e, 0xdb, 0xd9, 0xe2, 0xea, 0xcf, 0x21, 0xe5, 0x55, 0x3b, 0xb5, 0x9a, 0x80, - 0x1d, 0x1d, 0x94, 0xcf, 0x26, 0xcb, 0x98, 0xe2, 0xbc, 0x8b, 0x8a, 0xfd, 0x25, 0x40, 0xdd, 0xf1, - 0xc0, 0xd1, 0x9b, 0xdc, 0x94, 0xca, 0x0d, 0x48, 0xb3, 0x97, 0x46, 0x5c, 0x77, 0x02, 0x95, 0x86, - 0xf4, 0xbc, 0x16, 0x56, 0xf5, 0xed, 0xbf, 0x9a, 0x87, 0xa9, 0xa4, 0x4b, 0x20, 0xba, 0x01, 0xc3, - 0xfc, 0xb2, 0x13, 0xe4, 0x7b, 0x3c, 0xb8, 0x6a, 0x8e, 0x84, 0x6c, 0xdb, 0x8b, 0xfb, 0x52, 0xd4, - 0x47, 0xef, 0xc0, 0x68, 0xd3, 0xbf, 0xef, 0xdd, 0x77, 0x82, 0xe6, 0x42, 0xad, 0x2a, 0xd6, 0x65, - 0x2a, 0xcf, 0x5c, 0x89, 0xd1, 0x74, 0xe7, 0x44, 0xf6, 0xb8, 0x10, 0x83, 0xb0, 0x4e, 0x0e, 0xad, - 0xb3, 0x18, 0x87, 0x9b, 0xee, 0xd6, 0xaa, 0xd3, 0xee, 0x65, 0x57, 0xbb, 0x24, 0x91, 0x34, 0xca, - 0xe3, 0x22, 0x10, 0x22, 0x07, 0xe0, 0x98, 0x10, 0xfa, 0x6e, 0x38, 0x13, 0x66, 0xa8, 0xd9, 0xb2, - 0xd2, 0x43, 0xf4, 0xd2, 0x3c, 0x2d, 0x3e, 0x46, 0xa5, 0x99, 0x34, 0x85, 0x5c, 0x5a, 0x33, 0xf6, - 0x97, 0xcf, 0x80, 0xb1, 0x1b, 0x8d, 0x1c, 0x41, 0xd6, 0x09, 0xe5, 0x08, 0xc2, 0x50, 0x24, 0xbb, - 0xed, 0x68, 0xbf, 0xe2, 0x06, 0xbd, 0x72, 0xd8, 0x2d, 0x0b, 0x9c, 0x6e, 0x9a, 0x12, 0x82, 0x15, - 0x9d, 0xf4, 0x44, 0x4e, 0xf9, 0x0f, 0x31, 0x91, 0x53, 0xe1, 0x14, 0x13, 0x39, 0xad, 0xc1, 0xc8, - 0x96, 0x1b, 0x61, 0xd2, 0xf6, 0x05, 
0x9b, 0x99, 0xba, 0x0e, 0xaf, 0x73, 0x94, 0xee, 0xe4, 0x21, - 0x02, 0x80, 0x25, 0x11, 0xf4, 0xa6, 0xda, 0x81, 0xc3, 0xd9, 0x52, 0x5a, 0xf7, 0xcb, 0x60, 0xea, - 0x1e, 0x14, 0x89, 0x9b, 0x46, 0x1e, 0x36, 0x71, 0xd3, 0x8a, 0x4c, 0xb7, 0x54, 0xcc, 0x36, 0x82, - 0x67, 0xd9, 0x94, 0xfa, 0x24, 0x59, 0x32, 0x12, 0x53, 0x95, 0x4e, 0x2e, 0x31, 0xd5, 0xf7, 0x5b, - 0x70, 0xae, 0x9d, 0x96, 0xa3, 0x4d, 0x24, 0x49, 0x7a, 0x79, 0xe0, 0x24, 0x74, 0x46, 0x83, 0x4c, - 0x5c, 0x4f, 0x45, 0xc3, 0xe9, 0xcd, 0xd1, 0x81, 0x0e, 0x36, 0x9a, 0x22, 0xb3, 0xd2, 0x53, 0x19, - 0x19, 0xae, 0x7a, 0xe4, 0xb5, 0x5a, 0x4f, 0xc9, 0xa6, 0xf4, 0xf1, 0xac, 0x6c, 0x4a, 0x03, 0xe7, - 0x50, 0x7a, 0x53, 0xe5, 0xb6, 0x1a, 0xcf, 0x5e, 0x4a, 0x3c, 0x73, 0x55, 0xdf, 0x8c, 0x56, 0x6f, - 0xaa, 0x8c, 0x56, 0x3d, 0x62, 0xbd, 0xf1, 0x7c, 0x55, 0x7d, 0xf3, 0x58, 0x69, 0xb9, 0xa8, 0x26, - 0x4f, 0x26, 0x17, 0x95, 0x71, 0xd5, 0xf0, 0x74, 0x48, 0xcf, 0xf6, 0xb9, 0x6a, 0x0c, 0xba, 0xbd, - 0x2f, 0x1b, 0x9e, 0x77, 0x6b, 0xfa, 0xa1, 0xf2, 0x6e, 0xdd, 0xd5, 0xf3, 0x58, 0xa1, 0x3e, 0x89, - 0x9a, 0x28, 0xd2, 0x80, 0xd9, 0xab, 0xee, 0xea, 0x17, 0xe0, 0x99, 0x6c, 0xba, 0xea, 0x9e, 0xeb, - 0xa6, 0x9b, 0x7a, 0x05, 0x76, 0x65, 0xc5, 0x3a, 0x7b, 0x3a, 0x59, 0xb1, 0xce, 0x9d, 0x78, 0x56, - 0xac, 0xf3, 0xa7, 0x90, 0x15, 0xeb, 0xb1, 0x0f, 0x35, 0x2b, 0xd6, 0xcc, 0x23, 0xc8, 0x8a, 0xb5, - 0x16, 0x67, 0xc5, 0xba, 0x90, 0x3d, 0x25, 0x29, 0x96, 0xb9, 0x19, 0xb9, 0xb0, 0xee, 0xb2, 0xe7, - 0x79, 0x1e, 0xb3, 0x42, 0x04, 0xa3, 0x4b, 0xcf, 0xfb, 0x9b, 0x16, 0xd8, 0x82, 0x4f, 0x89, 0x02, - 0xe1, 0x98, 0x14, 0xa5, 0x1b, 0xe7, 0xc6, 0x7a, 0xbc, 0x87, 0x42, 0x36, 0x4d, 0xd5, 0x95, 0x9d, - 0x11, 0xcb, 0xfe, 0xcb, 0x39, 0xb8, 0xd4, 0x7b, 0x5d, 0xc7, 0x7a, 0xb2, 0x5a, 0xfc, 0xae, 0x93, - 0xd0, 0x93, 0x71, 0x21, 0x27, 0xc6, 0x1a, 0x38, 0xb0, 0xcf, 0x75, 0x98, 0x56, 0x26, 0xb9, 0x2d, - 0xb7, 0xb1, 0xaf, 0xe5, 0x03, 0x56, 0xae, 0x87, 0xf5, 0x24, 0x02, 0xee, 0xae, 0x83, 0x16, 0x60, - 0xd2, 0x28, 0xac, 0x56, 0x84, 0x30, 0xa3, 0x14, 0x73, 0x75, 0x13, 0x8c, 
0x93, 0xf8, 0xf6, 0xcf, - 0x58, 0xf0, 0x58, 0x46, 0xc2, 0x88, 0x81, 0xe3, 0xd6, 0x6c, 0xc2, 0x64, 0xdb, 0xac, 0xda, 0x27, - 0xbc, 0x95, 0x91, 0x96, 0x42, 0xf5, 0x35, 0x01, 0xc0, 0x49, 0xa2, 0x8b, 0x57, 0x7f, 0xeb, 0xf7, - 0x2f, 0x7d, 0xec, 0xb7, 0x7f, 0xff, 0xd2, 0xc7, 0x7e, 0xe7, 0xf7, 0x2f, 0x7d, 0xec, 0xcf, 0x1f, - 0x5e, 0xb2, 0x7e, 0xeb, 0xf0, 0x92, 0xf5, 0xdb, 0x87, 0x97, 0xac, 0xdf, 0x39, 0xbc, 0x64, 0xfd, - 0xde, 0xe1, 0x25, 0xeb, 0xab, 0x7f, 0x70, 0xe9, 0x63, 0x6f, 0xe7, 0xf6, 0x5e, 0xf8, 0xff, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x20, 0x56, 0xf9, 0x8e, 0x0d, 0xe7, 0x00, 0x00, + // 12807 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6b, 0x70, 0x24, 0xd7, + 0x75, 0x18, 0xac, 0x9e, 0x19, 0x3c, 0xe6, 0xe0, 0x7d, 0x77, 0x97, 0xc4, 0x82, 0xdc, 0x9d, 0x65, + 0x53, 0x5a, 0x2e, 0x45, 0x12, 0x2b, 0x2e, 0x49, 0x91, 0x16, 0x29, 0xda, 0x00, 0x06, 0xd8, 0x1d, + 0xee, 0x02, 0x3b, 0xbc, 0x83, 0x5d, 0x4a, 0x34, 0x25, 0xab, 0x31, 0x73, 0x01, 0x34, 0xd1, 0xe8, + 0x1e, 0x76, 0xf7, 0x60, 0x17, 0xfc, 0xec, 0xaa, 0x2f, 0x72, 0xec, 0x44, 0xb1, 0x2b, 0xa5, 0x8a, + 0x55, 0x79, 0xd8, 0x2e, 0xa7, 0xca, 0x71, 0xca, 0x76, 0x9c, 0xa4, 0xe2, 0xd8, 0xb1, 0x1d, 0xcb, + 0x4e, 0x1c, 0x3b, 0x3f, 0x9c, 0x3f, 0x8a, 0x93, 0xaa, 0x94, 0x5c, 0xe5, 0x0a, 0x62, 0xc3, 0x79, + 0x94, 0x7f, 0xe4, 0x51, 0x71, 0x7e, 0xc4, 0x88, 0x2b, 0x4e, 0xdd, 0x67, 0xdf, 0xdb, 0xd3, 0x3d, + 0x33, 0x58, 0x62, 0x41, 0x5a, 0xa5, 0x7f, 0x33, 0xf7, 0x9c, 0x7b, 0xee, 0xed, 0xfb, 0x3c, 0xe7, + 0xdc, 0xf3, 0x80, 0x57, 0x77, 0x5e, 0x89, 0xe6, 0xdd, 0xe0, 0xea, 0x4e, 0x67, 0x83, 0x84, 0x3e, + 0x89, 0x49, 0x74, 0x75, 0x8f, 0xf8, 0xad, 0x20, 0xbc, 0x2a, 0x00, 0x4e, 0xdb, 0xbd, 0xda, 0x0c, + 0x42, 0x72, 0x75, 0xef, 0xf9, 0xab, 0x5b, 0xc4, 0x27, 0xa1, 0x13, 0x93, 0xd6, 0x7c, 0x3b, 0x0c, + 0xe2, 0x00, 0x21, 0x8e, 0x33, 0xef, 0xb4, 0xdd, 0x79, 0x8a, 0x33, 0xbf, 0xf7, 0xfc, 0xdc, 0x73, + 0x5b, 0x6e, 0xbc, 0xdd, 0xd9, 0x98, 0x6f, 0x06, 0xbb, 0x57, 0xb7, 0x82, 0xad, 0xe0, 
0x2a, 0x43, + 0xdd, 0xe8, 0x6c, 0xb2, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0xbd, 0x98, 0x34, 0xb3, 0xeb, + 0x34, 0xb7, 0x5d, 0x9f, 0x84, 0xfb, 0x57, 0xdb, 0x3b, 0x5b, 0xac, 0xdd, 0x90, 0x44, 0x41, 0x27, + 0x6c, 0x92, 0x74, 0xc3, 0x3d, 0x6b, 0x45, 0x57, 0x77, 0x49, 0xec, 0x64, 0x74, 0x77, 0xee, 0x6a, + 0x5e, 0xad, 0xb0, 0xe3, 0xc7, 0xee, 0x6e, 0x77, 0x33, 0x9f, 0xee, 0x57, 0x21, 0x6a, 0x6e, 0x93, + 0x5d, 0xa7, 0xab, 0xde, 0x0b, 0x79, 0xf5, 0x3a, 0xb1, 0xeb, 0x5d, 0x75, 0xfd, 0x38, 0x8a, 0xc3, + 0x74, 0x25, 0xfb, 0x9b, 0x16, 0x5c, 0x5a, 0x78, 0xab, 0xb1, 0xec, 0x39, 0x51, 0xec, 0x36, 0x17, + 0xbd, 0xa0, 0xb9, 0xd3, 0x88, 0x83, 0x90, 0xdc, 0x0d, 0xbc, 0xce, 0x2e, 0x69, 0xb0, 0x81, 0x40, + 0xcf, 0xc2, 0xe8, 0x1e, 0xfb, 0x5f, 0xab, 0xce, 0x5a, 0x97, 0xac, 0x2b, 0xe5, 0xc5, 0xe9, 0xdf, + 0x3e, 0xa8, 0x7c, 0xec, 0xf0, 0xa0, 0x32, 0x7a, 0x57, 0x94, 0x63, 0x85, 0x81, 0x2e, 0xc3, 0xf0, + 0x66, 0xb4, 0xbe, 0xdf, 0x26, 0xb3, 0x05, 0x86, 0x3b, 0x29, 0x70, 0x87, 0x57, 0x1a, 0xb4, 0x14, + 0x0b, 0x28, 0xba, 0x0a, 0xe5, 0xb6, 0x13, 0xc6, 0x6e, 0xec, 0x06, 0xfe, 0x6c, 0xf1, 0x92, 0x75, + 0x65, 0x68, 0x71, 0x46, 0xa0, 0x96, 0xeb, 0x12, 0x80, 0x13, 0x1c, 0xda, 0x8d, 0x90, 0x38, 0xad, + 0xdb, 0xbe, 0xb7, 0x3f, 0x5b, 0xba, 0x64, 0x5d, 0x19, 0x4d, 0xba, 0x81, 0x45, 0x39, 0x56, 0x18, + 0xf6, 0x8f, 0x16, 0x60, 0x74, 0x61, 0x73, 0xd3, 0xf5, 0xdd, 0x78, 0x1f, 0xdd, 0x85, 0x71, 0x3f, + 0x68, 0x11, 0xf9, 0x9f, 0x7d, 0xc5, 0xd8, 0xb5, 0x4b, 0xf3, 0xdd, 0x4b, 0x69, 0x7e, 0x4d, 0xc3, + 0x5b, 0x9c, 0x3e, 0x3c, 0xa8, 0x8c, 0xeb, 0x25, 0xd8, 0xa0, 0x83, 0x30, 0x8c, 0xb5, 0x83, 0x96, + 0x22, 0x5b, 0x60, 0x64, 0x2b, 0x59, 0x64, 0xeb, 0x09, 0xda, 0xe2, 0xd4, 0xe1, 0x41, 0x65, 0x4c, + 0x2b, 0xc0, 0x3a, 0x11, 0xb4, 0x01, 0x53, 0xf4, 0xaf, 0x1f, 0xbb, 0x8a, 0x6e, 0x91, 0xd1, 0x7d, + 0x32, 0x8f, 0xae, 0x86, 0xba, 0x78, 0xe6, 0xf0, 0xa0, 0x32, 0x95, 0x2a, 0xc4, 0x69, 0x82, 0xf6, + 0xfb, 0x30, 0xb9, 0x10, 0xc7, 0x4e, 0x73, 0x9b, 0xb4, 0xf8, 0x0c, 0xa2, 0x17, 0xa1, 0xe4, 0x3b, + 0xbb, 0x44, 0xcc, 0xef, 
0x25, 0x31, 0xb0, 0xa5, 0x35, 0x67, 0x97, 0x1c, 0x1d, 0x54, 0xa6, 0xef, + 0xf8, 0xee, 0x7b, 0x1d, 0xb1, 0x2a, 0x68, 0x19, 0x66, 0xd8, 0xe8, 0x1a, 0x40, 0x8b, 0xec, 0xb9, + 0x4d, 0x52, 0x77, 0xe2, 0x6d, 0x31, 0xdf, 0x48, 0xd4, 0x85, 0xaa, 0x82, 0x60, 0x0d, 0xcb, 0xbe, + 0x0f, 0xe5, 0x85, 0xbd, 0xc0, 0x6d, 0xd5, 0x83, 0x56, 0x84, 0x76, 0x60, 0xaa, 0x1d, 0x92, 0x4d, + 0x12, 0xaa, 0xa2, 0x59, 0xeb, 0x52, 0xf1, 0xca, 0xd8, 0xb5, 0x2b, 0x99, 0x1f, 0x6b, 0xa2, 0x2e, + 0xfb, 0x71, 0xb8, 0xbf, 0xf8, 0xa8, 0x68, 0x6f, 0x2a, 0x05, 0xc5, 0x69, 0xca, 0xf6, 0xbf, 0x2c, + 0xc0, 0xb9, 0x85, 0xf7, 0x3b, 0x21, 0xa9, 0xba, 0xd1, 0x4e, 0x7a, 0x85, 0xb7, 0xdc, 0x68, 0x67, + 0x2d, 0x19, 0x01, 0xb5, 0xb4, 0xaa, 0xa2, 0x1c, 0x2b, 0x0c, 0xf4, 0x1c, 0x8c, 0xd0, 0xdf, 0x77, + 0x70, 0x4d, 0x7c, 0xf2, 0x19, 0x81, 0x3c, 0x56, 0x75, 0x62, 0xa7, 0xca, 0x41, 0x58, 0xe2, 0xa0, + 0x55, 0x18, 0x6b, 0xb2, 0x0d, 0xb9, 0xb5, 0x1a, 0xb4, 0x08, 0x9b, 0xcc, 0xf2, 0xe2, 0x33, 0x14, + 0x7d, 0x29, 0x29, 0x3e, 0x3a, 0xa8, 0xcc, 0xf2, 0xbe, 0x09, 0x12, 0x1a, 0x0c, 0xeb, 0xf5, 0x91, + 0xad, 0xf6, 0x57, 0x89, 0x51, 0x82, 0x8c, 0xbd, 0x75, 0x45, 0xdb, 0x2a, 0x43, 0x6c, 0xab, 0x8c, + 0x67, 0x6f, 0x13, 0xf4, 0x3c, 0x94, 0x76, 0x5c, 0xbf, 0x35, 0x3b, 0xcc, 0x68, 0x5d, 0xa0, 0x73, + 0x7e, 0xd3, 0xf5, 0x5b, 0x47, 0x07, 0x95, 0x19, 0xa3, 0x3b, 0xb4, 0x10, 0x33, 0x54, 0xfb, 0x8f, + 0x2d, 0xa8, 0x30, 0xd8, 0x8a, 0xeb, 0x91, 0x3a, 0x09, 0x23, 0x37, 0x8a, 0x89, 0x1f, 0x1b, 0x03, + 0x7a, 0x0d, 0x20, 0x22, 0xcd, 0x90, 0xc4, 0xda, 0x90, 0xaa, 0x85, 0xd1, 0x50, 0x10, 0xac, 0x61, + 0xd1, 0x03, 0x21, 0xda, 0x76, 0x42, 0xb6, 0xbe, 0xc4, 0xc0, 0xaa, 0x03, 0xa1, 0x21, 0x01, 0x38, + 0xc1, 0x31, 0x0e, 0x84, 0x62, 0xbf, 0x03, 0x01, 0x7d, 0x16, 0xa6, 0x92, 0xc6, 0xa2, 0xb6, 0xd3, + 0x94, 0x03, 0xc8, 0xb6, 0x4c, 0xc3, 0x04, 0xe1, 0x34, 0xae, 0xfd, 0xf7, 0x2c, 0xb1, 0x78, 0xe8, + 0x57, 0x7f, 0xc4, 0xbf, 0xd5, 0xfe, 0x15, 0x0b, 0x46, 0x16, 0x5d, 0xbf, 0xe5, 0xfa, 0x5b, 0xe8, + 0x4b, 0x30, 0x4a, 0xef, 0xa6, 0x96, 0x13, 0x3b, 0xe2, 0xdc, 
0xfb, 0x94, 0xb6, 0xb7, 0xd4, 0x55, + 0x31, 0xdf, 0xde, 0xd9, 0xa2, 0x05, 0xd1, 0x3c, 0xc5, 0xa6, 0xbb, 0xed, 0xf6, 0xc6, 0xbb, 0xa4, + 0x19, 0xaf, 0x92, 0xd8, 0x49, 0x3e, 0x27, 0x29, 0xc3, 0x8a, 0x2a, 0xba, 0x09, 0xc3, 0xb1, 0x13, + 0x6e, 0x91, 0x58, 0x1c, 0x80, 0x99, 0x07, 0x15, 0xaf, 0x89, 0xe9, 0x8e, 0x24, 0x7e, 0x93, 0x24, + 0xd7, 0xc2, 0x3a, 0xab, 0x8a, 0x05, 0x09, 0xfb, 0xaf, 0x0c, 0xc3, 0xf9, 0xa5, 0x46, 0x2d, 0x67, + 0x5d, 0x5d, 0x86, 0xe1, 0x56, 0xe8, 0xee, 0x91, 0x50, 0x8c, 0xb3, 0xa2, 0x52, 0x65, 0xa5, 0x58, + 0x40, 0xd1, 0x2b, 0x30, 0xce, 0x2f, 0xa4, 0x1b, 0x8e, 0xdf, 0xf2, 0xe4, 0x10, 0x9f, 0x15, 0xd8, + 0xe3, 0x77, 0x35, 0x18, 0x36, 0x30, 0x8f, 0xb9, 0xa8, 0x2e, 0xa7, 0x36, 0x63, 0xde, 0x65, 0xf7, + 0x15, 0x0b, 0xa6, 0x79, 0x33, 0x0b, 0x71, 0x1c, 0xba, 0x1b, 0x9d, 0x98, 0x44, 0xb3, 0x43, 0xec, + 0xa4, 0x5b, 0xca, 0x1a, 0xad, 0xdc, 0x11, 0x98, 0xbf, 0x9b, 0xa2, 0xc2, 0x0f, 0xc1, 0x59, 0xd1, + 0xee, 0x74, 0x1a, 0x8c, 0xbb, 0x9a, 0x45, 0xdf, 0x6f, 0xc1, 0x5c, 0x33, 0xf0, 0xe3, 0x30, 0xf0, + 0x3c, 0x12, 0xd6, 0x3b, 0x1b, 0x9e, 0x1b, 0x6d, 0xf3, 0x75, 0x8a, 0xc9, 0x26, 0x3b, 0x09, 0x72, + 0xe6, 0x50, 0x21, 0x89, 0x39, 0xbc, 0x78, 0x78, 0x50, 0x99, 0x5b, 0xca, 0x25, 0x85, 0x7b, 0x34, + 0x83, 0x76, 0x00, 0xd1, 0xab, 0xb4, 0x11, 0x3b, 0x5b, 0x24, 0x69, 0x7c, 0x64, 0xf0, 0xc6, 0x1f, + 0x39, 0x3c, 0xa8, 0xa0, 0xb5, 0x2e, 0x12, 0x38, 0x83, 0x2c, 0x7a, 0x0f, 0xce, 0xd2, 0xd2, 0xae, + 0x6f, 0x1d, 0x1d, 0xbc, 0xb9, 0xd9, 0xc3, 0x83, 0xca, 0xd9, 0xb5, 0x0c, 0x22, 0x38, 0x93, 0xf4, + 0xdc, 0x12, 0x9c, 0xcb, 0x9c, 0x2a, 0x34, 0x0d, 0xc5, 0x1d, 0xc2, 0x59, 0x90, 0x32, 0xa6, 0x3f, + 0xd1, 0x59, 0x18, 0xda, 0x73, 0xbc, 0x8e, 0x58, 0xa5, 0x98, 0xff, 0xf9, 0x4c, 0xe1, 0x15, 0xcb, + 0x6e, 0xc2, 0xf8, 0x92, 0xd3, 0x76, 0x36, 0x5c, 0xcf, 0x8d, 0x5d, 0x12, 0xa1, 0xa7, 0xa0, 0xe8, + 0xb4, 0x5a, 0xec, 0x8a, 0x2c, 0x2f, 0x9e, 0x3b, 0x3c, 0xa8, 0x14, 0x17, 0x5a, 0xf4, 0xac, 0x06, + 0x85, 0xb5, 0x8f, 0x29, 0x06, 0xfa, 0x24, 0x94, 0x5a, 0x61, 0xd0, 0x9e, 0x2d, 0x30, 0x4c, 0x3a, + 
0x54, 0xa5, 0x6a, 0x18, 0xb4, 0x53, 0xa8, 0x0c, 0xc7, 0xfe, 0x8d, 0x02, 0x3c, 0xbe, 0x44, 0xda, + 0xdb, 0x2b, 0x8d, 0x9c, 0x4d, 0x77, 0x05, 0x46, 0x77, 0x03, 0xdf, 0x8d, 0x83, 0x30, 0x12, 0x4d, + 0xb3, 0xdb, 0x64, 0x55, 0x94, 0x61, 0x05, 0x45, 0x97, 0xa0, 0xd4, 0x4e, 0x38, 0x81, 0x71, 0xc9, + 0x45, 0x30, 0x1e, 0x80, 0x41, 0x28, 0x46, 0x27, 0x22, 0xa1, 0xb8, 0x05, 0x15, 0xc6, 0x9d, 0x88, + 0x84, 0x98, 0x41, 0x92, 0xe3, 0x94, 0x1e, 0xb4, 0x62, 0x5b, 0xa5, 0x8e, 0x53, 0x0a, 0xc1, 0x1a, + 0x16, 0xaa, 0x43, 0x39, 0x52, 0x93, 0x3a, 0x34, 0xf8, 0xa4, 0x4e, 0xb0, 0xf3, 0x56, 0xcd, 0x64, + 0x42, 0xc4, 0x38, 0x06, 0x86, 0xfb, 0x9e, 0xb7, 0x5f, 0x2f, 0x00, 0xe2, 0x43, 0xf8, 0xe7, 0x6c, + 0xe0, 0xee, 0x74, 0x0f, 0x5c, 0x26, 0xe7, 0x75, 0x2b, 0x68, 0x3a, 0x5e, 0xfa, 0x08, 0x3f, 0xa9, + 0xd1, 0xfb, 0x5f, 0x16, 0x3c, 0xbe, 0xe4, 0xfa, 0x2d, 0x12, 0xe6, 0x2c, 0xc0, 0x87, 0x23, 0x80, + 0x1c, 0xef, 0xa4, 0x37, 0x96, 0x58, 0xe9, 0x04, 0x96, 0x98, 0xfd, 0xdf, 0x2d, 0x40, 0xfc, 0xb3, + 0x3f, 0x72, 0x1f, 0x7b, 0xa7, 0xfb, 0x63, 0x4f, 0x60, 0x59, 0xd8, 0xb7, 0x60, 0x72, 0xc9, 0x73, + 0x89, 0x1f, 0xd7, 0xea, 0x4b, 0x81, 0xbf, 0xe9, 0x6e, 0xa1, 0xcf, 0xc0, 0x24, 0x95, 0x69, 0x83, + 0x4e, 0xdc, 0x20, 0xcd, 0xc0, 0x67, 0xec, 0x3f, 0x95, 0x04, 0xd1, 0xe1, 0x41, 0x65, 0x72, 0xdd, + 0x80, 0xe0, 0x14, 0xa6, 0xfd, 0x7b, 0x74, 0xfc, 0x82, 0xdd, 0x76, 0xe0, 0x13, 0x3f, 0x5e, 0x0a, + 0xfc, 0x16, 0x17, 0x13, 0x3f, 0x03, 0xa5, 0x98, 0x8e, 0x07, 0x1f, 0xbb, 0xcb, 0x72, 0xa3, 0xd0, + 0x51, 0x38, 0x3a, 0xa8, 0x3c, 0xd2, 0x5d, 0x83, 0x8d, 0x13, 0xab, 0x83, 0xbe, 0x03, 0x86, 0xa3, + 0xd8, 0x89, 0x3b, 0x91, 0x18, 0xcd, 0x27, 0xe4, 0x68, 0x36, 0x58, 0xe9, 0xd1, 0x41, 0x65, 0x4a, + 0x55, 0xe3, 0x45, 0x58, 0x54, 0x40, 0x4f, 0xc3, 0xc8, 0x2e, 0x89, 0x22, 0x67, 0x4b, 0x72, 0xf8, + 0x53, 0xa2, 0xee, 0xc8, 0x2a, 0x2f, 0xc6, 0x12, 0x8e, 0x9e, 0x84, 0x21, 0x12, 0x86, 0x41, 0x28, + 0xf6, 0xe8, 0x84, 0x40, 0x1c, 0x5a, 0xa6, 0x85, 0x98, 0xc3, 0xec, 0x7f, 0x6d, 0xc1, 0x94, 0xea, + 0x2b, 0x6f, 0xeb, 0x14, 0x58, 0xb9, 
0xb7, 0x01, 0x9a, 0xf2, 0x03, 0x23, 0x76, 0x7b, 0x8c, 0x5d, + 0xbb, 0x9c, 0xc9, 0xa0, 0x74, 0x0d, 0x63, 0x42, 0x59, 0x15, 0x45, 0x58, 0xa3, 0x66, 0xff, 0xba, + 0x05, 0x67, 0x52, 0x5f, 0x74, 0xcb, 0x8d, 0x62, 0xf4, 0x4e, 0xd7, 0x57, 0xcd, 0x0f, 0xf6, 0x55, + 0xb4, 0x36, 0xfb, 0x26, 0xb5, 0x94, 0x65, 0x89, 0xf6, 0x45, 0x37, 0x60, 0xc8, 0x8d, 0xc9, 0xae, + 0xfc, 0x98, 0x27, 0x7b, 0x7e, 0x0c, 0xef, 0x55, 0x32, 0x23, 0x35, 0x5a, 0x13, 0x73, 0x02, 0xf6, + 0x8f, 0x14, 0xa1, 0xcc, 0x97, 0xed, 0xaa, 0xd3, 0x3e, 0x85, 0xb9, 0xa8, 0x41, 0x89, 0x51, 0xe7, + 0x1d, 0x7f, 0x2a, 0xbb, 0xe3, 0xa2, 0x3b, 0xf3, 0x54, 0x4e, 0xe3, 0xac, 0xa0, 0xba, 0x1a, 0x68, + 0x11, 0x66, 0x24, 0x90, 0x03, 0xb0, 0xe1, 0xfa, 0x4e, 0xb8, 0x4f, 0xcb, 0x66, 0x8b, 0x8c, 0xe0, + 0x73, 0xbd, 0x09, 0x2e, 0x2a, 0x7c, 0x4e, 0x56, 0xf5, 0x35, 0x01, 0x60, 0x8d, 0xe8, 0xdc, 0xcb, + 0x50, 0x56, 0xc8, 0xc7, 0xe1, 0x71, 0xe6, 0x3e, 0x0b, 0x53, 0xa9, 0xb6, 0xfa, 0x55, 0x1f, 0xd7, + 0x59, 0xa4, 0x5f, 0x65, 0xa7, 0x80, 0xe8, 0xf5, 0xb2, 0xbf, 0x27, 0x4e, 0xd1, 0xf7, 0xe1, 0xac, + 0x97, 0x71, 0x38, 0x89, 0xa9, 0x1a, 0xfc, 0x30, 0x7b, 0x5c, 0x7c, 0xf6, 0xd9, 0x2c, 0x28, 0xce, + 0x6c, 0x83, 0x5e, 0xfb, 0x41, 0x9b, 0xae, 0x79, 0xc7, 0x63, 0xfd, 0x15, 0xd2, 0xf7, 0x6d, 0x51, + 0x86, 0x15, 0x94, 0x1e, 0x61, 0x67, 0x55, 0xe7, 0x6f, 0x92, 0xfd, 0x06, 0xf1, 0x48, 0x33, 0x0e, + 0xc2, 0x0f, 0xb5, 0xfb, 0x17, 0xf8, 0xe8, 0xf3, 0x13, 0x70, 0x4c, 0x10, 0x28, 0xde, 0x24, 0xfb, + 0x7c, 0x2a, 0xf4, 0xaf, 0x2b, 0xf6, 0xfc, 0xba, 0x9f, 0xb7, 0x60, 0x42, 0x7d, 0xdd, 0x29, 0x6c, + 0xf5, 0x45, 0x73, 0xab, 0x5f, 0xe8, 0xb9, 0xc0, 0x73, 0x36, 0xf9, 0xd7, 0x0b, 0x70, 0x5e, 0xe1, + 0x50, 0x76, 0x9f, 0xff, 0x11, 0xab, 0xea, 0x2a, 0x94, 0x7d, 0xa5, 0x3d, 0xb0, 0x4c, 0xb1, 0x3d, + 0xd1, 0x1d, 0x24, 0x38, 0x94, 0x6b, 0xf3, 0x13, 0x11, 0x7f, 0x5c, 0x57, 0xab, 0x09, 0x15, 0xda, + 0x22, 0x14, 0x3b, 0x6e, 0x4b, 0xdc, 0x19, 0x9f, 0x92, 0xa3, 0x7d, 0xa7, 0x56, 0x3d, 0x3a, 0xa8, + 0x3c, 0x91, 0xa7, 0xd2, 0xa5, 0x97, 0x55, 0x34, 0x7f, 0xa7, 0x56, 0xc5, 
0xb4, 0x32, 0x5a, 0x80, + 0x29, 0xa9, 0xb5, 0xbe, 0x4b, 0x39, 0xa8, 0xc0, 0x17, 0x57, 0x8b, 0xd2, 0x8d, 0x61, 0x13, 0x8c, + 0xd3, 0xf8, 0xa8, 0x0a, 0xd3, 0x3b, 0x9d, 0x0d, 0xe2, 0x91, 0x98, 0x7f, 0xf0, 0x4d, 0xc2, 0x35, + 0x47, 0xe5, 0x44, 0xb4, 0xbc, 0x99, 0x82, 0xe3, 0xae, 0x1a, 0xf6, 0x9f, 0xb1, 0x23, 0x5e, 0x8c, + 0x5e, 0x3d, 0x0c, 0xe8, 0xc2, 0xa2, 0xd4, 0x3f, 0xcc, 0xe5, 0x3c, 0xc8, 0xaa, 0xb8, 0x49, 0xf6, + 0xd7, 0x03, 0xca, 0x6c, 0x67, 0xaf, 0x0a, 0x63, 0xcd, 0x97, 0x7a, 0xae, 0xf9, 0x5f, 0x2c, 0xc0, + 0x39, 0x35, 0x02, 0x06, 0x5f, 0xf7, 0xe7, 0x7d, 0x0c, 0x9e, 0x87, 0xb1, 0x16, 0xd9, 0x74, 0x3a, + 0x5e, 0xac, 0xd4, 0x98, 0x43, 0x5c, 0x95, 0x5d, 0x4d, 0x8a, 0xb1, 0x8e, 0x73, 0x8c, 0x61, 0xfb, + 0xa9, 0x31, 0x76, 0xb7, 0xc6, 0x0e, 0x5d, 0xe3, 0x6a, 0xd7, 0x58, 0xb9, 0xbb, 0xe6, 0x49, 0x18, + 0x72, 0x77, 0x29, 0xaf, 0x55, 0x30, 0x59, 0xa8, 0x1a, 0x2d, 0xc4, 0x1c, 0x86, 0x3e, 0x01, 0x23, + 0xcd, 0x60, 0x77, 0xd7, 0xf1, 0x5b, 0xec, 0xca, 0x2b, 0x2f, 0x8e, 0x51, 0x76, 0x6c, 0x89, 0x17, + 0x61, 0x09, 0x43, 0x8f, 0x43, 0xc9, 0x09, 0xb7, 0xa2, 0xd9, 0x12, 0xc3, 0x19, 0xa5, 0x2d, 0x2d, + 0x84, 0x5b, 0x11, 0x66, 0xa5, 0x54, 0xaa, 0xba, 0x17, 0x84, 0x3b, 0xae, 0xbf, 0x55, 0x75, 0x43, + 0xb1, 0x25, 0xd4, 0x5d, 0xf8, 0x96, 0x82, 0x60, 0x0d, 0x0b, 0xad, 0xc0, 0x50, 0x3b, 0x08, 0xe3, + 0x68, 0x76, 0x98, 0x0d, 0xf7, 0x13, 0x39, 0x07, 0x11, 0xff, 0xda, 0x7a, 0x10, 0xc6, 0xc9, 0x07, + 0xd0, 0x7f, 0x11, 0xe6, 0xd5, 0xd1, 0x77, 0x40, 0x91, 0xf8, 0x7b, 0xb3, 0x23, 0x8c, 0xca, 0x5c, + 0x16, 0x95, 0x65, 0x7f, 0xef, 0xae, 0x13, 0x26, 0xa7, 0xf4, 0xb2, 0xbf, 0x87, 0x69, 0x1d, 0xf4, + 0x79, 0x28, 0xcb, 0x2d, 0x1e, 0x09, 0x35, 0x47, 0xe6, 0x12, 0x93, 0x07, 0x03, 0x26, 0xef, 0x75, + 0xdc, 0x90, 0xec, 0x12, 0x3f, 0x8e, 0x92, 0x33, 0x4d, 0x42, 0x23, 0x9c, 0x50, 0x43, 0x9f, 0x97, + 0xba, 0xb5, 0xd5, 0xa0, 0xe3, 0xc7, 0xd1, 0x6c, 0x99, 0x75, 0x2f, 0xf3, 0xd5, 0xe3, 0x6e, 0x82, + 0x97, 0x56, 0xbe, 0xf1, 0xca, 0xd8, 0x20, 0x85, 0x30, 0x4c, 0x78, 0xee, 0x1e, 0xf1, 0x49, 0x14, + 0xd5, 0xc3, 
0x60, 0x83, 0xcc, 0x02, 0xeb, 0xf9, 0xf9, 0xec, 0xc7, 0x80, 0x60, 0x83, 0x2c, 0xce, + 0x1c, 0x1e, 0x54, 0x26, 0x6e, 0xe9, 0x75, 0xb0, 0x49, 0x02, 0xdd, 0x81, 0x49, 0x2a, 0xd7, 0xb8, + 0x09, 0xd1, 0xb1, 0x7e, 0x44, 0x99, 0xf4, 0x81, 0x8d, 0x4a, 0x38, 0x45, 0x04, 0xbd, 0x01, 0x65, + 0xcf, 0xdd, 0x24, 0xcd, 0xfd, 0xa6, 0x47, 0x66, 0xc7, 0x19, 0xc5, 0xcc, 0x6d, 0x75, 0x4b, 0x22, + 0x71, 0xb9, 0x48, 0xfd, 0xc5, 0x49, 0x75, 0x74, 0x17, 0x1e, 0x89, 0x49, 0xb8, 0xeb, 0xfa, 0x0e, + 0xdd, 0x0e, 0x42, 0x5e, 0x60, 0x4f, 0x2a, 0x13, 0x6c, 0xbd, 0x5d, 0x14, 0x43, 0xf7, 0xc8, 0x7a, + 0x26, 0x16, 0xce, 0xa9, 0x8d, 0x6e, 0xc3, 0x14, 0xdb, 0x09, 0xf5, 0x8e, 0xe7, 0xd5, 0x03, 0xcf, + 0x6d, 0xee, 0xcf, 0x4e, 0x32, 0x82, 0x9f, 0x90, 0xf7, 0x42, 0xcd, 0x04, 0x1f, 0x1d, 0x54, 0x20, + 0xf9, 0x87, 0xd3, 0xb5, 0xd1, 0x06, 0xd3, 0xa1, 0x77, 0x42, 0x37, 0xde, 0xa7, 0xeb, 0x97, 0xdc, + 0x8f, 0x67, 0xa7, 0x7a, 0x8a, 0xc2, 0x3a, 0xaa, 0x52, 0xb4, 0xeb, 0x85, 0x38, 0x4d, 0x90, 0x6e, + 0xed, 0x28, 0x6e, 0xb9, 0xfe, 0xec, 0x34, 0x3b, 0x31, 0xd4, 0xce, 0x68, 0xd0, 0x42, 0xcc, 0x61, + 0x4c, 0x7f, 0x4e, 0x7f, 0xdc, 0xa6, 0x27, 0xe8, 0x0c, 0x43, 0x4c, 0xf4, 0xe7, 0x12, 0x80, 0x13, + 0x1c, 0xca, 0xd4, 0xc4, 0xf1, 0xfe, 0x2c, 0x62, 0xa8, 0x6a, 0xbb, 0xac, 0xaf, 0x7f, 0x1e, 0xd3, + 0x72, 0x74, 0x0b, 0x46, 0x88, 0xbf, 0xb7, 0x12, 0x06, 0xbb, 0xb3, 0x67, 0xf2, 0xf7, 0xec, 0x32, + 0x47, 0xe1, 0x07, 0x7a, 0x22, 0xe0, 0x89, 0x62, 0x2c, 0x49, 0xa0, 0xfb, 0x30, 0x9b, 0x31, 0x23, + 0x7c, 0x02, 0xce, 0xb2, 0x09, 0x78, 0x4d, 0xd4, 0x9d, 0x5d, 0xcf, 0xc1, 0x3b, 0xea, 0x01, 0xc3, + 0xb9, 0xd4, 0xd1, 0x17, 0x60, 0x82, 0x6f, 0x28, 0xfe, 0xf8, 0x16, 0xcd, 0x9e, 0x63, 0x5f, 0x73, + 0x29, 0x7f, 0x73, 0x72, 0xc4, 0xc5, 0x73, 0xa2, 0x43, 0x13, 0x7a, 0x69, 0x84, 0x4d, 0x6a, 0xf6, + 0x06, 0x4c, 0xaa, 0x73, 0x8b, 0x2d, 0x1d, 0x54, 0x81, 0x21, 0xc6, 0xed, 0x08, 0xfd, 0x56, 0x99, + 0xce, 0x14, 0xe3, 0x84, 0x30, 0x2f, 0x67, 0x33, 0xe5, 0xbe, 0x4f, 0x16, 0xf7, 0x63, 0xc2, 0xa5, + 0xea, 0xa2, 0x36, 0x53, 0x12, 0x80, 0x13, 0x1c, 
0xfb, 0xff, 0x72, 0xae, 0x31, 0x39, 0x1c, 0x07, + 0xb8, 0x0e, 0x9e, 0x85, 0xd1, 0xed, 0x20, 0x8a, 0x29, 0x36, 0x6b, 0x63, 0x28, 0xe1, 0x13, 0x6f, + 0x88, 0x72, 0xac, 0x30, 0xd0, 0xab, 0x30, 0xd1, 0xd4, 0x1b, 0x10, 0x77, 0x99, 0x1a, 0x02, 0xa3, + 0x75, 0x6c, 0xe2, 0xa2, 0x57, 0x60, 0x94, 0x3d, 0x9d, 0x37, 0x03, 0x4f, 0x30, 0x59, 0xf2, 0x42, + 0x1e, 0xad, 0x8b, 0xf2, 0x23, 0xed, 0x37, 0x56, 0xd8, 0xe8, 0x32, 0x0c, 0xd3, 0x2e, 0xd4, 0xea, + 0xe2, 0x16, 0x51, 0xaa, 0x9a, 0x1b, 0xac, 0x14, 0x0b, 0xa8, 0xfd, 0xd7, 0x0a, 0xda, 0x28, 0x53, + 0x89, 0x94, 0xa0, 0x3a, 0x8c, 0xdc, 0x73, 0xdc, 0xd8, 0xf5, 0xb7, 0x04, 0xbb, 0xf0, 0x74, 0xcf, + 0x2b, 0x85, 0x55, 0x7a, 0x8b, 0x57, 0xe0, 0x97, 0x9e, 0xf8, 0x83, 0x25, 0x19, 0x4a, 0x31, 0xec, + 0xf8, 0x3e, 0xa5, 0x58, 0x18, 0x94, 0x22, 0xe6, 0x15, 0x38, 0x45, 0xf1, 0x07, 0x4b, 0x32, 0xe8, + 0x1d, 0x00, 0xb9, 0x2c, 0x49, 0x4b, 0x3c, 0x59, 0x3f, 0xdb, 0x9f, 0xe8, 0xba, 0xaa, 0xb3, 0x38, + 0x49, 0xaf, 0xd4, 0xe4, 0x3f, 0xd6, 0xe8, 0xd9, 0x31, 0x63, 0xab, 0xba, 0x3b, 0x83, 0xbe, 0x9b, + 0x9e, 0x04, 0x4e, 0x18, 0x93, 0xd6, 0x42, 0x2c, 0x06, 0xe7, 0x93, 0x83, 0xc9, 0x14, 0xeb, 0xee, + 0x2e, 0xd1, 0x4f, 0x0d, 0x41, 0x04, 0x27, 0xf4, 0xec, 0x5f, 0x2e, 0xc2, 0x6c, 0x5e, 0x77, 0xe9, + 0xa2, 0x23, 0xf7, 0xdd, 0x78, 0x89, 0x72, 0x43, 0x96, 0xb9, 0xe8, 0x96, 0x45, 0x39, 0x56, 0x18, + 0x74, 0xf6, 0x23, 0x77, 0x4b, 0x8a, 0x84, 0x43, 0xc9, 0xec, 0x37, 0x58, 0x29, 0x16, 0x50, 0x8a, + 0x17, 0x12, 0x27, 0x12, 0x36, 0x11, 0xda, 0x2a, 0xc1, 0xac, 0x14, 0x0b, 0xa8, 0xae, 0x6f, 0x2a, + 0xf5, 0xd1, 0x37, 0x19, 0x43, 0x34, 0x74, 0xb2, 0x43, 0x84, 0xbe, 0x08, 0xb0, 0xe9, 0xfa, 0x6e, + 0xb4, 0xcd, 0xa8, 0x0f, 0x1f, 0x9b, 0xba, 0xe2, 0xa5, 0x56, 0x14, 0x15, 0xac, 0x51, 0x44, 0x2f, + 0xc1, 0x98, 0xda, 0x80, 0xb5, 0x2a, 0x7b, 0x20, 0xd2, 0x1e, 0xdc, 0x93, 0xd3, 0xa8, 0x8a, 0x75, + 0x3c, 0xfb, 0xdd, 0xf4, 0x7a, 0x11, 0x3b, 0x40, 0x1b, 0x5f, 0x6b, 0xd0, 0xf1, 0x2d, 0xf4, 0x1e, + 0x5f, 0xfb, 0x37, 0x8b, 0x30, 0x65, 0x34, 0xd6, 0x89, 0x06, 0x38, 0xb3, 0xae, 0xd3, 
0x7b, 0xce, + 0x89, 0x89, 0xd8, 0x7f, 0x76, 0xff, 0xad, 0xa2, 0xdf, 0x85, 0x74, 0x07, 0xf0, 0xfa, 0xe8, 0x8b, + 0x50, 0xf6, 0x9c, 0x88, 0xe9, 0xae, 0x88, 0xd8, 0x77, 0x83, 0x10, 0x4b, 0xe4, 0x08, 0x27, 0x8a, + 0xb5, 0xab, 0x86, 0xd3, 0x4e, 0x48, 0xd2, 0x0b, 0x99, 0xf2, 0x3e, 0xd2, 0xe8, 0x46, 0x75, 0x82, + 0x32, 0x48, 0xfb, 0x98, 0xc3, 0xd0, 0x2b, 0x30, 0x1e, 0x12, 0xb6, 0x2a, 0x96, 0x28, 0x2b, 0xc7, + 0x96, 0xd9, 0x50, 0xc2, 0xf3, 0x61, 0x0d, 0x86, 0x0d, 0xcc, 0x84, 0x95, 0x1f, 0xee, 0xc1, 0xca, + 0x3f, 0x0d, 0x23, 0xec, 0x87, 0x5a, 0x01, 0x6a, 0x36, 0x6a, 0xbc, 0x18, 0x4b, 0x78, 0x7a, 0xc1, + 0x8c, 0x0e, 0xb8, 0x60, 0x3e, 0x09, 0x93, 0x55, 0x87, 0xec, 0x06, 0xfe, 0xb2, 0xdf, 0x6a, 0x07, + 0xae, 0x1f, 0xa3, 0x59, 0x28, 0xb1, 0xdb, 0x81, 0xef, 0xed, 0x12, 0xa5, 0x80, 0x4b, 0x94, 0x31, + 0xb7, 0xb7, 0xe0, 0x5c, 0x35, 0xb8, 0xe7, 0xdf, 0x73, 0xc2, 0xd6, 0x42, 0xbd, 0xa6, 0xc9, 0xb9, + 0x6b, 0x52, 0xce, 0xe2, 0x46, 0x2c, 0x99, 0x67, 0xaa, 0x56, 0x93, 0xdf, 0xb5, 0x2b, 0xae, 0x47, + 0x72, 0xb4, 0x11, 0x7f, 0xa3, 0x60, 0xb4, 0x94, 0xe0, 0xab, 0x07, 0x23, 0x2b, 0xf7, 0xc1, 0xe8, + 0x4d, 0x18, 0xdd, 0x74, 0x89, 0xd7, 0xc2, 0x64, 0x53, 0x2c, 0xb1, 0xa7, 0xf2, 0xdf, 0xe5, 0x57, + 0x28, 0xa6, 0xd4, 0x3e, 0x71, 0x29, 0x6d, 0x45, 0x54, 0xc6, 0x8a, 0x0c, 0xda, 0x81, 0x69, 0x29, + 0x06, 0x48, 0xa8, 0x58, 0x70, 0x4f, 0xf7, 0x92, 0x2d, 0x4c, 0xe2, 0x67, 0x0f, 0x0f, 0x2a, 0xd3, + 0x38, 0x45, 0x06, 0x77, 0x11, 0xa6, 0x62, 0xd9, 0x2e, 0x3d, 0x5a, 0x4b, 0x6c, 0xf8, 0x99, 0x58, + 0xc6, 0x24, 0x4c, 0x56, 0x6a, 0xff, 0xb8, 0x05, 0x8f, 0x76, 0x8d, 0x8c, 0x90, 0xb4, 0x4f, 0x78, + 0x16, 0xd2, 0x92, 0x6f, 0xa1, 0xbf, 0xe4, 0x6b, 0xff, 0x7d, 0x0b, 0xce, 0x2e, 0xef, 0xb6, 0xe3, + 0xfd, 0xaa, 0x6b, 0xbe, 0xee, 0xbc, 0x0c, 0xc3, 0xbb, 0xa4, 0xe5, 0x76, 0x76, 0xc5, 0xcc, 0x55, + 0xe4, 0xf1, 0xb3, 0xca, 0x4a, 0x8f, 0x0e, 0x2a, 0x13, 0x8d, 0x38, 0x08, 0x9d, 0x2d, 0xc2, 0x0b, + 0xb0, 0x40, 0x67, 0x87, 0xb8, 0xfb, 0x3e, 0xb9, 0xe5, 0xee, 0xba, 0xd2, 0xce, 0xa2, 0xa7, 0xee, + 0x6c, 0x5e, 0x0e, 0xe8, 
0xfc, 0x9b, 0x1d, 0xc7, 0x8f, 0xdd, 0x78, 0x5f, 0x3c, 0xcc, 0x48, 0x22, + 0x38, 0xa1, 0x67, 0x7f, 0xd3, 0x82, 0x29, 0xb9, 0xee, 0x17, 0x5a, 0xad, 0x90, 0x44, 0x11, 0x9a, + 0x83, 0x82, 0xdb, 0x16, 0xbd, 0x04, 0xd1, 0xcb, 0x42, 0xad, 0x8e, 0x0b, 0x6e, 0x1b, 0xd5, 0xa1, + 0xcc, 0xcd, 0x35, 0x92, 0xc5, 0x35, 0x90, 0xd1, 0x07, 0xeb, 0xc1, 0xba, 0xac, 0x89, 0x13, 0x22, + 0x92, 0x83, 0x63, 0x67, 0x66, 0xd1, 0x7c, 0xf5, 0xba, 0x21, 0xca, 0xb1, 0xc2, 0x40, 0x57, 0x60, + 0xd4, 0x0f, 0x5a, 0xdc, 0x7a, 0x86, 0xdf, 0x7e, 0x6c, 0xc9, 0xae, 0x89, 0x32, 0xac, 0xa0, 0xf6, + 0x0f, 0x5b, 0x30, 0x2e, 0xbf, 0x6c, 0x40, 0x66, 0x92, 0x6e, 0xad, 0x84, 0x91, 0x4c, 0xb6, 0x16, + 0x65, 0x06, 0x19, 0xc4, 0xe0, 0x01, 0x8b, 0xc7, 0xe1, 0x01, 0xed, 0x1f, 0x2b, 0xc0, 0xa4, 0xec, + 0x4e, 0xa3, 0xb3, 0x11, 0x91, 0x18, 0xad, 0x43, 0xd9, 0xe1, 0x43, 0x4e, 0xe4, 0x8a, 0x7d, 0x32, + 0x5b, 0xf8, 0x30, 0xe6, 0x27, 0xb9, 0x96, 0x17, 0x64, 0x6d, 0x9c, 0x10, 0x42, 0x1e, 0xcc, 0xf8, + 0x41, 0xcc, 0x8e, 0x68, 0x05, 0xef, 0xf5, 0x04, 0x92, 0xa6, 0x7e, 0x5e, 0x50, 0x9f, 0x59, 0x4b, + 0x53, 0xc1, 0xdd, 0x84, 0xd1, 0xb2, 0x54, 0x78, 0x14, 0xf3, 0xc5, 0x0d, 0x7d, 0x16, 0xb2, 0xf5, + 0x1d, 0xf6, 0xaf, 0x59, 0x50, 0x96, 0x68, 0xa7, 0xf1, 0xda, 0xb5, 0x0a, 0x23, 0x11, 0x9b, 0x04, + 0x39, 0x34, 0x76, 0xaf, 0x8e, 0xf3, 0xf9, 0x4a, 0x6e, 0x1e, 0xfe, 0x3f, 0xc2, 0x92, 0x06, 0xd3, + 0x77, 0xab, 0xee, 0x7f, 0x44, 0xf4, 0xdd, 0xaa, 0x3f, 0x39, 0x37, 0xcc, 0x7f, 0x61, 0x7d, 0xd6, + 0xc4, 0x5a, 0xca, 0x20, 0xb5, 0x43, 0xb2, 0xe9, 0xde, 0x4f, 0x33, 0x48, 0x75, 0x56, 0x8a, 0x05, + 0x14, 0xbd, 0x03, 0xe3, 0x4d, 0xa9, 0xe8, 0x4c, 0x8e, 0x81, 0xcb, 0x3d, 0x95, 0xee, 0xea, 0x7d, + 0x86, 0x5b, 0xd6, 0x2e, 0x69, 0xf5, 0xb1, 0x41, 0xcd, 0x7c, 0x6e, 0x2f, 0xf6, 0x7b, 0x6e, 0x4f, + 0xe8, 0xe6, 0x3f, 0x3e, 0xff, 0x84, 0x05, 0xc3, 0x5c, 0x5d, 0x36, 0x98, 0x7e, 0x51, 0x7b, 0xae, + 0x4a, 0xc6, 0xee, 0x2e, 0x2d, 0x14, 0xcf, 0x4f, 0x68, 0x15, 0xca, 0xec, 0x07, 0x53, 0x1b, 0x14, + 0xf3, 0x4d, 0x8a, 0x79, 0xab, 0x7a, 0x07, 0xef, 0xca, 0x6a, 
0x38, 0xa1, 0x60, 0x7f, 0xad, 0x48, + 0x8f, 0xaa, 0x04, 0xd5, 0xb8, 0xc1, 0xad, 0x87, 0x77, 0x83, 0x17, 0x1e, 0xd6, 0x0d, 0xbe, 0x05, + 0x53, 0x4d, 0xed, 0x71, 0x2b, 0x99, 0xc9, 0x2b, 0x3d, 0x17, 0x89, 0xf6, 0x0e, 0xc6, 0x55, 0x46, + 0x4b, 0x26, 0x11, 0x9c, 0xa6, 0x8a, 0xbe, 0x1b, 0xc6, 0xf9, 0x3c, 0x8b, 0x56, 0xb8, 0xc5, 0xc2, + 0x27, 0xf2, 0xd7, 0x8b, 0xde, 0x04, 0x5b, 0x89, 0x0d, 0xad, 0x3a, 0x36, 0x88, 0xd9, 0xbf, 0x3c, + 0x0a, 0x43, 0xcb, 0x7b, 0xc4, 0x8f, 0x4f, 0xe1, 0x40, 0x6a, 0xc2, 0xa4, 0xeb, 0xef, 0x05, 0xde, + 0x1e, 0x69, 0x71, 0xf8, 0x71, 0x2e, 0xd7, 0x47, 0x04, 0xe9, 0xc9, 0x9a, 0x41, 0x02, 0xa7, 0x48, + 0x3e, 0x0c, 0x09, 0xf3, 0x3a, 0x0c, 0xf3, 0xb9, 0x17, 0xe2, 0x65, 0xa6, 0x32, 0x98, 0x0d, 0xa2, + 0xd8, 0x05, 0x89, 0xf4, 0xcb, 0xb5, 0xcf, 0xa2, 0x3a, 0x7a, 0x17, 0x26, 0x37, 0xdd, 0x30, 0x8a, + 0xa9, 0x68, 0x18, 0xc5, 0xce, 0x6e, 0xfb, 0x01, 0x24, 0x4a, 0x35, 0x0e, 0x2b, 0x06, 0x25, 0x9c, + 0xa2, 0x8c, 0xb6, 0x60, 0x82, 0x0a, 0x39, 0x49, 0x53, 0x23, 0xc7, 0x6e, 0x4a, 0xa9, 0x8c, 0x6e, + 0xe9, 0x84, 0xb0, 0x49, 0x97, 0x1e, 0x26, 0x4d, 0x26, 0x14, 0x8d, 0x32, 0x8e, 0x42, 0x1d, 0x26, + 0x5c, 0x1a, 0xe2, 0x30, 0x7a, 0x26, 0x31, 0xb3, 0x95, 0xb2, 0x79, 0x26, 0x69, 0xc6, 0x29, 0x5f, + 0x82, 0x32, 0xa1, 0x43, 0x48, 0x09, 0x0b, 0xc5, 0xf8, 0xd5, 0xc1, 0xfa, 0xba, 0xea, 0x36, 0xc3, + 0xc0, 0x94, 0xe5, 0x97, 0x25, 0x25, 0x9c, 0x10, 0x45, 0x4b, 0x30, 0x1c, 0x91, 0xd0, 0x25, 0x91, + 0x50, 0x91, 0xf7, 0x98, 0x46, 0x86, 0xc6, 0x6d, 0xcf, 0xf9, 0x6f, 0x2c, 0xaa, 0xd2, 0xe5, 0xe5, + 0x30, 0x69, 0x88, 0x69, 0xc5, 0xb5, 0xe5, 0xb5, 0xc0, 0x4a, 0xb1, 0x80, 0xa2, 0x37, 0x60, 0x24, + 0x24, 0x1e, 0x53, 0x16, 0x4d, 0x0c, 0xbe, 0xc8, 0xb9, 0xee, 0x89, 0xd7, 0xc3, 0x92, 0x00, 0xba, + 0x09, 0x28, 0x24, 0x94, 0x87, 0x70, 0xfd, 0x2d, 0x65, 0xcc, 0x21, 0x74, 0xdd, 0x8f, 0x89, 0xf6, + 0xcf, 0xe0, 0x04, 0x43, 0x5a, 0xa5, 0xe2, 0x8c, 0x6a, 0xe8, 0x3a, 0xcc, 0xa8, 0xd2, 0x9a, 0x1f, + 0xc5, 0x8e, 0xdf, 0x24, 0x4c, 0xcd, 0x5d, 0x4e, 0xb8, 0x22, 0x9c, 0x46, 0xc0, 0xdd, 0x75, 0xec, + 
0x9f, 0xa5, 0xec, 0x0c, 0x1d, 0xad, 0x53, 0xe0, 0x05, 0x5e, 0x37, 0x79, 0x81, 0xf3, 0xb9, 0x33, + 0x97, 0xc3, 0x07, 0x1c, 0x5a, 0x30, 0xa6, 0xcd, 0x6c, 0xb2, 0x66, 0xad, 0x1e, 0x6b, 0xb6, 0x03, + 0xd3, 0x74, 0xa5, 0xdf, 0xde, 0x88, 0x48, 0xb8, 0x47, 0x5a, 0x6c, 0x61, 0x16, 0x1e, 0x6c, 0x61, + 0xaa, 0x57, 0xe6, 0x5b, 0x29, 0x82, 0xb8, 0xab, 0x09, 0xf4, 0xb2, 0xd4, 0x9c, 0x14, 0x0d, 0x23, + 0x2d, 0xae, 0x15, 0x39, 0x3a, 0xa8, 0x4c, 0x6b, 0x1f, 0xa2, 0x6b, 0x4a, 0xec, 0x2f, 0xc9, 0x6f, + 0x54, 0xaf, 0xf9, 0x4d, 0xb5, 0x58, 0x52, 0xaf, 0xf9, 0x6a, 0x39, 0xe0, 0x04, 0x87, 0xee, 0x51, + 0x2a, 0x82, 0xa4, 0x5f, 0xf3, 0xa9, 0x80, 0x82, 0x19, 0xc4, 0x7e, 0x01, 0x60, 0xf9, 0x3e, 0x69, + 0xf2, 0xa5, 0xae, 0x3f, 0x40, 0x5a, 0xf9, 0x0f, 0x90, 0xf6, 0xbf, 0xb5, 0x60, 0x72, 0x65, 0xc9, + 0x10, 0x13, 0xe7, 0x01, 0xb8, 0x6c, 0xf4, 0xd6, 0x5b, 0x6b, 0x52, 0xb7, 0xce, 0xd5, 0xa3, 0xaa, + 0x14, 0x6b, 0x18, 0xe8, 0x3c, 0x14, 0xbd, 0x8e, 0x2f, 0x44, 0x96, 0x91, 0xc3, 0x83, 0x4a, 0xf1, + 0x56, 0xc7, 0xc7, 0xb4, 0x4c, 0xb3, 0x10, 0x2c, 0x0e, 0x6c, 0x21, 0xd8, 0xd7, 0xbd, 0x0a, 0x55, + 0x60, 0xe8, 0xde, 0x3d, 0xb7, 0xc5, 0x8d, 0xd8, 0x85, 0xde, 0xff, 0xad, 0xb7, 0x6a, 0xd5, 0x08, + 0xf3, 0x72, 0xfb, 0xab, 0x45, 0x98, 0x5b, 0xf1, 0xc8, 0xfd, 0x0f, 0x68, 0xc8, 0x3f, 0xa8, 0x7d, + 0xe3, 0xf1, 0xf8, 0xc5, 0xe3, 0xda, 0xb0, 0xf6, 0x1f, 0x8f, 0x4d, 0x18, 0xe1, 0x8f, 0xd9, 0xd2, + 0xac, 0xff, 0xd5, 0xac, 0xd6, 0xf3, 0x07, 0x64, 0x9e, 0x3f, 0x8a, 0x0b, 0x73, 0x7e, 0x75, 0xd3, + 0x8a, 0x52, 0x2c, 0x89, 0xcf, 0x7d, 0x06, 0xc6, 0x75, 0xcc, 0x63, 0x59, 0x93, 0xff, 0x85, 0x22, + 0x4c, 0xd3, 0x1e, 0x3c, 0xd4, 0x89, 0xb8, 0xd3, 0x3d, 0x11, 0x27, 0x6d, 0x51, 0xdc, 0x7f, 0x36, + 0xde, 0x49, 0xcf, 0xc6, 0xf3, 0x79, 0xb3, 0x71, 0xda, 0x73, 0xf0, 0xfd, 0x16, 0x9c, 0x59, 0xf1, + 0x82, 0xe6, 0x4e, 0xca, 0xea, 0xf7, 0x25, 0x18, 0xa3, 0xe7, 0x78, 0x64, 0x78, 0x11, 0x19, 0x7e, + 0x65, 0x02, 0x84, 0x75, 0x3c, 0xad, 0xda, 0x9d, 0x3b, 0xb5, 0x6a, 0x96, 0x3b, 0x9a, 0x00, 0x61, + 0x1d, 0xcf, 0xfe, 0x86, 0x05, 0x17, 
0xae, 0x2f, 0x2d, 0x27, 0x4b, 0xb1, 0xcb, 0x23, 0x8e, 0x4a, + 0x81, 0x2d, 0xad, 0x2b, 0x89, 0x14, 0x58, 0x65, 0xbd, 0x10, 0xd0, 0x8f, 0x8a, 0xb7, 0xe7, 0xcf, + 0x58, 0x70, 0xe6, 0xba, 0x1b, 0xd3, 0x6b, 0x39, 0xed, 0x9b, 0x45, 0xef, 0xe5, 0xc8, 0x8d, 0x83, + 0x70, 0x3f, 0xed, 0x9b, 0x85, 0x15, 0x04, 0x6b, 0x58, 0xbc, 0xe5, 0x3d, 0x97, 0x99, 0x51, 0x15, + 0x4c, 0x55, 0x14, 0x16, 0xe5, 0x58, 0x61, 0xd0, 0x0f, 0x6b, 0xb9, 0x21, 0x13, 0x25, 0xf6, 0xc5, + 0x09, 0xab, 0x3e, 0xac, 0x2a, 0x01, 0x38, 0xc1, 0xb1, 0x7f, 0xdc, 0x82, 0x73, 0xd7, 0xbd, 0x4e, + 0x14, 0x93, 0x70, 0x33, 0x32, 0x3a, 0xfb, 0x02, 0x94, 0x89, 0x14, 0xd7, 0x45, 0x5f, 0x15, 0x83, + 0xa9, 0xe4, 0x78, 0xee, 0x18, 0xa6, 0xf0, 0x06, 0xf0, 0x1c, 0x38, 0x9e, 0xeb, 0xd8, 0x2f, 0x14, + 0x60, 0xe2, 0xc6, 0xfa, 0x7a, 0xfd, 0x3a, 0x89, 0xc5, 0x2d, 0xd6, 0x5f, 0xd5, 0x8c, 0x35, 0x8d, + 0x59, 0x2f, 0xa1, 0xa8, 0x13, 0xbb, 0xde, 0x3c, 0xf7, 0x44, 0x9e, 0xaf, 0xf9, 0xf1, 0xed, 0xb0, + 0x11, 0x87, 0xae, 0xbf, 0x95, 0xa9, 0x63, 0x93, 0x77, 0x6d, 0x31, 0xef, 0xae, 0x45, 0x2f, 0xc0, + 0x30, 0x73, 0x85, 0x96, 0xe2, 0xc9, 0x63, 0x4a, 0xa6, 0x60, 0xa5, 0x47, 0x07, 0x95, 0xf2, 0x1d, + 0x5c, 0xe3, 0x7f, 0xb0, 0x40, 0x45, 0x77, 0x60, 0x6c, 0x3b, 0x8e, 0xdb, 0x37, 0x88, 0xd3, 0x22, + 0xa1, 0x3c, 0x1d, 0x2e, 0x66, 0x9d, 0x0e, 0x74, 0x10, 0x38, 0x5a, 0xb2, 0xa1, 0x92, 0xb2, 0x08, + 0xeb, 0x74, 0xec, 0x06, 0x40, 0x02, 0x3b, 0x21, 0xfd, 0x82, 0xfd, 0x87, 0x16, 0x8c, 0x70, 0xaf, + 0xb4, 0x10, 0xbd, 0x06, 0x25, 0x72, 0x9f, 0x34, 0x05, 0xe7, 0x98, 0xd9, 0xe1, 0x84, 0xf1, 0xe0, + 0xda, 0x72, 0xfa, 0x1f, 0xb3, 0x5a, 0xe8, 0x06, 0x8c, 0xd0, 0xde, 0x5e, 0x57, 0x2e, 0x7a, 0x4f, + 0xe4, 0x7d, 0xb1, 0x9a, 0x76, 0xce, 0xab, 0x88, 0x22, 0x2c, 0xab, 0x33, 0xcd, 0x6f, 0xb3, 0xdd, + 0xa0, 0x07, 0x58, 0xdc, 0xeb, 0x9e, 0x5d, 0x5f, 0xaa, 0x73, 0x24, 0x41, 0x8d, 0x6b, 0x7e, 0x65, + 0x21, 0x4e, 0x88, 0xd8, 0xeb, 0x50, 0xa6, 0x93, 0xba, 0xe0, 0xb9, 0x4e, 0x6f, 0xa5, 0xf3, 0x33, + 0x50, 0x96, 0x0a, 0xe0, 0x48, 0x38, 0x36, 0x31, 0xaa, 0x52, 0x3f, 0x1c, 
0xe1, 0x04, 0x6e, 0x6f, + 0xc2, 0x59, 0xf6, 0xf2, 0xef, 0xc4, 0xdb, 0xc6, 0x1e, 0xeb, 0xbf, 0x98, 0x9f, 0x15, 0x82, 0x18, + 0x9f, 0x99, 0x59, 0xcd, 0x77, 0x60, 0x5c, 0x52, 0x4c, 0x84, 0x32, 0xfb, 0x8f, 0x4a, 0xf0, 0x58, + 0xad, 0x91, 0xef, 0xb0, 0xf8, 0x0a, 0x8c, 0x73, 0x36, 0x8d, 0x2e, 0x6d, 0xc7, 0x13, 0xed, 0xaa, + 0x77, 0xb1, 0x75, 0x0d, 0x86, 0x0d, 0x4c, 0x74, 0x01, 0x8a, 0xee, 0x7b, 0x7e, 0xda, 0x0c, 0xb7, + 0xf6, 0xe6, 0x1a, 0xa6, 0xe5, 0x14, 0x4c, 0x39, 0x3e, 0x7e, 0x94, 0x2a, 0xb0, 0xe2, 0xfa, 0x5e, + 0x87, 0x49, 0x37, 0x6a, 0x46, 0x6e, 0xcd, 0xa7, 0xe7, 0x4c, 0xe2, 0xec, 0x9a, 0x28, 0x09, 0x68, + 0xa7, 0x15, 0x14, 0xa7, 0xb0, 0xb5, 0x73, 0x7d, 0x68, 0x60, 0xae, 0xb1, 0xaf, 0xa7, 0x0f, 0x65, + 0x88, 0xdb, 0xec, 0xeb, 0x22, 0x66, 0xd4, 0x26, 0x18, 0x62, 0xfe, 0xc1, 0x11, 0x96, 0x30, 0x2a, + 0x81, 0x35, 0xb7, 0x9d, 0xf6, 0x42, 0x27, 0xde, 0xae, 0xba, 0x51, 0x33, 0xd8, 0x23, 0xe1, 0x3e, + 0x13, 0x9e, 0x47, 0x13, 0x09, 0x4c, 0x01, 0x96, 0x6e, 0x2c, 0xd4, 0x29, 0x26, 0xee, 0xae, 0x63, + 0x72, 0x85, 0x70, 0x12, 0x5c, 0xe1, 0x02, 0x4c, 0xc9, 0x66, 0x1a, 0x24, 0x62, 0x77, 0xc4, 0x18, + 0xeb, 0x98, 0x32, 0xb5, 0x15, 0xc5, 0xaa, 0x5b, 0x69, 0x7c, 0xf4, 0x32, 0x4c, 0xb8, 0xbe, 0x1b, + 0xbb, 0x4e, 0x1c, 0x84, 0xec, 0x86, 0xe5, 0x72, 0x32, 0xb3, 0x64, 0xab, 0xe9, 0x00, 0x6c, 0xe2, + 0xd9, 0xff, 0xb1, 0x04, 0x33, 0x6c, 0xda, 0xbe, 0xbd, 0xc2, 0x3e, 0x32, 0x2b, 0xec, 0x4e, 0xf7, + 0x0a, 0x3b, 0x09, 0x76, 0xf7, 0xc3, 0x5c, 0x66, 0xef, 0x42, 0x59, 0xd9, 0x02, 0x4b, 0x67, 0x00, + 0x2b, 0xc7, 0x19, 0xa0, 0x3f, 0xf7, 0x21, 0x9f, 0x71, 0x8b, 0x99, 0xcf, 0xb8, 0x7f, 0xcb, 0x82, + 0xc4, 0x24, 0x12, 0xdd, 0x80, 0x72, 0x3b, 0x60, 0x66, 0x07, 0xa1, 0xb4, 0xe5, 0x79, 0x2c, 0xf3, + 0xa2, 0xe2, 0x97, 0x22, 0x1f, 0xbf, 0xba, 0xac, 0x81, 0x93, 0xca, 0x68, 0x11, 0x46, 0xda, 0x21, + 0x69, 0xc4, 0xcc, 0x05, 0xb6, 0x2f, 0x1d, 0xbe, 0x46, 0x38, 0x3e, 0x96, 0x15, 0xed, 0x5f, 0xb4, + 0x00, 0xf8, 0x4b, 0xa9, 0xe3, 0x6f, 0x91, 0x53, 0xd0, 0xfe, 0x56, 0xa1, 0x14, 0xb5, 0x49, 0xb3, + 0x97, 0x41, 
0x48, 0xd2, 0x9f, 0x46, 0x9b, 0x34, 0x93, 0x01, 0xa7, 0xff, 0x30, 0xab, 0x6d, 0xff, + 0x00, 0xc0, 0x64, 0x82, 0x56, 0x8b, 0xc9, 0x2e, 0x7a, 0xce, 0x70, 0x89, 0x3b, 0x9f, 0x72, 0x89, + 0x2b, 0x33, 0x6c, 0x4d, 0xd1, 0xf8, 0x2e, 0x14, 0x77, 0x9d, 0xfb, 0x42, 0x93, 0xf4, 0x4c, 0xef, + 0x6e, 0x50, 0xfa, 0xf3, 0xab, 0xce, 0x7d, 0x2e, 0x33, 0x3d, 0x23, 0x17, 0xc8, 0xaa, 0x73, 0xff, + 0x88, 0x9b, 0x7d, 0xb0, 0x43, 0xea, 0x96, 0x1b, 0xc5, 0x5f, 0xfe, 0x0f, 0xc9, 0x7f, 0xb6, 0xec, + 0x68, 0x23, 0xac, 0x2d, 0xd7, 0x17, 0xef, 0x86, 0x03, 0xb5, 0xe5, 0xfa, 0xe9, 0xb6, 0x5c, 0x7f, + 0x80, 0xb6, 0x5c, 0x1f, 0xbd, 0x0f, 0x23, 0xe2, 0x8d, 0x9e, 0xd9, 0x7a, 0x9b, 0x5a, 0xaa, 0xbc, + 0xf6, 0xc4, 0x13, 0x3f, 0x6f, 0xf3, 0xaa, 0x94, 0x09, 0x45, 0x69, 0xdf, 0x76, 0x65, 0x83, 0xe8, + 0xaf, 0x5b, 0x30, 0x29, 0x7e, 0x63, 0xf2, 0x5e, 0x87, 0x44, 0xb1, 0xe0, 0x3d, 0x3f, 0x3d, 0x78, + 0x1f, 0x44, 0x45, 0xde, 0x95, 0x4f, 0xcb, 0x63, 0xd6, 0x04, 0xf6, 0xed, 0x51, 0xaa, 0x17, 0xe8, + 0x1f, 0x5a, 0x70, 0x76, 0xd7, 0xb9, 0xcf, 0x5b, 0xe4, 0x65, 0xd8, 0x89, 0xdd, 0x40, 0xd8, 0xae, + 0xbf, 0x36, 0xd8, 0xf4, 0x77, 0x55, 0xe7, 0x9d, 0x94, 0x66, 0xae, 0x67, 0xb3, 0x50, 0xfa, 0x76, + 0x35, 0xb3, 0x5f, 0x73, 0x9b, 0x30, 0x2a, 0xd7, 0x5b, 0x86, 0xe4, 0x5d, 0xd5, 0x19, 0xeb, 0x63, + 0x9b, 0x48, 0xe8, 0x7e, 0x69, 0xb4, 0x1d, 0xb1, 0xd6, 0x1e, 0x6a, 0x3b, 0xef, 0xc2, 0xb8, 0xbe, + 0xc6, 0x1e, 0x6a, 0x5b, 0xef, 0xc1, 0x99, 0x8c, 0xb5, 0xf4, 0x50, 0x9b, 0xbc, 0x07, 0xe7, 0x73, + 0xd7, 0xc7, 0xc3, 0x6c, 0xd8, 0xfe, 0x05, 0x4b, 0x3f, 0x07, 0x4f, 0x41, 0x05, 0xbf, 0x64, 0xaa, + 0xe0, 0x2f, 0xf6, 0xde, 0x39, 0x39, 0x7a, 0xf8, 0x77, 0xf4, 0x4e, 0xd3, 0x53, 0x1d, 0xbd, 0x01, + 0xc3, 0x1e, 0x2d, 0x91, 0xc6, 0x21, 0x76, 0xff, 0x1d, 0x99, 0xf0, 0x52, 0xac, 0x3c, 0xc2, 0x82, + 0x82, 0xfd, 0x2b, 0x16, 0x94, 0x4e, 0x61, 0x24, 0xb0, 0x39, 0x12, 0xcf, 0xe5, 0x92, 0x16, 0x21, + 0xcd, 0xe6, 0xb1, 0x73, 0x6f, 0xf9, 0x7e, 0x4c, 0xfc, 0x88, 0x89, 0x8a, 0x99, 0x03, 0xf3, 0x3d, + 0x70, 0xe6, 0x56, 0xe0, 0xb4, 0x16, 0x1d, 0xcf, 
0xf1, 0x9b, 0x24, 0xac, 0xf9, 0x5b, 0x7d, 0xad, + 0x94, 0x74, 0x9b, 0xa2, 0x42, 0x3f, 0x9b, 0x22, 0x7b, 0x1b, 0x90, 0xde, 0x80, 0xb0, 0xe3, 0xc4, + 0x30, 0xe2, 0xf2, 0xa6, 0xc4, 0xf0, 0x3f, 0x95, 0xcd, 0xdd, 0x75, 0xf5, 0x4c, 0xb3, 0x50, 0xe4, + 0x05, 0x58, 0x12, 0xb2, 0x5f, 0x81, 0x4c, 0xdf, 0xad, 0xfe, 0x6a, 0x03, 0xfb, 0xf3, 0x30, 0xc3, + 0x6a, 0x1e, 0x53, 0xa4, 0xb5, 0x53, 0x4a, 0xba, 0x8c, 0x90, 0x51, 0xf6, 0x57, 0x2c, 0x98, 0x5a, + 0x4b, 0xc5, 0xaf, 0xb8, 0xcc, 0xde, 0x03, 0x33, 0x74, 0xc3, 0x0d, 0x56, 0x8a, 0x05, 0xf4, 0xc4, + 0x75, 0x50, 0x7f, 0x66, 0x41, 0xe2, 0x4e, 0x79, 0x0a, 0x8c, 0xd7, 0x92, 0xc1, 0x78, 0x65, 0xea, + 0x46, 0x54, 0x77, 0xf2, 0xf8, 0x2e, 0x74, 0x53, 0xc5, 0x0e, 0xe8, 0xa1, 0x16, 0x49, 0xc8, 0x70, + 0x4f, 0xf3, 0x49, 0x33, 0xc0, 0x80, 0x8c, 0x26, 0xc0, 0x4c, 0x89, 0x14, 0xee, 0x47, 0xc4, 0x94, + 0x48, 0xf5, 0x27, 0x67, 0x87, 0xd6, 0xb5, 0x2e, 0xb3, 0x93, 0xeb, 0x3b, 0x99, 0x69, 0xb8, 0xe3, + 0xb9, 0xef, 0x13, 0x15, 0x00, 0xa5, 0x22, 0x4c, 0xbd, 0x45, 0xe9, 0xd1, 0x41, 0x65, 0x42, 0xfd, + 0xe3, 0x51, 0xb2, 0x92, 0x2a, 0xf6, 0x0d, 0x98, 0x4a, 0x0d, 0x18, 0x7a, 0x09, 0x86, 0xda, 0xdb, + 0x4e, 0x44, 0x52, 0xe6, 0x93, 0x43, 0x75, 0x5a, 0x78, 0x74, 0x50, 0x99, 0x54, 0x15, 0x58, 0x09, + 0xe6, 0xd8, 0xf6, 0xff, 0xb0, 0xa0, 0xb4, 0x16, 0xb4, 0x4e, 0x63, 0x31, 0xbd, 0x6e, 0x2c, 0xa6, + 0xc7, 0xf3, 0x62, 0x0c, 0xe6, 0xae, 0xa3, 0x95, 0xd4, 0x3a, 0xba, 0x98, 0x4b, 0xa1, 0xf7, 0x12, + 0xda, 0x85, 0x31, 0x16, 0xb9, 0x50, 0x98, 0x73, 0xbe, 0x60, 0xc8, 0x00, 0x95, 0x94, 0x0c, 0x30, + 0xa5, 0xa1, 0x6a, 0x92, 0xc0, 0xd3, 0x30, 0x22, 0x4c, 0x0a, 0xd3, 0x46, 0xf0, 0x02, 0x17, 0x4b, + 0xb8, 0xfd, 0x13, 0x45, 0x30, 0x22, 0x25, 0xa2, 0x5f, 0xb3, 0x60, 0x3e, 0xe4, 0x5e, 0x85, 0xad, + 0x6a, 0x27, 0x74, 0xfd, 0xad, 0x46, 0x73, 0x9b, 0xb4, 0x3a, 0x9e, 0xeb, 0x6f, 0xd5, 0xb6, 0xfc, + 0x40, 0x15, 0x2f, 0xdf, 0x27, 0xcd, 0x0e, 0x7b, 0x17, 0xe8, 0x13, 0x96, 0x51, 0x99, 0xec, 0x5c, + 0x3b, 0x3c, 0xa8, 0xcc, 0xe3, 0x63, 0xd1, 0xc6, 0xc7, 0xec, 0x0b, 0xfa, 0x86, 0x05, 
0x57, 0x79, + 0x00, 0xc1, 0xc1, 0xfb, 0xdf, 0x43, 0x62, 0xaa, 0x4b, 0x52, 0x09, 0x91, 0x75, 0x12, 0xee, 0x2e, + 0xbe, 0x2c, 0x06, 0xf4, 0x6a, 0xfd, 0x78, 0x6d, 0xe1, 0xe3, 0x76, 0xce, 0xfe, 0x17, 0x45, 0x98, + 0x10, 0x0e, 0xed, 0x22, 0x52, 0xca, 0x4b, 0xc6, 0x92, 0x78, 0x22, 0xb5, 0x24, 0x66, 0x0c, 0xe4, + 0x93, 0x09, 0x92, 0x12, 0xc1, 0x8c, 0xe7, 0x44, 0xf1, 0x0d, 0xe2, 0x84, 0xf1, 0x06, 0x71, 0xb8, + 0x29, 0x4b, 0xf1, 0xd8, 0x66, 0x37, 0x4a, 0x45, 0x73, 0x2b, 0x4d, 0x0c, 0x77, 0xd3, 0x47, 0x7b, + 0x80, 0x98, 0x3d, 0x4e, 0xe8, 0xf8, 0x11, 0xff, 0x16, 0x57, 0xbc, 0x19, 0x1c, 0xaf, 0xd5, 0x39, + 0xd1, 0x2a, 0xba, 0xd5, 0x45, 0x0d, 0x67, 0xb4, 0xa0, 0xd9, 0x59, 0x0d, 0x0d, 0x6a, 0x67, 0x35, + 0xdc, 0xc7, 0xd3, 0xc4, 0x87, 0xe9, 0xae, 0x98, 0x04, 0x6f, 0x43, 0x59, 0xd9, 0xc3, 0x89, 0x43, + 0xa7, 0x77, 0x68, 0x8f, 0x34, 0x05, 0xae, 0x46, 0x49, 0x6c, 0x31, 0x13, 0x72, 0xf6, 0x3f, 0x2a, + 0x18, 0x0d, 0xf2, 0x49, 0x5c, 0x83, 0x51, 0x27, 0x8a, 0xdc, 0x2d, 0x9f, 0xb4, 0xc4, 0x8e, 0xfd, + 0x78, 0xde, 0x8e, 0x35, 0x9a, 0x61, 0x36, 0x89, 0x0b, 0xa2, 0x26, 0x56, 0x34, 0xd0, 0x0d, 0x6e, + 0x30, 0xb4, 0x27, 0x79, 0xfe, 0xc1, 0xa8, 0x81, 0x34, 0x29, 0xda, 0x23, 0x58, 0xd4, 0x47, 0x5f, + 0xe0, 0x16, 0x5d, 0x37, 0xfd, 0xe0, 0x9e, 0x7f, 0x3d, 0x08, 0xa4, 0x17, 0xda, 0x60, 0x04, 0x67, + 0xa4, 0x1d, 0x97, 0xaa, 0x8e, 0x4d, 0x6a, 0x83, 0xc5, 0xed, 0xf9, 0x5e, 0x38, 0x43, 0x49, 0x9b, + 0xbe, 0x24, 0x11, 0x22, 0x30, 0x25, 0xa2, 0x25, 0xc8, 0x32, 0x31, 0x76, 0x99, 0xec, 0xbc, 0x59, + 0x3b, 0x51, 0xfa, 0xdd, 0x34, 0x49, 0xe0, 0x34, 0x4d, 0xfb, 0xa7, 0x2d, 0x60, 0x56, 0xf0, 0xa7, + 0xc0, 0x32, 0x7c, 0xd6, 0x64, 0x19, 0x66, 0xf3, 0x06, 0x39, 0x87, 0x5b, 0x78, 0x91, 0xaf, 0xac, + 0x7a, 0x18, 0xdc, 0xdf, 0x17, 0xaf, 0xe9, 0xfd, 0x39, 0x59, 0xfb, 0xff, 0x58, 0xfc, 0x10, 0x53, + 0x8e, 0xe9, 0xe8, 0xfb, 0x60, 0xb4, 0xe9, 0xb4, 0x9d, 0x26, 0x0f, 0xeb, 0x9b, 0xab, 0xd5, 0x31, + 0x2a, 0xcd, 0x2f, 0x89, 0x1a, 0x5c, 0x4b, 0x21, 0xa3, 0x6e, 0x8c, 0xca, 0xe2, 0xbe, 0x9a, 0x09, + 0xd5, 0xe4, 0xdc, 0x0e, 
0x4c, 0x18, 0xc4, 0x1e, 0xaa, 0x48, 0xfb, 0x7d, 0xfc, 0x8a, 0x55, 0x51, + 0x62, 0x76, 0x61, 0xc6, 0xd7, 0xfe, 0xd3, 0x0b, 0x45, 0x8a, 0x29, 0x1f, 0xef, 0x77, 0x89, 0xb2, + 0xdb, 0x47, 0xb3, 0xf2, 0x4f, 0x91, 0xc1, 0xdd, 0x94, 0xed, 0x9f, 0xb4, 0xe0, 0x51, 0x1d, 0x51, + 0x8b, 0x19, 0xd0, 0x4f, 0x4f, 0x5c, 0x85, 0xd1, 0xa0, 0x4d, 0x42, 0x27, 0x0e, 0x42, 0x71, 0x6b, + 0x5c, 0x91, 0x83, 0x7e, 0x5b, 0x94, 0x1f, 0x89, 0xf8, 0x8a, 0x92, 0xba, 0x2c, 0xc7, 0xaa, 0x26, + 0x95, 0x63, 0xd8, 0x60, 0x44, 0x22, 0x9e, 0x03, 0x3b, 0x03, 0xd8, 0x93, 0x69, 0x84, 0x05, 0xc4, + 0xfe, 0x23, 0x8b, 0x2f, 0x2c, 0xbd, 0xeb, 0xe8, 0x3d, 0x98, 0xde, 0x75, 0xe2, 0xe6, 0xf6, 0xf2, + 0xfd, 0x76, 0xc8, 0xd5, 0xe3, 0x72, 0x9c, 0x9e, 0xe9, 0x37, 0x4e, 0xda, 0x47, 0x26, 0x46, 0x6a, + 0xab, 0x29, 0x62, 0xb8, 0x8b, 0x3c, 0xda, 0x80, 0x31, 0x56, 0xc6, 0xac, 0xa1, 0xa3, 0x5e, 0xac, + 0x41, 0x5e, 0x6b, 0xea, 0xd5, 0x79, 0x35, 0xa1, 0x83, 0x75, 0xa2, 0xf6, 0x97, 0x8b, 0x7c, 0xb7, + 0x33, 0x6e, 0xfb, 0x69, 0x18, 0x69, 0x07, 0xad, 0xa5, 0x5a, 0x15, 0x8b, 0x59, 0x50, 0xd7, 0x48, + 0x9d, 0x17, 0x63, 0x09, 0x47, 0xaf, 0x02, 0x90, 0xfb, 0x31, 0x09, 0x7d, 0xc7, 0x53, 0x46, 0x23, + 0xca, 0x4c, 0xb2, 0x1a, 0xac, 0x05, 0xf1, 0x9d, 0x88, 0x7c, 0xcf, 0xb2, 0x42, 0xc1, 0x1a, 0x3a, + 0xba, 0x06, 0xd0, 0x0e, 0x83, 0x3d, 0xb7, 0xc5, 0xdc, 0xeb, 0x8a, 0xa6, 0x49, 0x45, 0x5d, 0x41, + 0xb0, 0x86, 0x85, 0x5e, 0x85, 0x89, 0x8e, 0x1f, 0x71, 0x0e, 0xc5, 0xd9, 0x10, 0xd1, 0x09, 0x47, + 0x13, 0xeb, 0x86, 0x3b, 0x3a, 0x10, 0x9b, 0xb8, 0x68, 0x01, 0x86, 0x63, 0x87, 0xd9, 0x44, 0x0c, + 0xe5, 0xdb, 0x36, 0xae, 0x53, 0x0c, 0x3d, 0xa8, 0x2c, 0xad, 0x80, 0x45, 0x45, 0xf4, 0xb6, 0xf4, + 0x55, 0xe0, 0x67, 0xbd, 0x30, 0x2a, 0x1e, 0xec, 0x5e, 0xd0, 0x3c, 0x15, 0x84, 0xb1, 0xb2, 0x41, + 0xcb, 0xfe, 0x46, 0x19, 0x20, 0x61, 0xc7, 0xd1, 0xfb, 0x5d, 0xe7, 0xd1, 0xb3, 0xbd, 0x19, 0xf8, + 0x93, 0x3b, 0x8c, 0xd0, 0x0f, 0x5a, 0x30, 0xe6, 0x78, 0x5e, 0xd0, 0x74, 0x62, 0x36, 0xca, 0x85, + 0xde, 0xe7, 0xa1, 0x68, 0x7f, 0x21, 0xa9, 0xc1, 0xbb, 0xf0, 
0x82, 0x5c, 0x78, 0x1a, 0xa4, 0x6f, + 0x2f, 0xf4, 0x86, 0xd1, 0xa7, 0xa4, 0x94, 0xc6, 0x97, 0xc7, 0x5c, 0x5a, 0x4a, 0x2b, 0xb3, 0xa3, + 0x5f, 0x13, 0xd0, 0xd0, 0x1d, 0x23, 0xf0, 0x5c, 0x29, 0x3f, 0x06, 0x83, 0xc1, 0x95, 0xf6, 0x8b, + 0x39, 0x87, 0xea, 0xba, 0x73, 0xd5, 0x50, 0x7e, 0xa0, 0x12, 0x4d, 0xfc, 0xe9, 0xe3, 0x58, 0xf5, + 0x2e, 0x4c, 0xb5, 0xcc, 0xbb, 0x5d, 0xac, 0xa6, 0xa7, 0xf2, 0xe8, 0xa6, 0x58, 0x81, 0xe4, 0x36, + 0x4f, 0x01, 0x70, 0x9a, 0x30, 0xaa, 0x73, 0x37, 0xb7, 0x9a, 0xbf, 0x19, 0x08, 0xe3, 0x74, 0x3b, + 0x77, 0x2e, 0xf7, 0xa3, 0x98, 0xec, 0x52, 0xcc, 0xe4, 0xd2, 0x5e, 0x13, 0x75, 0xb1, 0xa2, 0x82, + 0xde, 0x80, 0x61, 0xe6, 0x27, 0x1b, 0xcd, 0x8e, 0xe6, 0x2b, 0x13, 0xcd, 0x10, 0x0f, 0xc9, 0xa6, + 0x62, 0x7f, 0x23, 0x2c, 0x28, 0xa0, 0x1b, 0x32, 0x0e, 0x4c, 0x54, 0xf3, 0xef, 0x44, 0x84, 0xc5, + 0x81, 0x29, 0x2f, 0x7e, 0x3c, 0x09, 0xf1, 0xc2, 0xcb, 0x33, 0xc3, 0xc7, 0x1b, 0x35, 0x29, 0x73, + 0x24, 0xfe, 0xcb, 0xa8, 0xf4, 0xb3, 0x90, 0xdf, 0x3d, 0x33, 0x72, 0x7d, 0x32, 0x9c, 0x77, 0x4d, + 0x12, 0x38, 0x4d, 0x93, 0x32, 0x9a, 0x7c, 0xe7, 0x0a, 0xf3, 0xf6, 0x7e, 0xfb, 0x9f, 0xcb, 0xd7, + 0xec, 0x92, 0xe1, 0x25, 0x58, 0xd4, 0x3f, 0xd5, 0x5b, 0x7f, 0xce, 0x87, 0xe9, 0xf4, 0x16, 0x7d, + 0xa8, 0x5c, 0xc6, 0x1f, 0x96, 0x60, 0xd2, 0x5c, 0x52, 0xe8, 0x2a, 0x94, 0x05, 0x11, 0x15, 0x94, + 0x54, 0xed, 0x92, 0x55, 0x09, 0xc0, 0x09, 0x0e, 0x8b, 0x45, 0xcb, 0xaa, 0x6b, 0x66, 0x89, 0x49, + 0x2c, 0x5a, 0x05, 0xc1, 0x1a, 0x16, 0x95, 0x97, 0x36, 0x82, 0x20, 0x56, 0x97, 0x8a, 0x5a, 0x77, + 0x8b, 0xac, 0x14, 0x0b, 0x28, 0xbd, 0x4c, 0x76, 0x48, 0xe8, 0x13, 0xcf, 0x8c, 0x75, 0xa6, 0x2e, + 0x93, 0x9b, 0x3a, 0x10, 0x9b, 0xb8, 0xf4, 0x96, 0x0c, 0x22, 0xb6, 0x90, 0x85, 0x54, 0x96, 0x98, + 0x79, 0x36, 0xb8, 0xc7, 0xb9, 0x84, 0xa3, 0xcf, 0xc3, 0xa3, 0xca, 0x41, 0x1c, 0x73, 0x45, 0xb5, + 0x6c, 0x71, 0xd8, 0x50, 0xa2, 0x3c, 0xba, 0x94, 0x8d, 0x86, 0xf3, 0xea, 0xa3, 0xd7, 0x61, 0x52, + 0x70, 0xee, 0x92, 0xe2, 0x88, 0x69, 0x3b, 0x71, 0xd3, 0x80, 0xe2, 0x14, 0xb6, 0x8c, 0xd6, 0xc6, + 
0x98, 0x67, 0x49, 0x61, 0xb4, 0x3b, 0x5a, 0x9b, 0x0e, 0xc7, 0x5d, 0x35, 0xd0, 0x02, 0x4c, 0x71, + 0xd6, 0xca, 0xf5, 0xb7, 0xf8, 0x9c, 0x08, 0xef, 0x13, 0xb5, 0xa5, 0x6e, 0x9b, 0x60, 0x9c, 0xc6, + 0x47, 0xaf, 0xc0, 0xb8, 0x13, 0x36, 0xb7, 0xdd, 0x98, 0x34, 0xe3, 0x4e, 0xc8, 0xdd, 0x52, 0x34, + 0xe3, 0x93, 0x05, 0x0d, 0x86, 0x0d, 0x4c, 0xfb, 0x7d, 0x38, 0x93, 0xe1, 0xb8, 0x46, 0x17, 0x8e, + 0xd3, 0x76, 0xe5, 0x37, 0xa5, 0x0c, 0x36, 0x17, 0xea, 0x35, 0xf9, 0x35, 0x1a, 0x16, 0x5d, 0x9d, + 0xcc, 0xc1, 0x4d, 0x4b, 0x42, 0xa1, 0x56, 0xe7, 0x8a, 0x04, 0xe0, 0x04, 0xc7, 0xfe, 0x9f, 0x05, + 0x98, 0xca, 0x50, 0xbe, 0xb3, 0x44, 0x08, 0x29, 0xd9, 0x23, 0xc9, 0x7b, 0x60, 0x06, 0xff, 0x2b, + 0x1c, 0x23, 0xf8, 0x5f, 0xb1, 0x5f, 0xf0, 0xbf, 0xd2, 0x07, 0x09, 0xfe, 0x67, 0x8e, 0xd8, 0xd0, + 0x40, 0x23, 0x96, 0x11, 0x30, 0x70, 0xf8, 0x98, 0x01, 0x03, 0x8d, 0x41, 0x1f, 0x19, 0x60, 0xd0, + 0xbf, 0x56, 0x80, 0xe9, 0xb4, 0x91, 0xdc, 0x29, 0xa8, 0x63, 0xdf, 0x30, 0xd4, 0xb1, 0xd9, 0x69, + 0x45, 0xd2, 0xa6, 0x7b, 0x79, 0xaa, 0x59, 0x9c, 0x52, 0xcd, 0x7e, 0x72, 0x20, 0x6a, 0xbd, 0xd5, + 0xb4, 0x7f, 0xa7, 0x00, 0xe7, 0xd2, 0x55, 0x96, 0x3c, 0xc7, 0xdd, 0x3d, 0x85, 0xb1, 0xb9, 0x6d, + 0x8c, 0xcd, 0x73, 0x83, 0x7c, 0x0d, 0xeb, 0x5a, 0xee, 0x00, 0xbd, 0x95, 0x1a, 0xa0, 0xab, 0x83, + 0x93, 0xec, 0x3d, 0x4a, 0xdf, 0x2c, 0xc2, 0xc5, 0xcc, 0x7a, 0x89, 0x36, 0x73, 0xc5, 0xd0, 0x66, + 0x5e, 0x4b, 0x69, 0x33, 0xed, 0xde, 0xb5, 0x4f, 0x46, 0xbd, 0x29, 0x3c, 0x0a, 0x59, 0x80, 0xb8, + 0x07, 0x54, 0x6d, 0x1a, 0x1e, 0x85, 0x8a, 0x10, 0x36, 0xe9, 0x7e, 0x2b, 0xa9, 0x34, 0xff, 0x95, + 0x05, 0xe7, 0x33, 0xe7, 0xe6, 0x14, 0x54, 0x58, 0x6b, 0xa6, 0x0a, 0xeb, 0xe9, 0x81, 0x57, 0x6b, + 0x8e, 0x4e, 0xeb, 0xb7, 0x4a, 0x39, 0xdf, 0xc2, 0x04, 0xf4, 0xdb, 0x30, 0xe6, 0x34, 0x9b, 0x24, + 0x8a, 0x56, 0x83, 0x96, 0x0a, 0x98, 0xf6, 0x1c, 0x93, 0xb3, 0x92, 0xe2, 0xa3, 0x83, 0xca, 0x5c, + 0x9a, 0x44, 0x02, 0xc6, 0x3a, 0x05, 0x33, 0xc6, 0x63, 0xe1, 0x44, 0x63, 0x3c, 0x5e, 0x03, 0xd8, + 0x53, 0xdc, 0x7a, 0x5a, 0xc8, 0xd7, 
0xf8, 0x78, 0x0d, 0x0b, 0x7d, 0x01, 0x46, 0x23, 0x71, 0x8d, + 0x8b, 0xa5, 0xf8, 0xc2, 0x80, 0x73, 0xe5, 0x6c, 0x10, 0xcf, 0x74, 0x5d, 0x57, 0xfa, 0x10, 0x45, + 0x12, 0x7d, 0x17, 0x4c, 0x47, 0x3c, 0x32, 0xca, 0x92, 0xe7, 0x44, 0xcc, 0x0f, 0x42, 0xac, 0x42, + 0xe6, 0x8f, 0xde, 0x48, 0xc1, 0x70, 0x17, 0x36, 0x5a, 0x91, 0x1f, 0xc5, 0xc2, 0xb8, 0xf0, 0x85, + 0x79, 0x39, 0xf9, 0x20, 0x91, 0x86, 0xe9, 0x6c, 0x7a, 0xf8, 0xd9, 0xc0, 0x6b, 0x35, 0xd1, 0x17, + 0x00, 0xe8, 0xf2, 0x11, 0xba, 0x84, 0x91, 0xfc, 0xc3, 0x93, 0x9e, 0x2a, 0xad, 0x4c, 0xcb, 0x4f, + 0xe6, 0xcb, 0x57, 0x55, 0x44, 0xb0, 0x46, 0xd0, 0xfe, 0x5a, 0x09, 0x1e, 0xeb, 0x71, 0x46, 0xa2, + 0x05, 0xf3, 0x09, 0xf4, 0x99, 0xb4, 0x70, 0x3d, 0x97, 0x59, 0xd9, 0x90, 0xb6, 0x53, 0x4b, 0xb1, + 0xf0, 0x81, 0x97, 0xe2, 0x0f, 0x59, 0x9a, 0xda, 0x83, 0x1b, 0xf3, 0x7d, 0xf6, 0x98, 0x67, 0xff, + 0x09, 0xea, 0x41, 0x36, 0x33, 0x94, 0x09, 0xd7, 0x06, 0xee, 0xce, 0xc0, 0xda, 0x85, 0xd3, 0x55, + 0xfe, 0x7e, 0xd9, 0x82, 0x27, 0x32, 0xfb, 0x6b, 0x98, 0x6c, 0x5c, 0x85, 0x72, 0x93, 0x16, 0x6a, + 0xae, 0x5b, 0x89, 0x4f, 0xab, 0x04, 0xe0, 0x04, 0xc7, 0xb0, 0xcc, 0x28, 0xf4, 0xb5, 0xcc, 0xf8, + 0xe7, 0x16, 0x74, 0xed, 0x8f, 0x53, 0x38, 0xa8, 0x6b, 0xe6, 0x41, 0xfd, 0xf1, 0x41, 0xe6, 0x32, + 0xe7, 0x8c, 0xfe, 0x4f, 0x53, 0xf0, 0x48, 0x8e, 0xaf, 0xc6, 0x1e, 0xcc, 0x6c, 0x35, 0x89, 0xe9, + 0x14, 0x27, 0x3e, 0x26, 0xd3, 0x7f, 0xb0, 0xa7, 0x07, 0x1d, 0x4b, 0xcf, 0x33, 0xd3, 0x85, 0x82, + 0xbb, 0x9b, 0x40, 0x5f, 0xb6, 0xe0, 0xac, 0x73, 0x2f, 0xea, 0x4a, 0xc2, 0x28, 0xd6, 0xcc, 0x8b, + 0x99, 0x4a, 0x90, 0x3e, 0x49, 0x1b, 0x79, 0xbe, 0xa2, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, 0xc2, 0x22, + 0x84, 0x26, 0x65, 0xe7, 0x7b, 0xb8, 0x6d, 0x66, 0x39, 0xd5, 0xf0, 0x23, 0x5b, 0x42, 0xb0, 0xa2, + 0x83, 0xee, 0x42, 0x79, 0x4b, 0x7a, 0xba, 0x89, 0x2b, 0x21, 0xf3, 0x8e, 0xcd, 0x74, 0x87, 0xe3, + 0xcf, 0x92, 0x0a, 0x84, 0x13, 0x52, 0xe8, 0x75, 0x28, 0xfa, 0x9b, 0x51, 0xaf, 0x44, 0x3f, 0x29, + 0x4b, 0x26, 0xee, 0x12, 0xbd, 0xb6, 0xd2, 0xc0, 0xb4, 0x22, 0xba, 0x01, 
0xc5, 0x70, 0xa3, 0x25, + 0xf4, 0x76, 0x99, 0x27, 0x37, 0x5e, 0xac, 0x66, 0x2f, 0x12, 0x4e, 0x09, 0x2f, 0x56, 0x31, 0x25, + 0x81, 0xea, 0x30, 0xc4, 0xdc, 0x1a, 0xc4, 0x2d, 0x90, 0xc9, 0xef, 0xf6, 0x70, 0x0f, 0xe2, 0x7e, + 0xd3, 0x0c, 0x01, 0x73, 0x42, 0x68, 0x1d, 0x86, 0x9b, 0x2c, 0x29, 0x8c, 0x88, 0xda, 0xfc, 0xa9, + 0x4c, 0x0d, 0x5d, 0x8f, 0x6c, 0x39, 0x42, 0x61, 0xc5, 0x30, 0xb0, 0xa0, 0xc5, 0xa8, 0x92, 0xf6, + 0xf6, 0x66, 0xc4, 0x24, 0xfc, 0x3c, 0xaa, 0x3d, 0x92, 0x40, 0x09, 0xaa, 0x0c, 0x03, 0x0b, 0x5a, + 0xe8, 0x33, 0x50, 0xd8, 0x6c, 0x0a, 0xaf, 0x87, 0x4c, 0x55, 0x9d, 0xe9, 0xd5, 0xbe, 0x38, 0x7c, + 0x78, 0x50, 0x29, 0xac, 0x2c, 0xe1, 0xc2, 0x66, 0x13, 0xad, 0xc1, 0xc8, 0x26, 0xf7, 0x83, 0x15, + 0xda, 0xb8, 0xa7, 0xb2, 0x5d, 0x74, 0xbb, 0x5c, 0x65, 0xb9, 0xb5, 0xbe, 0x00, 0x60, 0x49, 0x84, + 0xc5, 0xa1, 0x54, 0xfe, 0xbc, 0x22, 0x20, 0xf3, 0xfc, 0xf1, 0x7c, 0xb0, 0xf9, 0xad, 0x9c, 0x78, + 0x05, 0x63, 0x8d, 0x22, 0xfa, 0x12, 0x94, 0x1d, 0x99, 0xfe, 0x4f, 0x04, 0xac, 0x78, 0x21, 0x73, + 0x63, 0xf6, 0xce, 0x8c, 0xc8, 0x57, 0xb5, 0x42, 0xc2, 0x09, 0x51, 0xb4, 0x03, 0x13, 0x7b, 0x51, + 0x7b, 0x9b, 0xc8, 0x8d, 0xcc, 0xe2, 0x57, 0xe4, 0x5c, 0x5c, 0x77, 0x05, 0xa2, 0x1b, 0xc6, 0x1d, + 0xc7, 0xeb, 0x3a, 0x7b, 0xd8, 0x5b, 0xf6, 0x5d, 0x9d, 0x18, 0x36, 0x69, 0xd3, 0xe1, 0x7f, 0xaf, + 0x13, 0x6c, 0xec, 0xc7, 0x44, 0x44, 0x70, 0xce, 0x1c, 0xfe, 0x37, 0x39, 0x4a, 0xf7, 0xf0, 0x0b, + 0x00, 0x96, 0x44, 0xe8, 0x56, 0x77, 0x64, 0x6a, 0x4d, 0x16, 0xb9, 0x39, 0x67, 0xab, 0x67, 0xe6, + 0xdf, 0xd4, 0x06, 0x85, 0x9d, 0x91, 0x09, 0x29, 0x76, 0x36, 0xb6, 0xb7, 0x83, 0x38, 0xf0, 0x53, + 0xe7, 0xf2, 0x4c, 0xfe, 0xd9, 0x58, 0xcf, 0xc0, 0xef, 0x3e, 0x1b, 0xb3, 0xb0, 0x70, 0x66, 0x5b, + 0xa8, 0x05, 0x93, 0xed, 0x20, 0x8c, 0xef, 0x05, 0xa1, 0x5c, 0x5f, 0xa8, 0x87, 0x36, 0xc1, 0xc0, + 0x14, 0x2d, 0xb2, 0x88, 0xe2, 0x26, 0x04, 0xa7, 0x68, 0xa2, 0xcf, 0xc1, 0x48, 0xd4, 0x74, 0x3c, + 0x52, 0xbb, 0x3d, 0x7b, 0x26, 0xff, 0xd2, 0x69, 0x70, 0x94, 0x9c, 0xd5, 0xc5, 0x26, 0x47, 0xa0, + 0x60, 0x49, 
0x0e, 0xad, 0xc0, 0x10, 0x4b, 0x0b, 0xc0, 0x82, 0x4f, 0xe7, 0x04, 0x46, 0xea, 0xb2, + 0x2b, 0xe5, 0x67, 0x13, 0x2b, 0xc6, 0xbc, 0x3a, 0xdd, 0x03, 0x82, 0xa9, 0x0e, 0xa2, 0xd9, 0x73, + 0xf9, 0x7b, 0x40, 0xf0, 0xe2, 0xb7, 0x1b, 0xbd, 0xf6, 0x80, 0x42, 0xc2, 0x09, 0x51, 0x7a, 0x32, + 0xd3, 0xd3, 0xf4, 0x91, 0x1e, 0x66, 0x2c, 0xb9, 0x67, 0x29, 0x3b, 0x99, 0xe9, 0x49, 0x4a, 0x49, + 0xd8, 0xbf, 0x3f, 0xd2, 0xcd, 0xa9, 0x30, 0x31, 0xec, 0x2f, 0x5a, 0x5d, 0x2f, 0x74, 0x9f, 0x1e, + 0x54, 0x2b, 0x74, 0x82, 0x3c, 0xea, 0x97, 0x2d, 0x78, 0xa4, 0x9d, 0xf9, 0x21, 0xe2, 0xda, 0x1f, + 0x4c, 0xb9, 0xc4, 0x3f, 0x5d, 0x05, 0x88, 0xcf, 0x86, 0xe3, 0x9c, 0x96, 0xd2, 0x72, 0x40, 0xf1, + 0x03, 0xcb, 0x01, 0xab, 0x30, 0xca, 0x58, 0xcb, 0x3e, 0x49, 0xd2, 0xd2, 0xe2, 0x10, 0x63, 0x20, + 0x96, 0x44, 0x45, 0xac, 0x48, 0xa0, 0x1f, 0xb6, 0xe0, 0x42, 0xba, 0xeb, 0x98, 0x30, 0xb0, 0x08, + 0xa7, 0xce, 0x25, 0xc0, 0x15, 0xf1, 0xfd, 0x17, 0xea, 0xbd, 0x90, 0x8f, 0xfa, 0x21, 0xe0, 0xde, + 0x8d, 0xa1, 0x6a, 0x86, 0x08, 0x3a, 0x6c, 0xaa, 0xdd, 0x07, 0x10, 0x43, 0x5f, 0x84, 0xf1, 0xdd, + 0xa0, 0xe3, 0xc7, 0xc2, 0xea, 0x45, 0xf8, 0x29, 0xb2, 0x67, 0xe6, 0x55, 0xad, 0x1c, 0x1b, 0x58, + 0x29, 0xe1, 0x75, 0xf4, 0x81, 0x85, 0xd7, 0x77, 0x52, 0xa9, 0xb0, 0xcb, 0xf9, 0x61, 0xfb, 0x84, + 0x9c, 0x7f, 0x8c, 0x84, 0xd8, 0xa7, 0x2b, 0x11, 0xfd, 0xac, 0x95, 0xc1, 0xca, 0x73, 0x19, 0xf9, + 0x35, 0x53, 0x46, 0xbe, 0x9c, 0x96, 0x91, 0xbb, 0x54, 0xae, 0x86, 0x78, 0x3c, 0x78, 0xec, 0xe7, + 0x41, 0x83, 0xa9, 0xd9, 0x1e, 0x5c, 0xea, 0x77, 0x2d, 0x31, 0xf3, 0xa7, 0x96, 0x7a, 0x60, 0x4b, + 0xcc, 0x9f, 0x5a, 0xb5, 0x2a, 0x66, 0x90, 0x41, 0xa3, 0x6d, 0xd8, 0xff, 0xd5, 0x82, 0x62, 0x3d, + 0x68, 0x9d, 0x82, 0x0a, 0xf9, 0xb3, 0x86, 0x0a, 0xf9, 0xb1, 0x9c, 0x14, 0xe5, 0xb9, 0x0a, 0xe3, + 0xe5, 0x94, 0xc2, 0xf8, 0x42, 0x1e, 0x81, 0xde, 0xea, 0xe1, 0x9f, 0x2a, 0x82, 0x9e, 0x50, 0x1d, + 0xfd, 0xd6, 0x83, 0xd8, 0x1e, 0x17, 0x7b, 0xe5, 0x58, 0x17, 0x94, 0x99, 0xd5, 0x94, 0x74, 0xbd, + 0xfb, 0x73, 0x66, 0x82, 0xfc, 0x16, 0x71, 0xb7, 
0xb6, 0x63, 0xd2, 0x4a, 0x7f, 0xce, 0xe9, 0x99, + 0x20, 0xff, 0x67, 0x0b, 0xa6, 0x52, 0xad, 0x23, 0x0f, 0x26, 0x3c, 0x5d, 0xff, 0x27, 0xd6, 0xe9, + 0x03, 0xa9, 0x0e, 0x85, 0x09, 0xa7, 0x56, 0x84, 0x4d, 0xe2, 0x68, 0x1e, 0x40, 0xbd, 0xcf, 0x49, + 0xbd, 0x17, 0xe3, 0xfa, 0xd5, 0x03, 0x5e, 0x84, 0x35, 0x0c, 0xf4, 0x12, 0x8c, 0xc5, 0x41, 0x3b, + 0xf0, 0x82, 0xad, 0xfd, 0x9b, 0x44, 0xc6, 0x77, 0x51, 0x86, 0x59, 0xeb, 0x09, 0x08, 0xeb, 0x78, + 0xf6, 0xcf, 0x14, 0x21, 0x9d, 0x84, 0xff, 0xdb, 0x6b, 0xf2, 0xa3, 0xb9, 0x26, 0xbf, 0x69, 0xc1, + 0x34, 0x6d, 0x9d, 0x19, 0x89, 0xc8, 0xcb, 0x56, 0xe5, 0xa0, 0xb1, 0x7a, 0xe4, 0xa0, 0xb9, 0x4c, + 0xcf, 0xae, 0x56, 0xd0, 0x89, 0x85, 0xde, 0x4c, 0x3b, 0x9c, 0x68, 0x29, 0x16, 0x50, 0x81, 0x47, + 0xc2, 0x50, 0x78, 0x3e, 0xe9, 0x78, 0x24, 0x0c, 0xb1, 0x80, 0xca, 0x14, 0x35, 0xa5, 0x9c, 0x14, + 0x35, 0x2c, 0x5a, 0x9d, 0x30, 0x27, 0x10, 0x6c, 0x8f, 0x16, 0xad, 0x4e, 0xda, 0x19, 0x24, 0x38, + 0xf6, 0x2f, 0x14, 0x61, 0xbc, 0x1e, 0xb4, 0x92, 0x17, 0xb2, 0x17, 0x8d, 0x17, 0xb2, 0x4b, 0xa9, + 0x17, 0xb2, 0x69, 0x1d, 0xf7, 0xdb, 0xef, 0x61, 0x1f, 0xd6, 0x7b, 0xd8, 0x3f, 0xb3, 0xd8, 0xac, + 0x55, 0xd7, 0x1a, 0x22, 0x45, 0xee, 0xf3, 0x30, 0xc6, 0x0e, 0x24, 0xe6, 0x6a, 0x27, 0x9f, 0x8d, + 0x58, 0xf4, 0xf9, 0xb5, 0xa4, 0x18, 0xeb, 0x38, 0xe8, 0x0a, 0x8c, 0x46, 0xc4, 0x09, 0x9b, 0xdb, + 0xea, 0x8c, 0x13, 0x8f, 0x2a, 0xbc, 0x0c, 0x2b, 0x28, 0x7a, 0x33, 0x09, 0x94, 0x56, 0xcc, 0x4f, + 0xf6, 0xaa, 0xf7, 0x87, 0x6f, 0x91, 0xfc, 0xe8, 0x68, 0xf6, 0x5b, 0x80, 0xba, 0xf1, 0x07, 0x08, + 0x89, 0x54, 0x31, 0x43, 0x22, 0x95, 0xbb, 0xc2, 0x21, 0xfd, 0xa9, 0x05, 0x93, 0xf5, 0xa0, 0x45, + 0xb7, 0xee, 0xb7, 0xd2, 0x3e, 0xd5, 0xa3, 0x44, 0x0e, 0xf7, 0x88, 0x12, 0xf9, 0x77, 0x2d, 0x18, + 0xa9, 0x07, 0xad, 0x53, 0xd0, 0xb6, 0xbf, 0x66, 0x6a, 0xdb, 0x1f, 0xcd, 0x59, 0x12, 0x39, 0x0a, + 0xf6, 0x5f, 0x2a, 0xc2, 0x04, 0xed, 0x67, 0xb0, 0x25, 0x67, 0xc9, 0x18, 0x11, 0x6b, 0x80, 0x11, + 0xa1, 0x6c, 0x6e, 0xe0, 0x79, 0xc1, 0xbd, 0xf4, 0x8c, 0xad, 0xb0, 0x52, 0x2c, 0xa0, 
0xe8, 0x59, + 0x18, 0x6d, 0x87, 0x64, 0xcf, 0x0d, 0x04, 0xff, 0xa8, 0xbd, 0x5d, 0xd4, 0x45, 0x39, 0x56, 0x18, + 0x54, 0xee, 0x8a, 0x5c, 0xbf, 0x49, 0x64, 0xa6, 0xe9, 0x12, 0x4b, 0x46, 0xc5, 0xc3, 0x3f, 0x6b, + 0xe5, 0xd8, 0xc0, 0x42, 0x6f, 0x41, 0x99, 0xfd, 0x67, 0x27, 0xca, 0xf1, 0x93, 0xe7, 0x88, 0x9c, + 0x0b, 0x82, 0x00, 0x4e, 0x68, 0xa1, 0x6b, 0x00, 0xb1, 0x0c, 0x11, 0x1c, 0x89, 0xc8, 0x36, 0x8a, + 0xd7, 0x56, 0xc1, 0x83, 0x23, 0xac, 0x61, 0xa1, 0x67, 0xa0, 0x1c, 0x3b, 0xae, 0x77, 0xcb, 0xf5, + 0x49, 0xc4, 0x54, 0xce, 0x45, 0x99, 0x52, 0x41, 0x14, 0xe2, 0x04, 0x4e, 0x79, 0x1d, 0xe6, 0xf6, + 0xcd, 0x53, 0x6f, 0x8d, 0x32, 0x6c, 0xc6, 0xeb, 0xdc, 0x52, 0xa5, 0x58, 0xc3, 0xb0, 0x5f, 0x81, + 0x73, 0xf5, 0xa0, 0x55, 0x0f, 0xc2, 0x78, 0x25, 0x08, 0xef, 0x39, 0x61, 0x4b, 0xce, 0x5f, 0x45, + 0x46, 0xf7, 0xa7, 0x67, 0xcf, 0x10, 0xdf, 0x99, 0x46, 0xdc, 0xfe, 0x17, 0x18, 0xb7, 0x73, 0x4c, + 0x57, 0x8e, 0x26, 0xbb, 0x77, 0x55, 0x96, 0xbd, 0xeb, 0x4e, 0x4c, 0xd0, 0x6d, 0x96, 0x99, 0x2b, + 0xb9, 0x82, 0x44, 0xf5, 0xa7, 0xb5, 0xcc, 0x5c, 0x09, 0x30, 0xf3, 0xce, 0x32, 0xeb, 0xdb, 0xbf, + 0x5e, 0x64, 0xa7, 0x51, 0x2a, 0xe9, 0x1c, 0xfa, 0x22, 0x4c, 0x46, 0xe4, 0x96, 0xeb, 0x77, 0xee, + 0x4b, 0x21, 0xbc, 0x87, 0x33, 0x4e, 0x63, 0x59, 0xc7, 0xe4, 0xaa, 0x3c, 0xb3, 0x0c, 0xa7, 0xa8, + 0xd1, 0x79, 0x0a, 0x3b, 0xfe, 0x42, 0x74, 0x27, 0x22, 0xa1, 0x48, 0x7a, 0xc6, 0xe6, 0x09, 0xcb, + 0x42, 0x9c, 0xc0, 0xe9, 0xba, 0x64, 0x7f, 0xd6, 0x02, 0x1f, 0x07, 0x41, 0x2c, 0x57, 0x32, 0x4b, + 0x9b, 0xa3, 0x95, 0x63, 0x03, 0x0b, 0xad, 0x00, 0x8a, 0x3a, 0xed, 0xb6, 0xc7, 0x9e, 0xf3, 0x1d, + 0xef, 0x7a, 0x18, 0x74, 0xda, 0xfc, 0xad, 0xb3, 0xb8, 0xf8, 0x08, 0xbd, 0xc2, 0x1a, 0x5d, 0x50, + 0x9c, 0x51, 0x83, 0x9e, 0x3e, 0x9b, 0x11, 0xfb, 0xcd, 0x56, 0x77, 0x51, 0xa8, 0xd7, 0x1b, 0xac, + 0x08, 0x4b, 0x18, 0x5d, 0x4c, 0xac, 0x79, 0x8e, 0x39, 0x9c, 0x2c, 0x26, 0xac, 0x4a, 0xb1, 0x86, + 0x81, 0x96, 0x61, 0x24, 0xda, 0x8f, 0x9a, 0xb1, 0x88, 0xc3, 0x94, 0x93, 0xbe, 0xb2, 0xc1, 0x50, + 0xb4, 0x94, 0x0a, 0xbc, 
0x0a, 0x96, 0x75, 0xed, 0xef, 0x63, 0x97, 0x21, 0x4b, 0x91, 0x15, 0x77, + 0x42, 0x82, 0x76, 0x61, 0xa2, 0xcd, 0xa6, 0x5c, 0x04, 0x70, 0x16, 0xf3, 0xf6, 0xe2, 0x80, 0x52, + 0xed, 0x3d, 0x7a, 0xd0, 0x28, 0xad, 0x13, 0x13, 0x17, 0xea, 0x3a, 0x39, 0x6c, 0x52, 0xb7, 0xbf, + 0x86, 0xd8, 0x99, 0xdb, 0xe0, 0xa2, 0xea, 0x88, 0x30, 0x28, 0x16, 0x7c, 0xf9, 0x5c, 0xbe, 0xce, + 0x24, 0xf9, 0x22, 0x61, 0x94, 0x8c, 0x65, 0x5d, 0xf4, 0x26, 0x7b, 0x9b, 0xe6, 0x07, 0x5d, 0xbf, + 0x4c, 0xc5, 0x1c, 0xcb, 0x78, 0x86, 0x16, 0x15, 0xb1, 0x46, 0x04, 0xdd, 0x82, 0x09, 0x91, 0x51, + 0x49, 0x28, 0xc5, 0x8a, 0x86, 0xd2, 0x63, 0x02, 0xeb, 0xc0, 0xa3, 0x74, 0x01, 0x36, 0x2b, 0xa3, + 0x2d, 0xb8, 0xa0, 0xa5, 0x17, 0xbc, 0x1e, 0x3a, 0xec, 0xbd, 0xd2, 0x65, 0x9b, 0x48, 0x3b, 0x37, + 0x9f, 0x38, 0x3c, 0xa8, 0x5c, 0x58, 0xef, 0x85, 0x88, 0x7b, 0xd3, 0x41, 0xb7, 0xe1, 0x1c, 0xf7, + 0xdb, 0xab, 0x12, 0xa7, 0xe5, 0xb9, 0xbe, 0x3a, 0x98, 0xf9, 0x3a, 0x3c, 0x7f, 0x78, 0x50, 0x39, + 0xb7, 0x90, 0x85, 0x80, 0xb3, 0xeb, 0xa1, 0xd7, 0xa0, 0xdc, 0xf2, 0x23, 0x31, 0x06, 0xc3, 0x46, + 0xe6, 0xcc, 0x72, 0x75, 0xad, 0xa1, 0xbe, 0x3f, 0xf9, 0x83, 0x93, 0x0a, 0x68, 0x8b, 0x2b, 0xc6, + 0x94, 0x1c, 0x3a, 0x92, 0x9f, 0x25, 0x5d, 0x2c, 0x09, 0xc3, 0x73, 0x87, 0x6b, 0x84, 0x95, 0xe5, + 0xab, 0xe1, 0xd4, 0x63, 0x10, 0x46, 0x6f, 0x00, 0xa2, 0x8c, 0x9a, 0xdb, 0x24, 0x0b, 0x4d, 0x16, + 0x47, 0x9b, 0xe9, 0x11, 0x47, 0x0d, 0x4f, 0x09, 0xd4, 0xe8, 0xc2, 0xc0, 0x19, 0xb5, 0xd0, 0x0d, + 0x7a, 0x90, 0xe9, 0xa5, 0xc2, 0x82, 0x57, 0x32, 0xf7, 0xb3, 0x55, 0xd2, 0x0e, 0x49, 0xd3, 0x89, + 0x49, 0xcb, 0xa4, 0x88, 0x53, 0xf5, 0xe8, 0x5d, 0xaa, 0x52, 0xea, 0x80, 0x19, 0x2c, 0xa3, 0x3b, + 0xad, 0x0e, 0x95, 0x8b, 0xb7, 0x83, 0x28, 0x5e, 0x23, 0xf1, 0xbd, 0x20, 0xdc, 0x11, 0xb1, 0xc9, + 0x92, 0x30, 0x99, 0x09, 0x08, 0xeb, 0x78, 0x94, 0x0f, 0x66, 0x8f, 0xc3, 0xb5, 0x2a, 0x7b, 0xa1, + 0x1b, 0x4d, 0xf6, 0xc9, 0x0d, 0x5e, 0x8c, 0x25, 0x5c, 0xa2, 0xd6, 0xea, 0x4b, 0xec, 0xb5, 0x2d, + 0x85, 0x5a, 0xab, 0x2f, 0x61, 0x09, 0x47, 0xa4, 0x3b, 0x2b, 
0xe9, 0x64, 0xbe, 0x56, 0xb3, 0xfb, + 0x3a, 0x18, 0x30, 0x31, 0xa9, 0x0f, 0xd3, 0x2a, 0x1f, 0x2a, 0x0f, 0xda, 0x16, 0xcd, 0x4e, 0xb1, + 0x45, 0x32, 0x78, 0xc4, 0x37, 0xa5, 0x27, 0xae, 0xa5, 0x28, 0xe1, 0x2e, 0xda, 0x46, 0xf8, 0x92, + 0xe9, 0xbe, 0x29, 0x91, 0xae, 0x42, 0x39, 0xea, 0x6c, 0xb4, 0x82, 0x5d, 0xc7, 0xf5, 0xd9, 0xe3, + 0x98, 0xc6, 0x64, 0x35, 0x24, 0x00, 0x27, 0x38, 0x68, 0x05, 0x46, 0x1d, 0xa9, 0x04, 0x46, 0xf9, + 0xb1, 0x0a, 0x94, 0xea, 0x97, 0xbb, 0xef, 0x4a, 0xb5, 0xaf, 0xaa, 0x8b, 0x5e, 0x85, 0x09, 0xe1, + 0xad, 0xc5, 0x23, 0x38, 0xb0, 0xc7, 0x2b, 0xcd, 0x1c, 0xbf, 0xa1, 0x03, 0xb1, 0x89, 0x8b, 0xbe, + 0x00, 0x93, 0x94, 0x4a, 0x72, 0xb0, 0xcd, 0x9e, 0x1d, 0xe4, 0x44, 0xd4, 0x52, 0x5d, 0xe8, 0x95, + 0x71, 0x8a, 0x18, 0x6a, 0xc1, 0xe3, 0x4e, 0x27, 0x0e, 0x98, 0x22, 0xdd, 0x5c, 0xff, 0xeb, 0xc1, + 0x0e, 0xf1, 0xd9, 0x1b, 0xd6, 0xe8, 0xe2, 0xa5, 0xc3, 0x83, 0xca, 0xe3, 0x0b, 0x3d, 0xf0, 0x70, + 0x4f, 0x2a, 0xe8, 0x0e, 0x8c, 0xc5, 0x81, 0xc7, 0x0c, 0xe3, 0x29, 0x2b, 0xf1, 0x48, 0x7e, 0xf8, + 0x9f, 0x75, 0x85, 0xa6, 0x2b, 0x91, 0x54, 0x55, 0xac, 0xd3, 0x41, 0xeb, 0x7c, 0x8f, 0xb1, 0xc0, + 0xa8, 0x24, 0x9a, 0x7d, 0x34, 0x7f, 0x60, 0x54, 0xfc, 0x54, 0x73, 0x0b, 0x8a, 0x9a, 0x58, 0x27, + 0x83, 0xae, 0xc3, 0x4c, 0x3b, 0x74, 0x03, 0xb6, 0xb0, 0xd5, 0x23, 0xc6, 0xac, 0x99, 0xdd, 0xa0, + 0x9e, 0x46, 0xc0, 0xdd, 0x75, 0xa8, 0x90, 0x29, 0x0b, 0x67, 0xcf, 0xf3, 0x54, 0x59, 0x9c, 0xf1, + 0xe6, 0x65, 0x58, 0x41, 0xd1, 0x2a, 0x3b, 0x97, 0xb9, 0x38, 0x38, 0x3b, 0x97, 0x1f, 0xe3, 0x41, + 0x17, 0x1b, 0x39, 0xbf, 0xa4, 0xfe, 0xe2, 0x84, 0x02, 0xbd, 0x37, 0xa2, 0x6d, 0x27, 0x24, 0xf5, + 0x30, 0x68, 0x12, 0xde, 0x19, 0x6e, 0x93, 0xff, 0x18, 0x8f, 0xdf, 0x48, 0xef, 0x8d, 0x46, 0x16, + 0x02, 0xce, 0xae, 0x87, 0x5a, 0x5a, 0x86, 0x68, 0xca, 0x86, 0x46, 0xb3, 0x8f, 0xf7, 0x30, 0x33, + 0x4a, 0xf1, 0xac, 0xc9, 0x5a, 0x34, 0x8a, 0x23, 0x9c, 0xa2, 0x89, 0xbe, 0x0b, 0xa6, 0x45, 0xb8, + 0xa3, 0x64, 0xdc, 0x2f, 0x24, 0xf6, 0x8b, 0x38, 0x05, 0xc3, 0x5d, 0xd8, 0x94, 0xe5, 0x23, 0xbe, + 
0xb3, 0xe1, 0x11, 0xb1, 0x08, 0x6f, 0xb9, 0xfe, 0x4e, 0x34, 0x7b, 0x91, 0x7d, 0x35, 0x63, 0xf9, + 0x96, 0xbb, 0xa0, 0x38, 0xa3, 0xc6, 0xdc, 0x77, 0xc2, 0x4c, 0xd7, 0xcd, 0x75, 0xac, 0x20, 0xe6, + 0x7f, 0x32, 0x04, 0x65, 0xa5, 0x94, 0x47, 0x57, 0xcd, 0xb7, 0x96, 0xf3, 0xe9, 0xb7, 0x96, 0x51, + 0x2a, 0x1b, 0xe8, 0xcf, 0x2b, 0xeb, 0x86, 0x79, 0x5e, 0x21, 0x3f, 0x65, 0x98, 0xce, 0xdd, 0xf7, + 0x75, 0xf5, 0xd3, 0x74, 0x2c, 0xc5, 0x81, 0x1f, 0x6d, 0x4a, 0x3d, 0xd5, 0x36, 0x03, 0x66, 0xec, + 0x45, 0x4f, 0x52, 0x01, 0xa9, 0x55, 0xab, 0xa7, 0x53, 0x58, 0xd6, 0x69, 0x21, 0xe6, 0x30, 0x26, + 0x48, 0x52, 0x36, 0x8b, 0x09, 0x92, 0x23, 0x0f, 0x28, 0x48, 0x4a, 0x02, 0x38, 0xa1, 0x85, 0x3c, + 0x98, 0x69, 0x9a, 0xd9, 0x47, 0x95, 0x7b, 0xdf, 0x93, 0x7d, 0xf3, 0x80, 0x76, 0xb4, 0x54, 0x6f, + 0x4b, 0x69, 0x2a, 0xb8, 0x9b, 0x30, 0x7a, 0x15, 0x46, 0xdf, 0x0b, 0x22, 0xb6, 0x28, 0x05, 0xaf, + 0x21, 0xdd, 0xa0, 0x46, 0xdf, 0xbc, 0xdd, 0x60, 0xe5, 0x47, 0x07, 0x95, 0xb1, 0x7a, 0xd0, 0x92, + 0x7f, 0xb1, 0xaa, 0x80, 0xee, 0xc3, 0x39, 0xe3, 0x84, 0x56, 0xdd, 0x85, 0xc1, 0xbb, 0x7b, 0x41, + 0x34, 0x77, 0xae, 0x96, 0x45, 0x09, 0x67, 0x37, 0x40, 0x8f, 0x3d, 0x3f, 0x10, 0x99, 0x7b, 0x25, + 0x3f, 0xc3, 0xd8, 0x96, 0xb2, 0xee, 0x04, 0x9f, 0x42, 0xc0, 0xdd, 0x75, 0xec, 0x5f, 0xe5, 0x6f, + 0x18, 0x42, 0xd3, 0x49, 0xa2, 0x8e, 0x77, 0x1a, 0x89, 0xa1, 0x96, 0x0d, 0x25, 0xec, 0x03, 0xbf, + 0x93, 0xfd, 0xa6, 0xc5, 0xde, 0xc9, 0xd6, 0xc9, 0x6e, 0xdb, 0xa3, 0xf2, 0xf6, 0xc3, 0xef, 0xf8, + 0x9b, 0x30, 0x1a, 0x8b, 0xd6, 0x7a, 0xe5, 0xb2, 0xd2, 0x3a, 0xc5, 0xde, 0x0a, 0x15, 0xa7, 0x23, + 0x4b, 0xb1, 0x22, 0x63, 0xff, 0x13, 0x3e, 0x03, 0x12, 0x72, 0x0a, 0x0a, 0xb1, 0xaa, 0xa9, 0x10, + 0xab, 0xf4, 0xf9, 0x82, 0x1c, 0xc5, 0xd8, 0x3f, 0x36, 0xfb, 0xcd, 0x84, 0xca, 0x8f, 0xfa, 0x03, + 0xad, 0xfd, 0xa3, 0x16, 0x9c, 0xcd, 0xb2, 0x68, 0xa2, 0xdc, 0x29, 0x17, 0x69, 0xd5, 0x83, 0xb5, + 0x1a, 0xc1, 0xbb, 0xa2, 0x1c, 0x2b, 0x8c, 0x81, 0xd3, 0x44, 0x1c, 0x2f, 0x4e, 0xdc, 0x6d, 0x98, + 0xa8, 0x87, 0x44, 0xbb, 0x03, 0x5e, 
0xe7, 0xfe, 0x74, 0xbc, 0x3f, 0xcf, 0x1e, 0xdb, 0x97, 0xce, + 0xfe, 0xb9, 0x02, 0x9c, 0xe5, 0x2f, 0x4e, 0x0b, 0x7b, 0x81, 0xdb, 0xaa, 0x07, 0x2d, 0x91, 0xe2, + 0xe3, 0x6d, 0x18, 0x6f, 0x6b, 0x7a, 0x88, 0x5e, 0x91, 0xaa, 0x74, 0x7d, 0x45, 0x22, 0x0f, 0xea, + 0xa5, 0xd8, 0xa0, 0x85, 0x5a, 0x30, 0x4e, 0xf6, 0xdc, 0xa6, 0x7a, 0xb6, 0x28, 0x1c, 0xfb, 0x6e, + 0x50, 0xad, 0x2c, 0x6b, 0x74, 0xb0, 0x41, 0xf5, 0x21, 0x64, 0x7d, 0xb3, 0x7f, 0xcc, 0x82, 0x47, + 0x73, 0xe2, 0x5a, 0xd1, 0xe6, 0xee, 0xb1, 0xb7, 0x3d, 0x91, 0x40, 0x4a, 0x35, 0xc7, 0x5f, 0xfc, + 0xb0, 0x80, 0xa2, 0xcf, 0x01, 0xf0, 0x17, 0x3b, 0x2a, 0x1e, 0xf5, 0x0b, 0x00, 0x64, 0xc4, 0x2e, + 0xd1, 0x62, 0x4e, 0xc8, 0xfa, 0x58, 0xa3, 0x65, 0xff, 0x74, 0x11, 0x86, 0xd8, 0x0b, 0x11, 0x5a, + 0x81, 0x91, 0x6d, 0x1e, 0xe9, 0x79, 0x90, 0xa0, 0xd2, 0x89, 0x9c, 0xc9, 0x0b, 0xb0, 0xac, 0x8c, + 0x56, 0xe1, 0x0c, 0x8f, 0x94, 0xed, 0x55, 0x89, 0xe7, 0xec, 0x4b, 0x75, 0x05, 0x4f, 0xba, 0xa4, + 0xe2, 0x67, 0xd4, 0xba, 0x51, 0x70, 0x56, 0x3d, 0xf4, 0x3a, 0x4c, 0x52, 0xfe, 0x2e, 0xe8, 0xc4, + 0x92, 0x12, 0x8f, 0x91, 0xad, 0x18, 0xca, 0x75, 0x03, 0x8a, 0x53, 0xd8, 0x54, 0xf0, 0x6a, 0x77, + 0x29, 0x66, 0x86, 0x12, 0xc1, 0xcb, 0x54, 0xc6, 0x98, 0xb8, 0xcc, 0x94, 0xa9, 0xc3, 0x0c, 0xb7, + 0xd6, 0xb7, 0x43, 0x12, 0x6d, 0x07, 0x5e, 0x4b, 0xe4, 0xec, 0x4e, 0x4c, 0x99, 0x52, 0x70, 0xdc, + 0x55, 0x83, 0x52, 0xd9, 0x74, 0x5c, 0xaf, 0x13, 0x92, 0x84, 0xca, 0xb0, 0x49, 0x65, 0x25, 0x05, + 0xc7, 0x5d, 0x35, 0xe8, 0x3a, 0x3a, 0x27, 0x92, 0x68, 0x4b, 0xaf, 0x7e, 0x65, 0x9f, 0x36, 0x22, + 0xfd, 0x9b, 0x7a, 0x84, 0xb5, 0x11, 0x16, 0x3c, 0x2a, 0x0d, 0xb7, 0xa6, 0x4f, 0x14, 0x9e, 0x4d, + 0x92, 0xca, 0x83, 0xa4, 0x72, 0xfe, 0x7d, 0x0b, 0xce, 0x64, 0xd8, 0xc1, 0xf2, 0xa3, 0x6a, 0xcb, + 0x8d, 0x62, 0x95, 0x58, 0x46, 0x3b, 0xaa, 0x78, 0x39, 0x56, 0x18, 0x74, 0x3f, 0xf0, 0xc3, 0x30, + 0x7d, 0x00, 0x0a, 0x3b, 0x33, 0x01, 0x3d, 0xde, 0x01, 0x88, 0x2e, 0x41, 0xa9, 0x13, 0x11, 0x19, + 0x90, 0x4a, 0x9d, 0xdf, 0x4c, 0xc3, 0xcc, 0x20, 0x94, 0x35, 0xdd, 0x52, 
0xca, 0x5d, 0x8d, 0x35, + 0xe5, 0x1a, 0x5b, 0x0e, 0xb3, 0xbf, 0x5a, 0x84, 0xf3, 0xb9, 0x16, 0xef, 0xb4, 0x4b, 0xbb, 0x81, + 0xef, 0xc6, 0x81, 0x7a, 0x7d, 0xe4, 0x21, 0x51, 0x48, 0x7b, 0x7b, 0x55, 0x94, 0x63, 0x85, 0x81, + 0x2e, 0xcb, 0x74, 0xee, 0xe9, 0xd4, 0x39, 0x8b, 0x55, 0x23, 0xa3, 0xfb, 0xa0, 0x69, 0xc9, 0x9e, + 0x84, 0x52, 0x3b, 0x08, 0xbc, 0xf4, 0x61, 0x44, 0xbb, 0x1b, 0x04, 0x1e, 0x66, 0x40, 0xf4, 0x09, + 0x31, 0x0e, 0xa9, 0xe7, 0x36, 0xec, 0xb4, 0x82, 0x48, 0x1b, 0x8c, 0xa7, 0x61, 0x64, 0x87, 0xec, + 0x87, 0xae, 0xbf, 0x95, 0x7e, 0x86, 0xbd, 0xc9, 0x8b, 0xb1, 0x84, 0x9b, 0x99, 0x23, 0x46, 0x4e, + 0x3a, 0x9f, 0xd8, 0x68, 0xdf, 0xab, 0xed, 0x87, 0x8a, 0x30, 0x85, 0x17, 0xab, 0xdf, 0x9e, 0x88, + 0x3b, 0xdd, 0x13, 0x71, 0xd2, 0xf9, 0xc4, 0xfa, 0xcf, 0xc6, 0x2f, 0x59, 0x30, 0xc5, 0xa2, 0x2b, + 0x8b, 0x40, 0x1c, 0x6e, 0xe0, 0x9f, 0x02, 0xeb, 0xf6, 0x24, 0x0c, 0x85, 0xb4, 0xd1, 0x74, 0x92, + 0x20, 0xd6, 0x13, 0xcc, 0x61, 0xe8, 0x71, 0x28, 0xb1, 0x2e, 0xd0, 0xc9, 0x1b, 0xe7, 0xf9, 0x15, + 0xaa, 0x4e, 0xec, 0x60, 0x56, 0xca, 0xbc, 0xcb, 0x31, 0x69, 0x7b, 0x2e, 0xef, 0x74, 0xf2, 0xb4, + 0xf1, 0xd1, 0xf0, 0x2e, 0xcf, 0xec, 0xda, 0x07, 0xf3, 0x2e, 0xcf, 0x26, 0xd9, 0x5b, 0x2c, 0xfa, + 0x6f, 0x05, 0xb8, 0x98, 0x59, 0x6f, 0x60, 0xef, 0xf2, 0xde, 0xb5, 0x4f, 0xc6, 0x9a, 0x26, 0xdb, + 0xc8, 0xa5, 0x78, 0x8a, 0x46, 0x2e, 0xa5, 0x41, 0x39, 0xc7, 0xa1, 0x01, 0x9c, 0xbe, 0x33, 0x87, + 0xec, 0x23, 0xe2, 0xf4, 0x9d, 0xd9, 0xb7, 0x1c, 0xb1, 0xee, 0xcf, 0x0a, 0x39, 0xdf, 0xc2, 0x04, + 0xbc, 0x2b, 0xf4, 0x9c, 0x61, 0xc0, 0x48, 0x70, 0xc2, 0xe3, 0xfc, 0x8c, 0xe1, 0x65, 0x58, 0x41, + 0x91, 0xab, 0xb9, 0x4f, 0x17, 0xf2, 0x53, 0x48, 0xe6, 0x36, 0x35, 0x6f, 0xbe, 0x44, 0xa9, 0x21, + 0xc8, 0x70, 0xa5, 0x5e, 0xd5, 0x84, 0xf2, 0xe2, 0xe0, 0x42, 0xf9, 0x78, 0xb6, 0x40, 0x8e, 0x16, + 0x60, 0x6a, 0xd7, 0xf5, 0xe9, 0xb1, 0xb9, 0x6f, 0xb2, 0xa2, 0x2a, 0x9a, 0xc8, 0xaa, 0x09, 0xc6, + 0x69, 0xfc, 0xb9, 0x57, 0x61, 0xe2, 0xc1, 0xd5, 0x91, 0xdf, 0x2c, 0xc2, 0x63, 0x3d, 0xb6, 0x3d, + 0x3f, 0xeb, 
0x8d, 0x39, 0xd0, 0xce, 0xfa, 0xae, 0x79, 0xa8, 0xc3, 0xd9, 0xcd, 0x8e, 0xe7, 0xed, + 0x33, 0x3b, 0x52, 0xd2, 0x92, 0x18, 0x82, 0x57, 0x7c, 0x5c, 0x66, 0xb4, 0x58, 0xc9, 0xc0, 0xc1, + 0x99, 0x35, 0xd1, 0x1b, 0x80, 0x02, 0x91, 0xbf, 0xf6, 0x3a, 0xf1, 0x85, 0x7e, 0x9f, 0x0d, 0x7c, + 0x31, 0xd9, 0x8c, 0xb7, 0xbb, 0x30, 0x70, 0x46, 0x2d, 0xca, 0xf4, 0xd3, 0x5b, 0x69, 0x5f, 0x75, + 0x2b, 0xc5, 0xf4, 0x63, 0x1d, 0x88, 0x4d, 0x5c, 0x74, 0x1d, 0x66, 0x9c, 0x3d, 0xc7, 0xe5, 0x51, + 0xf6, 0x24, 0x01, 0xce, 0xf5, 0x2b, 0x25, 0xd8, 0x42, 0x1a, 0x01, 0x77, 0xd7, 0x49, 0x39, 0x58, + 0x0f, 0xe7, 0x3b, 0x58, 0xf7, 0x3e, 0x17, 0xfb, 0xe9, 0x74, 0xed, 0x7f, 0x6f, 0xd1, 0xeb, 0x2b, + 0x23, 0x07, 0x3d, 0x1d, 0x07, 0xa5, 0x9b, 0xd4, 0x7c, 0x9d, 0xcf, 0x69, 0x96, 0x22, 0x09, 0x10, + 0x9b, 0xb8, 0x7c, 0x41, 0x44, 0x89, 0xb3, 0x8d, 0xc1, 0xba, 0x8b, 0x58, 0x09, 0x0a, 0x03, 0x7d, + 0x1e, 0x46, 0x5a, 0xee, 0x9e, 0x1b, 0x05, 0xa1, 0xd8, 0x2c, 0xc7, 0x74, 0x59, 0x48, 0xce, 0xc1, + 0x2a, 0x27, 0x83, 0x25, 0x3d, 0xfb, 0x87, 0x0a, 0x30, 0x21, 0x5b, 0x7c, 0xb3, 0x13, 0xc4, 0xce, + 0x29, 0x5c, 0xcb, 0xd7, 0x8d, 0x6b, 0xf9, 0x13, 0xbd, 0x02, 0x46, 0xb0, 0x2e, 0xe5, 0x5e, 0xc7, + 0xb7, 0x53, 0xd7, 0xf1, 0x53, 0xfd, 0x49, 0xf5, 0xbe, 0x86, 0xff, 0xa9, 0x05, 0x33, 0x06, 0xfe, + 0x29, 0xdc, 0x06, 0x2b, 0xe6, 0x6d, 0xf0, 0x44, 0xdf, 0x6f, 0xc8, 0xb9, 0x05, 0x7e, 0xa0, 0x98, + 0xea, 0x3b, 0x3b, 0xfd, 0xdf, 0x83, 0xd2, 0xb6, 0x13, 0xb6, 0x7a, 0x05, 0xa6, 0xed, 0xaa, 0x34, + 0x7f, 0xc3, 0x09, 0x5b, 0xfc, 0x0c, 0x7f, 0x56, 0x65, 0xbd, 0x74, 0xc2, 0x56, 0x5f, 0xdf, 0x32, + 0xd6, 0x14, 0x7a, 0x05, 0x86, 0xa3, 0x66, 0xd0, 0x56, 0x96, 0x9f, 0x97, 0x78, 0x46, 0x4c, 0x5a, + 0x72, 0x74, 0x50, 0x41, 0x66, 0x73, 0xb4, 0x18, 0x0b, 0x7c, 0xf4, 0x36, 0x4c, 0xb0, 0x5f, 0xca, + 0x02, 0xa2, 0x98, 0x9f, 0x0e, 0xa1, 0xa1, 0x23, 0x72, 0x43, 0x1a, 0xa3, 0x08, 0x9b, 0xa4, 0xe6, + 0xb6, 0xa0, 0xac, 0x3e, 0xeb, 0xa1, 0xfa, 0x04, 0xfd, 0x9b, 0x22, 0x9c, 0xc9, 0x58, 0x73, 0x28, + 0x32, 0x66, 0xe2, 0xf9, 0x01, 0x97, 0xea, 0x07, 
0x9c, 0x8b, 0x88, 0x49, 0x43, 0x2d, 0xb1, 0xb6, + 0x06, 0x6e, 0xf4, 0x4e, 0x44, 0xd2, 0x8d, 0xd2, 0xa2, 0xfe, 0x8d, 0xd2, 0xc6, 0x4e, 0x6d, 0xa8, + 0x69, 0x43, 0xaa, 0xa7, 0x0f, 0x75, 0x4e, 0xff, 0xb8, 0x08, 0x67, 0xb3, 0x62, 0xd8, 0xa0, 0xef, + 0x4d, 0xa5, 0xc6, 0x79, 0x71, 0xd0, 0xe8, 0x37, 0x3c, 0x5f, 0x8e, 0x48, 0xf4, 0x3c, 0x6f, 0x26, + 0xcb, 0xe9, 0x3b, 0xcc, 0xa2, 0x4d, 0xe6, 0x48, 0x1a, 0xf2, 0x94, 0x46, 0xf2, 0xf8, 0xf8, 0xf4, + 0xc0, 0x1d, 0x10, 0xb9, 0x90, 0xa2, 0x94, 0x23, 0xa9, 0x2c, 0xee, 0xef, 0x48, 0x2a, 0x5b, 0x9e, + 0x73, 0x61, 0x4c, 0xfb, 0x9a, 0x87, 0x3a, 0xe3, 0x3b, 0xf4, 0xb6, 0xd2, 0xfa, 0xfd, 0x50, 0x67, + 0xfd, 0xc7, 0x2c, 0x48, 0x99, 0x59, 0x2a, 0x75, 0x97, 0x95, 0xab, 0xee, 0xba, 0x04, 0xa5, 0x30, + 0xf0, 0x48, 0x3a, 0x13, 0x0d, 0x0e, 0x3c, 0x82, 0x19, 0x84, 0x62, 0xc4, 0x89, 0xb2, 0x63, 0x5c, + 0x17, 0xe4, 0x84, 0x88, 0xf6, 0x24, 0x0c, 0x79, 0x64, 0x8f, 0x78, 0xe9, 0x30, 0xef, 0xb7, 0x68, + 0x21, 0xe6, 0x30, 0xfb, 0x97, 0x4a, 0x70, 0xa1, 0xa7, 0x2b, 0x36, 0x15, 0x87, 0xb6, 0x9c, 0x98, + 0xdc, 0x73, 0xf6, 0xd3, 0xf1, 0x98, 0xaf, 0xf3, 0x62, 0x2c, 0xe1, 0xcc, 0xf2, 0x9c, 0xc7, 0x5f, + 0x4c, 0x29, 0x07, 0x45, 0xd8, 0x45, 0x01, 0x7d, 0x08, 0x49, 0xee, 0xaf, 0x01, 0x44, 0x91, 0xc7, + 0xed, 0x06, 0x5a, 0xc2, 0xa4, 0x3d, 0x89, 0xd3, 0xd9, 0xb8, 0x25, 0x20, 0x58, 0xc3, 0x42, 0x55, + 0x98, 0x6e, 0x87, 0x41, 0xcc, 0x75, 0xad, 0x55, 0x6e, 0x70, 0x34, 0x64, 0x7a, 0xc1, 0xd6, 0x53, + 0x70, 0xdc, 0x55, 0x03, 0xbd, 0x04, 0x63, 0xc2, 0x33, 0xb6, 0x1e, 0x04, 0x9e, 0x50, 0x03, 0x29, + 0xf3, 0x95, 0x46, 0x02, 0xc2, 0x3a, 0x9e, 0x56, 0x8d, 0x29, 0x70, 0x47, 0x32, 0xab, 0x71, 0x25, + 0xae, 0x86, 0x97, 0x8a, 0x67, 0x35, 0x3a, 0x50, 0x3c, 0xab, 0x44, 0x31, 0x56, 0x1e, 0xf8, 0xcd, + 0x0a, 0xfa, 0xaa, 0x92, 0x7e, 0xbe, 0x04, 0x67, 0xc4, 0xc2, 0x79, 0xd8, 0xcb, 0xe5, 0x21, 0xa5, + 0xe2, 0xff, 0xf6, 0x9a, 0x39, 0xed, 0x35, 0xf3, 0xc3, 0x16, 0x98, 0xec, 0x15, 0xfa, 0xff, 0x72, + 0x03, 0xda, 0xbf, 0x94, 0xcb, 0xae, 0xb5, 0xe4, 0x05, 0xf2, 0x01, 0x43, 0xdb, 0xdb, 
0xff, 0xce, + 0x82, 0x27, 0xfa, 0x52, 0x44, 0xcb, 0x50, 0x66, 0x3c, 0xa0, 0x26, 0x9d, 0x3d, 0xa5, 0x0c, 0x12, + 0x25, 0x20, 0x87, 0x25, 0x4d, 0x6a, 0xa2, 0xe5, 0xae, 0xcc, 0x01, 0x4f, 0x67, 0x64, 0x0e, 0x38, + 0x67, 0x0c, 0xcf, 0x03, 0xa6, 0x0e, 0xf8, 0xd5, 0x22, 0x0c, 0xf3, 0x15, 0x7f, 0x0a, 0x62, 0xd8, + 0x8a, 0xd0, 0xdb, 0xf6, 0x88, 0x68, 0xc5, 0xfb, 0x32, 0x5f, 0x75, 0x62, 0x87, 0xb3, 0x09, 0xea, + 0xb6, 0x4a, 0x34, 0xbc, 0x68, 0xde, 0xb8, 0xcf, 0xe6, 0x52, 0x8a, 0x49, 0xe0, 0x34, 0xb4, 0xdb, + 0xed, 0x8b, 0x00, 0x11, 0xcb, 0xba, 0x4f, 0x69, 0x88, 0xd8, 0x68, 0x9f, 0xec, 0xd1, 0x7a, 0x43, + 0x21, 0xf3, 0x3e, 0x24, 0x3b, 0x5d, 0x01, 0xb0, 0x46, 0x71, 0xee, 0x65, 0x28, 0x2b, 0xe4, 0x7e, + 0x5a, 0x9c, 0x71, 0x9d, 0xb9, 0xf8, 0x2c, 0x4c, 0xa5, 0xda, 0x3a, 0x96, 0x12, 0xe8, 0x97, 0x2d, + 0x98, 0xe2, 0x5d, 0x5e, 0xf6, 0xf7, 0xc4, 0x99, 0xfa, 0x3e, 0x9c, 0xf5, 0x32, 0xce, 0x36, 0x31, + 0xa3, 0x83, 0x9f, 0x85, 0x4a, 0xe9, 0x93, 0x05, 0xc5, 0x99, 0x6d, 0xa0, 0x2b, 0x74, 0xdd, 0xd2, + 0xb3, 0xcb, 0xf1, 0x84, 0x17, 0xd3, 0x38, 0x5f, 0xb3, 0xbc, 0x0c, 0x2b, 0xa8, 0xfd, 0xbb, 0x16, + 0xcc, 0xf0, 0x9e, 0xdf, 0x24, 0xfb, 0x6a, 0x87, 0x7f, 0x98, 0x7d, 0x17, 0xc9, 0x3c, 0x0a, 0x39, + 0xc9, 0x3c, 0xf4, 0x4f, 0x2b, 0xf6, 0xfc, 0xb4, 0x9f, 0xb3, 0x40, 0xac, 0xc0, 0x53, 0x10, 0xe5, + 0xbf, 0xd3, 0x14, 0xe5, 0xe7, 0xf2, 0x17, 0x75, 0x8e, 0x0c, 0xff, 0xa7, 0x16, 0x4c, 0x73, 0x84, + 0xe4, 0x2d, 0xf9, 0x43, 0x9d, 0x87, 0x41, 0xb2, 0xf2, 0xa9, 0x54, 0xdd, 0xd9, 0x1f, 0x65, 0x4c, + 0x56, 0xa9, 0xe7, 0x64, 0xb5, 0xe4, 0x06, 0x3a, 0x46, 0x46, 0xca, 0x63, 0x07, 0xc5, 0xb6, 0xff, + 0xc8, 0x02, 0xc4, 0x9b, 0x31, 0xd8, 0x1f, 0xca, 0x54, 0xb0, 0x52, 0xed, 0xba, 0x48, 0x8e, 0x1a, + 0x05, 0xc1, 0x1a, 0xd6, 0x89, 0x0c, 0x4f, 0xca, 0x20, 0xa0, 0xd8, 0xdf, 0x20, 0xe0, 0x18, 0x23, + 0xfa, 0xbf, 0x4b, 0x90, 0x76, 0x2b, 0x40, 0x77, 0x61, 0xbc, 0xe9, 0xb4, 0x9d, 0x0d, 0xd7, 0x73, + 0x63, 0x97, 0x44, 0xbd, 0x2c, 0x89, 0x96, 0x34, 0x3c, 0xf1, 0xd4, 0xab, 0x95, 0x60, 0x83, 0x0e, + 0x9a, 0x07, 0x68, 0x87, 
0xee, 0x9e, 0xeb, 0x91, 0x2d, 0xa6, 0x71, 0x60, 0x7e, 0x93, 0xdc, 0x3c, + 0x46, 0x96, 0x62, 0x0d, 0x23, 0xc3, 0x05, 0xae, 0xf8, 0xf0, 0x5c, 0xe0, 0x4a, 0xc7, 0x74, 0x81, + 0x1b, 0x1a, 0xc8, 0x05, 0x0e, 0xc3, 0x23, 0x92, 0x45, 0xa2, 0xff, 0x57, 0x5c, 0x8f, 0x08, 0xbe, + 0x98, 0x7b, 0x53, 0xce, 0x1d, 0x1e, 0x54, 0x1e, 0xc1, 0x99, 0x18, 0x38, 0xa7, 0x26, 0xfa, 0x1c, + 0xcc, 0x3a, 0x9e, 0x17, 0xdc, 0x53, 0xa3, 0xb6, 0x1c, 0x35, 0x1d, 0x8f, 0x6b, 0xec, 0x47, 0x18, + 0xd5, 0xc7, 0x0f, 0x0f, 0x2a, 0xb3, 0x0b, 0x39, 0x38, 0x38, 0xb7, 0x76, 0xca, 0x83, 0x6e, 0xb4, + 0xaf, 0x07, 0xdd, 0x6b, 0x50, 0x6e, 0x87, 0x41, 0x73, 0x55, 0xf3, 0xea, 0xb9, 0xc8, 0xf2, 0xdd, + 0xcb, 0xc2, 0xa3, 0x83, 0xca, 0x84, 0xfa, 0xc3, 0x6e, 0xf8, 0xa4, 0x82, 0xbd, 0x03, 0x67, 0x1a, + 0x24, 0x74, 0x59, 0x26, 0xcd, 0x56, 0xb2, 0xa1, 0xd7, 0xa1, 0x1c, 0xa6, 0x8e, 0xb0, 0x81, 0x02, + 0x34, 0x69, 0xd1, 0x82, 0xe5, 0x91, 0x95, 0x10, 0xb2, 0xff, 0xc4, 0x82, 0x11, 0x61, 0x61, 0x7e, + 0x0a, 0x9c, 0xd3, 0x82, 0xa1, 0xc0, 0xae, 0x64, 0x1f, 0xf3, 0xac, 0x33, 0xb9, 0xaa, 0xeb, 0x5a, + 0x4a, 0x75, 0xfd, 0x44, 0x2f, 0x22, 0xbd, 0x95, 0xd6, 0x7f, 0xb3, 0x08, 0x93, 0xa6, 0x53, 0xc8, + 0x29, 0x0c, 0xc1, 0x1a, 0x8c, 0x44, 0xc2, 0x03, 0xa9, 0x90, 0x6f, 0x39, 0x9d, 0x9e, 0xc4, 0xc4, + 0x2c, 0x4a, 0xf8, 0x1c, 0x49, 0x22, 0x99, 0xae, 0x4d, 0xc5, 0x87, 0xe8, 0xda, 0xd4, 0xcf, 0x2f, + 0xa7, 0x74, 0x12, 0x7e, 0x39, 0xf6, 0xd7, 0xd9, 0x55, 0xa3, 0x97, 0x9f, 0x02, 0x17, 0x72, 0xdd, + 0xbc, 0x94, 0xec, 0x1e, 0x2b, 0x4b, 0x74, 0x2a, 0x87, 0x1b, 0xf9, 0x45, 0x0b, 0x2e, 0x64, 0x7c, + 0x95, 0xc6, 0x9a, 0x3c, 0x0b, 0xa3, 0x4e, 0xa7, 0xe5, 0xaa, 0xbd, 0xac, 0x3d, 0x63, 0x2d, 0x88, + 0x72, 0xac, 0x30, 0xd0, 0x12, 0xcc, 0x90, 0xfb, 0x6d, 0x97, 0xbf, 0x23, 0xea, 0xb6, 0x8b, 0x45, + 0x1e, 0xaa, 0x76, 0x39, 0x0d, 0xc4, 0xdd, 0xf8, 0xca, 0xad, 0xbb, 0x98, 0xeb, 0xd6, 0xfd, 0x0f, + 0x2c, 0x18, 0x53, 0xde, 0x26, 0x0f, 0x7d, 0xb4, 0xbf, 0xcb, 0x1c, 0xed, 0xc7, 0x7a, 0x8c, 0x76, + 0xce, 0x30, 0xff, 0xed, 0x82, 0xea, 0x6f, 0x3d, 0x08, 0xe3, 
0x01, 0x58, 0x9e, 0x57, 0x60, 0xb4, + 0x1d, 0x06, 0x71, 0xd0, 0x0c, 0x3c, 0xc1, 0xf1, 0x3c, 0x9e, 0x44, 0x1d, 0xe0, 0xe5, 0x47, 0xda, + 0x6f, 0xac, 0xb0, 0xd9, 0xe8, 0x05, 0x61, 0x2c, 0xb8, 0x8c, 0x64, 0xf4, 0x82, 0x30, 0xc6, 0x0c, + 0x82, 0x5a, 0x00, 0xb1, 0x13, 0x6e, 0x91, 0x98, 0x96, 0x89, 0x00, 0x26, 0xf9, 0x87, 0x47, 0x27, + 0x76, 0xbd, 0x79, 0xd7, 0x8f, 0xa3, 0x38, 0x9c, 0xaf, 0xf9, 0xf1, 0xed, 0x90, 0x0b, 0x50, 0x5a, + 0x18, 0x01, 0x45, 0x0b, 0x6b, 0x74, 0xa5, 0xaf, 0x27, 0x6b, 0x63, 0xc8, 0x7c, 0x10, 0x5f, 0x13, + 0xe5, 0x58, 0x61, 0xd8, 0x2f, 0xb3, 0xab, 0x84, 0x0d, 0xd0, 0xf1, 0x3c, 0xfc, 0xbf, 0x31, 0xaa, + 0x86, 0x96, 0xbd, 0x86, 0x55, 0xf5, 0x38, 0x02, 0xbd, 0x4f, 0x6e, 0xda, 0xb0, 0xee, 0x47, 0x93, + 0x04, 0x1b, 0x40, 0xdf, 0xdd, 0x65, 0x27, 0xf1, 0x5c, 0x9f, 0x2b, 0xe0, 0x18, 0x96, 0x11, 0x2c, + 0x7c, 0x36, 0x0b, 0x33, 0x5c, 0xab, 0x8b, 0x45, 0xae, 0x85, 0xcf, 0x16, 0x00, 0x9c, 0xe0, 0xa0, + 0xab, 0x42, 0xfc, 0x2e, 0x19, 0x49, 0xf4, 0xa4, 0xf8, 0x2d, 0x3f, 0x5f, 0x93, 0xbf, 0x9f, 0x87, + 0x31, 0x95, 0x4c, 0xaf, 0xce, 0x73, 0x92, 0x89, 0x70, 0x2e, 0xcb, 0x49, 0x31, 0xd6, 0x71, 0xd0, + 0x3a, 0x4c, 0x45, 0x5c, 0xf7, 0xa2, 0xa2, 0xf6, 0x71, 0x1d, 0xd6, 0x27, 0xa5, 0x7d, 0x45, 0xc3, + 0x04, 0x1f, 0xb1, 0x22, 0x7e, 0x74, 0x48, 0x87, 0xcd, 0x34, 0x09, 0xf4, 0x3a, 0x4c, 0x7a, 0x7a, + 0xda, 0xfa, 0xba, 0x50, 0x71, 0x29, 0xf3, 0x63, 0x23, 0xa9, 0x7d, 0x1d, 0xa7, 0xb0, 0x29, 0xa7, + 0xa4, 0x97, 0x88, 0x48, 0x93, 0x8e, 0xbf, 0x45, 0x22, 0x91, 0x0a, 0x8c, 0x71, 0x4a, 0xb7, 0x72, + 0x70, 0x70, 0x6e, 0x6d, 0xf4, 0x0a, 0x8c, 0xcb, 0xcf, 0xd7, 0xdc, 0x91, 0x13, 0x23, 0x77, 0x0d, + 0x86, 0x0d, 0x4c, 0x74, 0x0f, 0xce, 0xc9, 0xff, 0xeb, 0xa1, 0xb3, 0xb9, 0xe9, 0x36, 0x85, 0x37, + 0x38, 0xf7, 0xf4, 0x59, 0x90, 0xae, 0x43, 0xcb, 0x59, 0x48, 0x47, 0x07, 0x95, 0x4b, 0x62, 0xd4, + 0x32, 0xe1, 0x6c, 0x12, 0xb3, 0xe9, 0xa3, 0x55, 0x38, 0xb3, 0x4d, 0x1c, 0x2f, 0xde, 0x5e, 0xda, + 0x26, 0xcd, 0x1d, 0xb9, 0x89, 0x98, 0x93, 0xb3, 0x66, 0x1a, 0x7e, 0xa3, 0x1b, 0x05, 0x67, 0xd5, + 
0x43, 0xef, 0xc0, 0x6c, 0xbb, 0xb3, 0xe1, 0xb9, 0xd1, 0xf6, 0x5a, 0x10, 0x33, 0x93, 0x0e, 0x95, + 0x8b, 0x4e, 0x78, 0x43, 0x2b, 0x07, 0xef, 0x7a, 0x0e, 0x1e, 0xce, 0xa5, 0x80, 0xde, 0x87, 0x73, + 0xa9, 0xc5, 0x20, 0x7c, 0x33, 0x27, 0xf3, 0xe3, 0xf6, 0x36, 0xb2, 0x2a, 0x08, 0x5f, 0xcb, 0x2c, + 0x10, 0xce, 0x6e, 0xe2, 0x83, 0x19, 0xfa, 0xbc, 0x47, 0x2b, 0x6b, 0x4c, 0x19, 0xfa, 0x12, 0x8c, + 0xeb, 0xab, 0x48, 0x5c, 0x30, 0x97, 0xb3, 0x79, 0x16, 0x6d, 0xb5, 0x71, 0x96, 0x4e, 0xad, 0x28, + 0x1d, 0x86, 0x0d, 0x8a, 0x36, 0x81, 0xec, 0xef, 0x43, 0xb7, 0x60, 0xb4, 0xe9, 0xb9, 0xc4, 0x8f, + 0x6b, 0xf5, 0x5e, 0xc1, 0x43, 0x96, 0x04, 0x8e, 0x18, 0x30, 0x11, 0xe8, 0x94, 0x97, 0x61, 0x45, + 0xc1, 0xfe, 0x8d, 0x02, 0x54, 0xfa, 0x44, 0xcd, 0x4d, 0xe9, 0xa3, 0xad, 0x81, 0xf4, 0xd1, 0x0b, + 0x32, 0xb3, 0xde, 0x5a, 0x4a, 0x48, 0x4f, 0x65, 0xcd, 0x4b, 0x44, 0xf5, 0x34, 0xfe, 0xc0, 0xf6, + 0xc1, 0xba, 0x4a, 0xbb, 0xd4, 0xd7, 0x72, 0xdd, 0x78, 0xca, 0x1a, 0x1a, 0x5c, 0x10, 0xc9, 0x7d, + 0x96, 0xb0, 0xbf, 0x5e, 0x80, 0x73, 0x6a, 0x08, 0xbf, 0x75, 0x07, 0xee, 0x4e, 0xf7, 0xc0, 0x9d, + 0xc0, 0xa3, 0x8e, 0x7d, 0x1b, 0x86, 0x79, 0xf0, 0x95, 0x01, 0x18, 0xa0, 0x27, 0xcd, 0x48, 0x5d, + 0xea, 0x9a, 0x36, 0xa2, 0x75, 0xfd, 0x25, 0x0b, 0xa6, 0xd6, 0x97, 0xea, 0x8d, 0xa0, 0xb9, 0x43, + 0xe2, 0x05, 0xce, 0xb0, 0x62, 0xc1, 0xff, 0x58, 0x0f, 0xc8, 0xd7, 0x64, 0x71, 0x4c, 0x97, 0xa0, + 0xb4, 0x1d, 0x44, 0x71, 0xfa, 0xc5, 0xf7, 0x46, 0x10, 0xc5, 0x98, 0x41, 0xec, 0xdf, 0xb3, 0x60, + 0x88, 0xe5, 0x83, 0xed, 0x97, 0xa4, 0x78, 0x90, 0xef, 0x42, 0x2f, 0xc1, 0x30, 0xd9, 0xdc, 0x24, + 0xcd, 0x58, 0xcc, 0xaa, 0x74, 0x47, 0x1d, 0x5e, 0x66, 0xa5, 0xf4, 0xd2, 0x67, 0x8d, 0xf1, 0xbf, + 0x58, 0x20, 0xa3, 0xb7, 0xa0, 0x1c, 0xbb, 0xbb, 0x64, 0xa1, 0xd5, 0x12, 0x6f, 0x66, 0x0f, 0xe0, + 0xfd, 0xbb, 0x2e, 0x09, 0xe0, 0x84, 0x96, 0xfd, 0xd5, 0x02, 0x40, 0x12, 0x42, 0xa0, 0xdf, 0x27, + 0x2e, 0x76, 0xbd, 0xa6, 0x5c, 0xce, 0x78, 0x4d, 0x41, 0x09, 0xc1, 0x8c, 0xa7, 0x14, 0x35, 0x4c, + 0xc5, 0x81, 0x86, 0xa9, 0x74, 0x9c, 
0x61, 0x5a, 0x82, 0x99, 0x24, 0x04, 0x82, 0x19, 0x0f, 0x86, + 0x09, 0x29, 0xeb, 0x69, 0x20, 0xee, 0xc6, 0xb7, 0x09, 0x5c, 0x92, 0x91, 0x39, 0xe5, 0x5d, 0xc3, + 0x4c, 0x32, 0x8f, 0x91, 0xaf, 0x3a, 0x79, 0x2e, 0x2a, 0xe4, 0x3e, 0x17, 0xfd, 0xa4, 0x05, 0x67, + 0xd3, 0xed, 0x30, 0xdf, 0xb7, 0xaf, 0x58, 0x70, 0x8e, 0x3d, 0x9a, 0xb1, 0x56, 0xbb, 0x9f, 0xe8, + 0x5e, 0xcc, 0x0e, 0x0d, 0xd1, 0xbb, 0xc7, 0x89, 0xdf, 0xf3, 0x6a, 0x16, 0x69, 0x9c, 0xdd, 0xa2, + 0xfd, 0x15, 0x0b, 0xce, 0xe7, 0xa6, 0x21, 0x42, 0x57, 0x60, 0xd4, 0x69, 0xbb, 0x5c, 0x23, 0x25, + 0xf6, 0x3b, 0x93, 0x1e, 0xeb, 0x35, 0xae, 0x8f, 0x52, 0x50, 0x95, 0x1e, 0xb1, 0x90, 0x9b, 0x1e, + 0xb1, 0x6f, 0xb6, 0x43, 0xfb, 0x07, 0x2d, 0x10, 0xee, 0x4e, 0x03, 0x1c, 0x32, 0x6f, 0xcb, 0xec, + 0xb2, 0x46, 0x50, 0xf4, 0x4b, 0xf9, 0xfe, 0x5f, 0x22, 0x14, 0xba, 0xba, 0xd4, 0x8d, 0x00, 0xe8, + 0x06, 0x2d, 0xbb, 0x05, 0x02, 0x5a, 0x25, 0x4c, 0x67, 0xd5, 0xbf, 0x37, 0xd7, 0x00, 0x5a, 0x0c, + 0x57, 0xcb, 0x31, 0xa9, 0xae, 0x90, 0xaa, 0x82, 0x60, 0x0d, 0xcb, 0xfe, 0x91, 0x02, 0x8c, 0xc9, + 0x20, 0xdc, 0x1d, 0x7f, 0x10, 0xc9, 0xf2, 0x58, 0xb9, 0x78, 0x58, 0x52, 0x56, 0x4a, 0xb8, 0x9e, + 0x08, 0xe4, 0x49, 0x52, 0x56, 0x09, 0xc0, 0x09, 0x0e, 0x7a, 0x1a, 0x46, 0xa2, 0xce, 0x06, 0x43, + 0x4f, 0x39, 0xf1, 0x34, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x39, 0x98, 0xe6, 0xf5, 0xc2, 0xa0, 0xed, + 0x6c, 0x71, 0xf5, 0xe7, 0x90, 0xf2, 0xaa, 0x9d, 0x5e, 0x4d, 0xc1, 0x8e, 0x0e, 0x2a, 0x67, 0xd3, + 0x65, 0x4c, 0x71, 0xde, 0x45, 0xc5, 0xfe, 0x12, 0xa0, 0xee, 0xb8, 0xe2, 0xe8, 0x0d, 0x6e, 0x4a, + 0xe5, 0x86, 0xa4, 0xd5, 0x4b, 0x23, 0xae, 0x3b, 0x81, 0x4a, 0x43, 0x7a, 0x5e, 0x0b, 0xab, 0xfa, + 0xf6, 0x5f, 0x2d, 0xc2, 0x74, 0xda, 0x25, 0x10, 0xdd, 0x80, 0x61, 0x7e, 0xd9, 0x09, 0xf2, 0x3d, + 0x1e, 0x5c, 0x35, 0x47, 0x42, 0xb6, 0xed, 0xc5, 0x7d, 0x29, 0xea, 0xa3, 0x77, 0x60, 0xac, 0x15, + 0xdc, 0xf3, 0xef, 0x39, 0x61, 0x6b, 0xa1, 0x5e, 0x13, 0xeb, 0x32, 0x93, 0x67, 0xae, 0x26, 0x68, + 0xba, 0x73, 0x22, 0x7b, 0x5c, 0x48, 0x40, 0x58, 0x27, 0x87, 0xd6, 0x59, 
0xac, 0xc4, 0x4d, 0x77, + 0x6b, 0xd5, 0x69, 0xf7, 0xb2, 0xab, 0x5d, 0x92, 0x48, 0x1a, 0xe5, 0x09, 0x11, 0x50, 0x91, 0x03, + 0x70, 0x42, 0x08, 0x7d, 0x2f, 0x9c, 0x89, 0x72, 0xd4, 0x6c, 0x79, 0x69, 0x26, 0x7a, 0x69, 0x9e, + 0x16, 0x1f, 0xa5, 0xd2, 0x4c, 0x96, 0x42, 0x2e, 0xab, 0x19, 0xfb, 0xcb, 0x67, 0xc0, 0xd8, 0x8d, + 0x46, 0xae, 0x21, 0xeb, 0x84, 0x72, 0x0d, 0x61, 0x18, 0x25, 0xbb, 0xed, 0x78, 0xbf, 0xea, 0x86, + 0xbd, 0x72, 0xe1, 0x2d, 0x0b, 0x9c, 0x6e, 0x9a, 0x12, 0x82, 0x15, 0x9d, 0xec, 0x84, 0x50, 0xc5, + 0x0f, 0x31, 0x21, 0x54, 0xe9, 0x14, 0x13, 0x42, 0xad, 0xc1, 0xc8, 0x96, 0x1b, 0x63, 0xd2, 0x0e, + 0x04, 0x9b, 0x99, 0xb9, 0x0e, 0xaf, 0x73, 0x94, 0xee, 0x24, 0x24, 0x02, 0x80, 0x25, 0x11, 0xf4, + 0x86, 0xda, 0x81, 0xc3, 0xf9, 0x52, 0x5a, 0xf7, 0xcb, 0x60, 0xe6, 0x1e, 0x14, 0x09, 0xa0, 0x46, + 0x1e, 0x34, 0x01, 0xd4, 0x8a, 0x4c, 0xdb, 0x34, 0x9a, 0x6f, 0x04, 0xcf, 0xb2, 0x32, 0xf5, 0x49, + 0xd6, 0x64, 0x24, 0xb8, 0x2a, 0x9f, 0x5c, 0x82, 0xab, 0x1f, 0xb4, 0xe0, 0x5c, 0x3b, 0x2b, 0xd7, + 0x9b, 0x48, 0xb6, 0xf4, 0xd2, 0xc0, 0xc9, 0xec, 0x8c, 0x06, 0x99, 0xb8, 0x9e, 0x89, 0x86, 0xb3, + 0x9b, 0xa3, 0x03, 0x1d, 0x6e, 0xb4, 0x44, 0x86, 0xa6, 0x27, 0x73, 0x32, 0x65, 0xf5, 0xc8, 0x8f, + 0xb5, 0x9e, 0x91, 0x95, 0xe9, 0xe3, 0x79, 0x59, 0x99, 0x06, 0xce, 0xc5, 0xf4, 0x86, 0xca, 0x91, + 0x35, 0x91, 0xbf, 0x94, 0x78, 0x06, 0xac, 0xbe, 0x99, 0xb1, 0xde, 0x50, 0x99, 0xb1, 0x7a, 0xc4, + 0x8c, 0xe3, 0x79, 0xaf, 0xfa, 0xe6, 0xc3, 0xd2, 0x72, 0x5a, 0x4d, 0x9d, 0x4c, 0x4e, 0x2b, 0xe3, + 0xaa, 0xe1, 0x69, 0x95, 0x9e, 0xe9, 0x73, 0xd5, 0x18, 0x74, 0x7b, 0x5f, 0x36, 0x3c, 0x7f, 0xd7, + 0xcc, 0x03, 0xe5, 0xef, 0xba, 0xab, 0xe7, 0xc3, 0x42, 0x7d, 0x12, 0x3e, 0x51, 0xa4, 0x01, 0xb3, + 0x60, 0xdd, 0xd5, 0x2f, 0xc0, 0x33, 0xf9, 0x74, 0xd5, 0x3d, 0xd7, 0x4d, 0x37, 0xf3, 0x0a, 0xec, + 0xca, 0xae, 0x75, 0xf6, 0x74, 0xb2, 0x6b, 0x9d, 0x3b, 0xf1, 0xec, 0x5a, 0x8f, 0x9c, 0x42, 0x76, + 0xad, 0x47, 0x3f, 0xd4, 0xec, 0x5a, 0xb3, 0x0f, 0x21, 0xbb, 0xd6, 0x5a, 0x92, 0x5d, 0xeb, 0x7c, + 0xfe, 0x94, 
0x64, 0x58, 0xe6, 0xe6, 0xe4, 0xd4, 0xba, 0xcb, 0x9e, 0xe7, 0x79, 0xcc, 0x0a, 0x11, + 0xd4, 0x2e, 0x3b, 0x7f, 0x70, 0x56, 0x60, 0x0b, 0x3e, 0x25, 0x0a, 0x84, 0x13, 0x52, 0x94, 0x6e, + 0x92, 0x63, 0xeb, 0xb1, 0x1e, 0x0a, 0xd9, 0x2c, 0x55, 0x57, 0x7e, 0x66, 0x2d, 0xfb, 0x2f, 0x17, + 0xe0, 0x62, 0xef, 0x75, 0x9d, 0xe8, 0xc9, 0xea, 0xc9, 0xbb, 0x4e, 0x4a, 0x4f, 0xc6, 0x85, 0x9c, + 0x04, 0x6b, 0xe0, 0xc0, 0x3e, 0xd7, 0x61, 0x46, 0x99, 0xe4, 0x7a, 0x6e, 0x73, 0x5f, 0xcb, 0x2b, + 0xac, 0x5c, 0x0f, 0x1b, 0x69, 0x04, 0xdc, 0x5d, 0x07, 0x2d, 0xc0, 0x94, 0x51, 0x58, 0xab, 0x0a, + 0x61, 0x46, 0x29, 0xe6, 0x1a, 0x26, 0x18, 0xa7, 0xf1, 0xed, 0x9f, 0xb5, 0xe0, 0xd1, 0x9c, 0xc4, + 0x13, 0x03, 0xc7, 0xad, 0xd9, 0x84, 0xa9, 0xb6, 0x59, 0xb5, 0x4f, 0x78, 0x2b, 0x23, 0xbd, 0x85, + 0xea, 0x6b, 0x0a, 0x80, 0xd3, 0x44, 0x17, 0xaf, 0xfc, 0xf6, 0x1f, 0x5c, 0xfc, 0xd8, 0xef, 0xfc, + 0xc1, 0xc5, 0x8f, 0xfd, 0xee, 0x1f, 0x5c, 0xfc, 0xd8, 0xff, 0x7f, 0x78, 0xd1, 0xfa, 0xed, 0xc3, + 0x8b, 0xd6, 0xef, 0x1c, 0x5e, 0xb4, 0x7e, 0xf7, 0xf0, 0xa2, 0xf5, 0xfb, 0x87, 0x17, 0xad, 0xaf, + 0xfe, 0xe1, 0xc5, 0x8f, 0xbd, 0x5d, 0xd8, 0x7b, 0xfe, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xd1, + 0x90, 0x44, 0x98, 0x55, 0xe7, 0x00, 0x00, } diff --git a/staging/src/k8s.io/api/core/v1/generated.proto b/staging/src/k8s.io/api/core/v1/generated.proto index f76251d524e..6454adfc32e 100644 --- a/staging/src/k8s.io/api/core/v1/generated.proto +++ b/staging/src/k8s.io/api/core/v1/generated.proto @@ -3126,6 +3126,11 @@ message PodSpec { // This is an alpha feature and may change in the future. // +optional optional string runtimeClassName = 29; + + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // +optional + optional bool enableServiceLinks = 30; } // PodStatus represents information about the status of a pod. 
Status may trail the actual diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go index 1ed9377244d..9b575bf68a2 100644 --- a/staging/src/k8s.io/api/core/v1/types.go +++ b/staging/src/k8s.io/api/core/v1/types.go @@ -2891,8 +2891,17 @@ type PodSpec struct { // This is an alpha feature and may change in the future. // +optional RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // +optional + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"` } +const ( + // The default value for enableServiceLinks attribute. + DefaultEnableServiceLinks = true +) + // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the // pod's hosts file. type HostAlias struct { diff --git a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go index c781e5452d9..840972873f4 100644 --- a/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -1528,6 +1528,7 @@ var map_PodSpec = map[string]string{ "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md", "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future.", + "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links.", } func (PodSpec) SwaggerDoc() map[string]string { diff --git a/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go index f8f3471a5ed..42a5ca07518 100644 --- a/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -3554,6 +3554,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(string) **out = **in } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } return } diff --git a/staging/src/k8s.io/api/events/v1beta1/doc.go b/staging/src/k8s.io/api/events/v1beta1/doc.go index 8b1a3e312de..bd269c6d235 100644 --- a/staging/src/k8s.io/api/events/v1beta1/doc.go +++ b/staging/src/k8s.io/api/events/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. 
// +k8s:openapi-gen=true // +groupName=events.k8s.io + package v1beta1 // import "k8s.io/api/events/v1beta1" diff --git a/staging/src/k8s.io/api/imagepolicy/v1alpha1/doc.go b/staging/src/k8s.io/api/imagepolicy/v1alpha1/doc.go index 3b4840ad64e..598dff2a82a 100644 --- a/staging/src/k8s.io/api/imagepolicy/v1alpha1/doc.go +++ b/staging/src/k8s.io/api/imagepolicy/v1alpha1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=imagepolicy.k8s.io + package v1alpha1 // import "k8s.io/api/imagepolicy/v1alpha1" diff --git a/staging/src/k8s.io/api/networking/v1/doc.go b/staging/src/k8s.io/api/networking/v1/doc.go index ef9ae2ae4cf..887c3664796 100644 --- a/staging/src/k8s.io/api/networking/v1/doc.go +++ b/staging/src/k8s.io/api/networking/v1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=networking.k8s.io + package v1 // import "k8s.io/api/networking/v1" diff --git a/staging/src/k8s.io/api/policy/v1beta1/doc.go b/staging/src/k8s.io/api/policy/v1beta1/doc.go index 9c456f9237f..74611c6ba5d 100644 --- a/staging/src/k8s.io/api/policy/v1beta1/doc.go +++ b/staging/src/k8s.io/api/policy/v1beta1/doc.go @@ -15,9 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true // Package policy is for any kind of policy object. Suitable examples, even if // they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, // NetworkPolicy, etc. -// +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/policy/v1beta1" diff --git a/staging/src/k8s.io/api/rbac/v1/doc.go b/staging/src/k8s.io/api/rbac/v1/doc.go index 28ceb269b4e..76899ef0965 100644 --- a/staging/src/k8s.io/api/rbac/v1/doc.go +++ b/staging/src/k8s.io/api/rbac/v1/doc.go @@ -18,4 +18,5 @@ limitations under the License. 
// +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io + package v1 // import "k8s.io/api/rbac/v1" diff --git a/staging/src/k8s.io/api/rbac/v1alpha1/doc.go b/staging/src/k8s.io/api/rbac/v1alpha1/doc.go index 5236a477f00..f2547a58f77 100644 --- a/staging/src/k8s.io/api/rbac/v1alpha1/doc.go +++ b/staging/src/k8s.io/api/rbac/v1alpha1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io + package v1alpha1 // import "k8s.io/api/rbac/v1alpha1" diff --git a/staging/src/k8s.io/api/rbac/v1beta1/doc.go b/staging/src/k8s.io/api/rbac/v1beta1/doc.go index 4b77c9c6b83..516625eeeae 100644 --- a/staging/src/k8s.io/api/rbac/v1beta1/doc.go +++ b/staging/src/k8s.io/api/rbac/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io + package v1beta1 // import "k8s.io/api/rbac/v1beta1" diff --git a/staging/src/k8s.io/api/scheduling/v1alpha1/doc.go b/staging/src/k8s.io/api/scheduling/v1alpha1/doc.go index e10d07ff742..05a454a529c 100644 --- a/staging/src/k8s.io/api/scheduling/v1alpha1/doc.go +++ b/staging/src/k8s.io/api/scheduling/v1alpha1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io + package v1alpha1 // import "k8s.io/api/scheduling/v1alpha1" diff --git a/staging/src/k8s.io/api/scheduling/v1beta1/doc.go b/staging/src/k8s.io/api/scheduling/v1beta1/doc.go index f2dd1cfac70..7cf1af2124c 100644 --- a/staging/src/k8s.io/api/scheduling/v1beta1/doc.go +++ b/staging/src/k8s.io/api/scheduling/v1beta1/doc.go @@ -18,4 +18,5 @@ limitations under the License. 
// +k8s:openapi-gen=true // +groupName=scheduling.k8s.io + package v1beta1 // import "k8s.io/api/scheduling/v1beta1" diff --git a/staging/src/k8s.io/api/settings/v1alpha1/doc.go b/staging/src/k8s.io/api/settings/v1alpha1/doc.go index 05a62c569ed..9126211d645 100644 --- a/staging/src/k8s.io/api/settings/v1alpha1/doc.go +++ b/staging/src/k8s.io/api/settings/v1alpha1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:openapi-gen=true // +groupName=settings.k8s.io + package v1alpha1 // import "k8s.io/api/settings/v1alpha1" diff --git a/staging/src/k8s.io/api/storage/v1/doc.go b/staging/src/k8s.io/api/storage/v1/doc.go index 8f4a4045c43..ff8bb34ca1c 100644 --- a/staging/src/k8s.io/api/storage/v1/doc.go +++ b/staging/src/k8s.io/api/storage/v1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true + package v1 diff --git a/staging/src/k8s.io/api/storage/v1alpha1/doc.go b/staging/src/k8s.io/api/storage/v1alpha1/doc.go index aa94aff7fbb..0056b00d975 100644 --- a/staging/src/k8s.io/api/storage/v1alpha1/doc.go +++ b/staging/src/k8s.io/api/storage/v1alpha1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package,register // +groupName=storage.k8s.io // +k8s:openapi-gen=true + package v1alpha1 // import "k8s.io/api/storage/v1alpha1" diff --git a/staging/src/k8s.io/api/storage/v1beta1/doc.go b/staging/src/k8s.io/api/storage/v1beta1/doc.go index 8957a4cf245..ea7667dda38 100644 --- a/staging/src/k8s.io/api/storage/v1beta1/doc.go +++ b/staging/src/k8s.io/api/storage/v1beta1/doc.go @@ -17,4 +17,5 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true + package v1beta1 // import "k8s.io/api/storage/v1beta1" diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index a807ce31150..2f70977e255 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -572,7 +572,7 @@ }, { "ImportPath": "github.com/modern-go/reflect2", - "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" + "Rev": "94122c33edd36123c84d5368cfb2b69df93a0ec8" }, { "ImportPath": "github.com/pborman/uuid", diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/doc.go b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/doc.go index 73d79a45d57..d58bcc1e620 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/doc.go +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +groupName=cr.example.apiextensions.k8s.io // Package v1 is the v1 version of the API. -// +groupName=cr.example.apiextensions.k8s.io package v1 diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go index 0517ec6a844..2a6b02dccd2 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +groupName=apiextensions.k8s.io // Package apiextensions is the internal version of the API. 
-// +groupName=apiextensions.k8s.io package apiextensions // import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go index 50ab2b54c69..acd09aca287 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go @@ -17,8 +17,8 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions // +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true +// +groupName=apiextensions.k8s.io // Package v1beta1 is the v1beta1 version of the API. -// +groupName=apiextensions.k8s.io -// +k8s:openapi-gen=true package v1beta1 // import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" diff --git a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json index ba33fb991c0..ab975f4fda3 100644 --- a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json +++ b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json @@ -100,7 +100,7 @@ }, { "ImportPath": "github.com/modern-go/reflect2", - "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" + "Rev": "94122c33edd36123c84d5368cfb2b69df93a0ec8" }, { "ImportPath": "github.com/mxk/go-flowrate/flowrate", diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go index 61f201cdf5d..dbaa87c879f 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -19,4 +19,5 @@ limitations under the License. 
// +k8s:defaulter-gen=TypeMeta // +groupName=meta.k8s.io + package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go index dc461cc2968..46b0e133c37 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go @@ -19,4 +19,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=meta.k8s.io + package v1beta1 diff --git a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/doc.go b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/doc.go index 732d2038130..5c872fdbf30 100644 --- a/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/doc.go +++ b/staging/src/k8s.io/apimachinery/pkg/apis/testapigroup/v1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=testapigroup.apimachinery.k8s.io + package v1 // import "k8s.io/apimachinery/pkg/apis/testapigroup/v1" diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/staging/src/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index 5f02961d326..4c67ed59801 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -66,7 +66,7 @@ func (gr GroupResource) Empty() bool { return len(gr.Group) == 0 && len(gr.Resource) == 0 } -func (gr *GroupResource) String() string { +func (gr GroupResource) String() string { if len(gr.Group) == 0 { return gr.Resource } @@ -111,7 +111,7 @@ func (gvr GroupVersionResource) GroupVersion() GroupVersion { return GroupVersion{Group: gvr.Group, Version: gvr.Version} } -func (gvr *GroupVersionResource) String() string { +func (gvr GroupVersionResource) String() string { return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") } @@ -130,7 
+130,7 @@ func (gk GroupKind) WithVersion(version string) GroupVersionKind { return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} } -func (gk *GroupKind) String() string { +func (gk GroupKind) String() string { if len(gk.Group) == 0 { return gk.Kind } @@ -281,8 +281,8 @@ func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersio // ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that // do not use TypeMeta. -func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) { - if gvk == nil { +func (gvk GroupVersionKind) ToAPIVersionAndKind() (string, string) { + if gvk.Empty() { return "", "" } return gvk.GroupVersion().String(), gvk.Kind diff --git a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go index dd781cbc803..2699597e7a5 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go @@ -67,6 +67,9 @@ type SpdyRoundTripper struct { // followRedirects indicates if the round tripper should examine responses for redirects and // follow them. followRedirects bool + // requireSameHostRedirects restricts redirect following to only follow redirects to the same host + // as the original request. + requireSameHostRedirects bool } var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{} @@ -75,14 +78,18 @@ var _ utilnet.Dialer = &SpdyRoundTripper{} // NewRoundTripper creates a new SpdyRoundTripper that will use // the specified tlsConfig. 
-func NewRoundTripper(tlsConfig *tls.Config, followRedirects bool) httpstream.UpgradeRoundTripper { - return NewSpdyRoundTripper(tlsConfig, followRedirects) +func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) httpstream.UpgradeRoundTripper { + return NewSpdyRoundTripper(tlsConfig, followRedirects, requireSameHostRedirects) } // NewSpdyRoundTripper creates a new SpdyRoundTripper that will use // the specified tlsConfig. This function is mostly meant for unit tests. -func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects bool) *SpdyRoundTripper { - return &SpdyRoundTripper{tlsConfig: tlsConfig, followRedirects: followRedirects} +func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper { + return &SpdyRoundTripper{ + tlsConfig: tlsConfig, + followRedirects: followRedirects, + requireSameHostRedirects: requireSameHostRedirects, + } } // TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during @@ -257,7 +264,7 @@ func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) ) if s.followRedirects { - conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s) + conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s, s.requireSameHostRedirects) } else { clone := utilnet.CloneRequest(req) clone.Header = header diff --git a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper_test.go b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper_test.go index fb396bca56a..418b13f876a 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper_test.go @@ -282,7 +282,7 @@ func TestRoundTripAndNewConnection(t *testing.T) { t.Fatalf("%s: Error creating request: %s", k, err) } - spdyTransport := 
NewSpdyRoundTripper(testCase.clientTLS, redirect) + spdyTransport := NewSpdyRoundTripper(testCase.clientTLS, redirect, redirect) var proxierCalled bool var proxyCalledWithHost string @@ -391,8 +391,8 @@ func TestRoundTripRedirects(t *testing.T) { }{ {0, true}, {1, true}, - {10, true}, - {11, false}, + {9, true}, + {10, false}, } for _, test := range tests { t.Run(fmt.Sprintf("with %d redirects", test.redirects), func(t *testing.T) { @@ -425,7 +425,7 @@ func TestRoundTripRedirects(t *testing.T) { t.Fatalf("Error creating request: %s", err) } - spdyTransport := NewSpdyRoundTripper(nil, true) + spdyTransport := NewSpdyRoundTripper(nil, true, true) client := &http.Client{Transport: spdyTransport} resp, err := client.Do(req) diff --git a/staging/src/k8s.io/apimachinery/pkg/util/net/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/net/BUILD index 00fba56be91..d38670a8c87 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/net/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/net/BUILD @@ -16,7 +16,12 @@ go_test( "util_test.go", ], embed = [":go_default_library"], - deps = ["//vendor/github.com/spf13/pflag:go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/github.com/spf13/pflag:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", + ], ) go_library( diff --git a/staging/src/k8s.io/apimachinery/pkg/util/net/http.go b/staging/src/k8s.io/apimachinery/pkg/util/net/http.go index 8abbdea8255..7c2a5e6286d 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/net/http.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/net/http.go @@ -321,9 +321,10 @@ type Dialer interface { // ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to // originalLocation). It returns the opened net.Conn and the raw response bytes. 
-func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer) (net.Conn, []byte, error) { +// If requireSameHostRedirects is true, only redirects to the same host are permitted. +func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer, requireSameHostRedirects bool) (net.Conn, []byte, error) { const ( - maxRedirects = 10 + maxRedirects = 9 // Fail on the 10th redirect maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers ) @@ -387,10 +388,6 @@ redirectLoop: resp.Body.Close() // not used - // Reset the connection. - intermediateConn.Close() - intermediateConn = nil - // Prepare to follow the redirect. redirectStr := resp.Header.Get("Location") if redirectStr == "" { @@ -404,6 +401,15 @@ redirectLoop: if err != nil { return nil, nil, fmt.Errorf("malformed Location header: %v", err) } + + // Only follow redirects to the same host. Otherwise, propagate the redirect response back. + if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() { + break redirectLoop + } + + // Reset the connection. + intermediateConn.Close() + intermediateConn = nil } connToReturn := intermediateConn diff --git a/staging/src/k8s.io/apimachinery/pkg/util/net/http_test.go b/staging/src/k8s.io/apimachinery/pkg/util/net/http_test.go index 98bd6497174..ffe8f17ef7d 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/net/http_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/net/http_test.go @@ -19,14 +19,23 @@ limitations under the License. 
package net import ( + "bufio" + "bytes" "crypto/tls" "fmt" + "io/ioutil" "net" "net/http" + "net/http/httptest" "net/url" "os" "reflect" + "strings" "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/util/wait" ) func TestGetClientIP(t *testing.T) { @@ -280,3 +289,153 @@ func TestJoinPreservingTrailingSlash(t *testing.T) { }) } } + +func TestConnectWithRedirects(t *testing.T) { + tests := []struct { + desc string + redirects []string + method string // initial request method, empty == GET + expectError bool + expectedRedirects int + newPort bool // special case different port test + }{{ + desc: "relative redirects allowed", + redirects: []string{"/ok"}, + expectedRedirects: 1, + }, { + desc: "redirects to the same host are allowed", + redirects: []string{"http://HOST/ok"}, // HOST replaced with server address in test + expectedRedirects: 1, + }, { + desc: "POST redirects to GET", + method: http.MethodPost, + redirects: []string{"/ok"}, + expectedRedirects: 1, + }, { + desc: "PUT redirects to GET", + method: http.MethodPut, + redirects: []string{"/ok"}, + expectedRedirects: 1, + }, { + desc: "DELETE redirects to GET", + method: http.MethodDelete, + redirects: []string{"/ok"}, + expectedRedirects: 1, + }, { + desc: "9 redirects are allowed", + redirects: []string{"/1", "/2", "/3", "/4", "/5", "/6", "/7", "/8", "/9"}, + expectedRedirects: 9, + }, { + desc: "10 redirects are forbidden", + redirects: []string{"/1", "/2", "/3", "/4", "/5", "/6", "/7", "/8", "/9", "/10"}, + expectError: true, + }, { + desc: "redirect to different host are prevented", + redirects: []string{"http://example.com/foo"}, + expectedRedirects: 0, + }, { + desc: "multiple redirect to different host forbidden", + redirects: []string{"/1", "/2", "/3", "http://example.com/foo"}, + expectedRedirects: 3, + }, { + desc: "redirect to different port is allowed", + redirects: []string{"http://HOST/foo"}, + expectedRedirects: 1, + newPort: 
true, + }} + + const resultString = "Test output" + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + redirectCount := 0 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // Verify redirect request. + if redirectCount > 0 { + expectedURL, err := url.Parse(test.redirects[redirectCount-1]) + require.NoError(t, err, "test URL error") + assert.Equal(t, req.URL.Path, expectedURL.Path, "unknown redirect path") + assert.Equal(t, http.MethodGet, req.Method, "redirects must always be GET") + } + if redirectCount < len(test.redirects) { + http.Redirect(w, req, test.redirects[redirectCount], http.StatusFound) + redirectCount++ + } else if redirectCount == len(test.redirects) { + w.Write([]byte(resultString)) + } else { + t.Errorf("unexpected number of redirects %d to %s", redirectCount, req.URL.String()) + } + })) + defer s.Close() + + u, err := url.Parse(s.URL) + require.NoError(t, err, "Error parsing server URL") + host := u.Host + + // Special case new-port test with a secondary server. + if test.newPort { + s2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte(resultString)) + })) + defer s2.Close() + u2, err := url.Parse(s2.URL) + require.NoError(t, err, "Error parsing secondary server URL") + + // Sanity check: secondary server uses same hostname, different port. + require.Equal(t, u.Hostname(), u2.Hostname(), "sanity check: same hostname") + require.NotEqual(t, u.Port(), u2.Port(), "sanity check: different port") + + // Redirect to the secondary server. + host = u2.Host + + } + + // Update redirect URLs with actual host. 
+ for i := range test.redirects { + test.redirects[i] = strings.Replace(test.redirects[i], "HOST", host, 1) + } + + method := test.method + if method == "" { + method = http.MethodGet + } + + netdialer := &net.Dialer{ + Timeout: wait.ForeverTestTimeout, + KeepAlive: wait.ForeverTestTimeout, + } + dialer := DialerFunc(func(req *http.Request) (net.Conn, error) { + conn, err := netdialer.Dial("tcp", req.URL.Host) + if err != nil { + return conn, err + } + if err = req.Write(conn); err != nil { + require.NoError(t, conn.Close()) + return nil, fmt.Errorf("error sending request: %v", err) + } + return conn, err + }) + conn, rawResponse, err := ConnectWithRedirects(method, u, http.Header{} /*body*/, nil, dialer, true) + if test.expectError { + require.Error(t, err, "expected request error") + return + } + + require.NoError(t, err, "unexpected request error") + assert.NoError(t, conn.Close(), "error closing connection") + + resp, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(rawResponse)), nil) + require.NoError(t, err, "unexpected request error") + + result, err := ioutil.ReadAll(resp.Body) + require.NoError(t, resp.Body.Close()) + if test.expectedRedirects < len(test.redirects) { + // Expect the last redirect to be returned. 
+ assert.Equal(t, http.StatusFound, resp.StatusCode, "Final response is not a redirect") + assert.Equal(t, test.redirects[len(test.redirects)-1], resp.Header.Get("Location")) + assert.NotEqual(t, resultString, string(result), "wrong content") + } else { + assert.Equal(t, resultString, string(result), "stream content does not match") + } + }) + } +} diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go index 4d5cd34d487..269c5331046 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go @@ -68,6 +68,8 @@ type UpgradeAwareHandler struct { // InterceptRedirects determines whether the proxy should sniff backend responses for redirects, // following them as necessary. InterceptRedirects bool + // RequireSameHostRedirects only allows redirects to the same host. It is only used if InterceptRedirects=true. + RequireSameHostRedirects bool // UseRequestLocation will use the incoming request URL when talking to the backend server. UseRequestLocation bool // FlushInterval controls how often the standard HTTP proxy will flush content from the upstream. 
@@ -256,7 +258,7 @@ func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Reques utilnet.AppendForwardedForHeader(clone) if h.InterceptRedirects { glog.V(6).Infof("Connecting to backend proxy (intercepting redirects) %s\n Headers: %v", &location, clone.Header) - backendConn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, &location, clone.Header, req.Body, utilnet.DialerFunc(h.DialForUpgrade)) + backendConn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, &location, clone.Header, req.Body, utilnet.DialerFunc(h.DialForUpgrade), h.RequireSameHostRedirects) } else { glog.V(6).Infof("Connecting to backend proxy (direct dial) %s\n Headers: %v", &location, clone.Header) clone.URL = &location diff --git a/pkg/util/version/BUILD b/staging/src/k8s.io/apimachinery/pkg/util/version/BUILD similarity index 80% rename from pkg/util/version/BUILD rename to staging/src/k8s.io/apimachinery/pkg/util/version/BUILD index 3b53281f771..cb2162de692 100644 --- a/pkg/util/version/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/util/version/BUILD @@ -12,7 +12,8 @@ go_library( "doc.go", "version.go", ], - importpath = "k8s.io/kubernetes/pkg/util/version", + importmap = "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/version", + importpath = "k8s.io/apimachinery/pkg/util/version", ) go_test( diff --git a/pkg/util/version/doc.go b/staging/src/k8s.io/apimachinery/pkg/util/version/doc.go similarity index 90% rename from pkg/util/version/doc.go rename to staging/src/k8s.io/apimachinery/pkg/util/version/doc.go index ebe43152e8e..5b2b22b6d00 100644 --- a/pkg/util/version/doc.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/version/doc.go @@ -15,4 +15,4 @@ limitations under the License. 
*/ // Package version provides utilities for version number comparisons -package version // import "k8s.io/kubernetes/pkg/util/version" +package version // import "k8s.io/apimachinery/pkg/util/version" diff --git a/pkg/util/version/version.go b/staging/src/k8s.io/apimachinery/pkg/util/version/version.go similarity index 100% rename from pkg/util/version/version.go rename to staging/src/k8s.io/apimachinery/pkg/util/version/version.go diff --git a/pkg/util/version/version_test.go b/staging/src/k8s.io/apimachinery/pkg/util/version/version_test.go similarity index 100% rename from pkg/util/version/version_test.go rename to staging/src/k8s.io/apimachinery/pkg/util/version/version_test.go diff --git a/staging/src/k8s.io/apimachinery/pkg/version/doc.go b/staging/src/k8s.io/apimachinery/pkg/version/doc.go index 5e77af7ea9a..29574fd6d58 100644 --- a/staging/src/k8s.io/apimachinery/pkg/version/doc.go +++ b/staging/src/k8s.io/apimachinery/pkg/version/doc.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package version supplies the type for version information collected at build time. // +k8s:openapi-gen=true + +// Package version supplies the type for version information collected at build time. 
package version // import "k8s.io/apimachinery/pkg/version" diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index 57ebc2154b5..7ee99efd807 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -544,7 +544,7 @@ }, { "ImportPath": "github.com/modern-go/reflect2", - "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" + "Rev": "94122c33edd36123c84d5368cfb2b69df93a0ec8" }, { "ImportPath": "github.com/pborman/uuid", diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go index 04c376f7795..703f467f9fc 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apiserver/pkg/admission/plugin/webhook/config/apis/webhookadmission // +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.config.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. 
-// +groupName=apiserver.config.k8s.io package v1alpha1 diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/testing/testcase.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/testing/testcase.go index 30af14e74f3..ad3efee0e62 100644 --- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/testing/testcase.go +++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/testing/testcase.go @@ -43,10 +43,10 @@ var matchEverythingRules = []registrationv1beta1.RuleWithOperations{{ }, }} -var sideEffectsUnknown registrationv1beta1.SideEffectClass = registrationv1beta1.SideEffectClassUnknown -var sideEffectsNone registrationv1beta1.SideEffectClass = registrationv1beta1.SideEffectClassNone -var sideEffectsSome registrationv1beta1.SideEffectClass = registrationv1beta1.SideEffectClassSome -var sideEffectsNoneOnDryRun registrationv1beta1.SideEffectClass = registrationv1beta1.SideEffectClassNoneOnDryRun +var sideEffectsUnknown = registrationv1beta1.SideEffectClassUnknown +var sideEffectsNone = registrationv1beta1.SideEffectClassNone +var sideEffectsSome = registrationv1beta1.SideEffectClassSome +var sideEffectsNoneOnDryRun = registrationv1beta1.SideEffectClassNoneOnDryRun // NewFakeDataSource returns a mock client and informer returning the given webhooks. func NewFakeDataSource(name string, webhooks []registrationv1beta1.Webhook, mutating bool, stopCh <-chan struct{}) (clientset kubernetes.Interface, factory informers.SharedInformerFactory) { diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/doc.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/doc.go index a89863a35fc..88db1ffa67a 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/doc.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +groupName=apiserver.k8s.io // Package apiserver is the internal version of the API. 
-// +groupName=apiserver.k8s.io package apiserver // import "k8s.io/apiserver/pkg/apis/apiserver" diff --git a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go index 7dd031a793c..82ebd0c455d 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/apiserver // +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. -// +groupName=apiserver.k8s.io package v1alpha1 // import "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/doc.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/doc.go index 34bc671e8d1..deda9cbd63c 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/doc.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=audit.k8s.io + package audit // import "k8s.io/apiserver/pkg/apis/audit" diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/doc.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/doc.go index 9d936254817..b8f818ffdb2 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/doc.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1/doc.go @@ -20,4 +20,5 @@ limitations under the License. 
// +k8s:defaulter-gen=TypeMeta // +groupName=audit.k8s.io + package v1 // import "k8s.io/apiserver/pkg/apis/audit/v1" diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go index 27cc4c5ea52..d2cbdd9919b 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=audit.k8s.io + package v1alpha1 // import "k8s.io/apiserver/pkg/apis/audit/v1alpha1" diff --git a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go index 38814725867..d43a807c3de 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=audit.k8s.io + package v1beta1 // import "k8s.io/apiserver/pkg/apis/audit/v1beta1" diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example/doc.go b/staging/src/k8s.io/apiserver/pkg/apis/example/doc.go index 2676eee8197..d8b341a395b 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example/doc.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example/doc.go @@ -16,7 +16,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=example.k8s.io -// + // package example contains an example API used to demonstrate how to create api groups. Moreover, this is // used within tests. package example // import "k8s.io/apiserver/pkg/apis/example" diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/doc.go b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/doc.go index 4b22d37fb27..33a3ef04b33 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example/v1/doc.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example/v1/doc.go @@ -20,4 +20,5 @@ limitations under the License. 
// +k8s:defaulter-gen=TypeMeta // +groupName=example.apiserver.k8s.io + package v1 // import "k8s.io/apiserver/pkg/apis/example/v1" diff --git a/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/doc.go b/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/doc.go index 5784d44f398..8625365586f 100644 --- a/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/doc.go +++ b/staging/src/k8s.io/apiserver/pkg/apis/example2/v1/doc.go @@ -21,4 +21,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=example2.apiserver.k8s.io + package v1 // import "k8s.io/apiserver/pkg/apis/example2/v1" diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go index 5b3f02344dc..e40e4288ac5 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/create.go @@ -47,7 +47,7 @@ func createHandler(r rest.NamedCreater, scope RequestScope, admit admission.Inte defer trace.LogIfLong(500 * time.Millisecond) if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { - scope.err(errors.NewBadRequest("dryRun is not supported yet"), w, req) + scope.err(errors.NewBadRequest("the dryRun alpha feature is disabled"), w, req) return } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go index 369e06e35c7..ff35fa9dddd 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/delete.go @@ -47,7 +47,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope RequestSco defer trace.LogIfLong(500 * time.Millisecond) if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { - scope.err(errors.NewBadRequest("dryRun is not supported yet"), w, req) + scope.err(errors.NewBadRequest("the dryRun alpha feature is 
disabled"), w, req) return } @@ -183,7 +183,7 @@ func DeleteResource(r rest.GracefulDeleter, allowsOptions bool, scope RequestSco func DeleteCollection(r rest.CollectionDeleter, checkBody bool, scope RequestScope, admit admission.Interface) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { - scope.err(errors.NewBadRequest("dryRun is not supported yet"), w, req) + scope.err(errors.NewBadRequest("the dryRun alpha feature is disabled"), w, req) return } diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go index fbd6d9ae15b..d73c3fd573e 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/patch.go @@ -55,7 +55,7 @@ func PatchResource(r rest.Patcher, scope RequestScope, admit admission.Interface defer trace.LogIfLong(500 * time.Millisecond) if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { - scope.err(errors.NewBadRequest("dryRun is not supported yet"), w, req) + scope.err(errors.NewBadRequest("the dryRun alpha feature is disabled"), w, req) return } @@ -265,7 +265,7 @@ func (p *jsonPatcher) applyPatchToCurrentObject(currentObject runtime.Object) (r // Apply the patch. patchedObjJS, err := p.applyJSPatch(currentObjJS) if err != nil { - return nil, interpretPatchError(err) + return nil, err } // Construct the resulting typed, unversioned object. 
@@ -284,9 +284,13 @@ func (p *jsonPatcher) applyJSPatch(versionedJS []byte) (patchedJS []byte, retErr case types.JSONPatchType: patchObj, err := jsonpatch.DecodePatch(p.patchJS) if err != nil { - return nil, err + return nil, errors.NewBadRequest(err.Error()) } - return patchObj.Apply(versionedJS) + patchedJS, err := patchObj.Apply(versionedJS) + if err != nil { + return nil, errors.NewGenericServerResponse(http.StatusUnprocessableEntity, "", schema.GroupResource{}, "", err.Error(), 0, false) + } + return patchedJS, nil case types.MergePatchType: return jsonpatch.MergePatch(versionedJS, p.patchJS) default: @@ -415,7 +419,7 @@ func applyPatchToObject( ) error { patchedObjMap, err := strategicpatch.StrategicMergeMapPatch(originalMap, patchMap, schemaReferenceObj) if err != nil { - return interpretPatchError(err) + return interpretStrategicMergePatchError(err) } // Rather than serialize the patched map to JSON, then decode it to an object, we go directly from a map to an object @@ -428,8 +432,8 @@ func applyPatchToObject( return nil } -// interpretPatchError interprets the error type and returns an error with appropriate HTTP code. -func interpretPatchError(err error) error { +// interpretStrategicMergePatchError interprets the error type and returns an error with appropriate HTTP code. 
+func interpretStrategicMergePatchError(err error) error { switch err { case mergepatch.ErrBadJSONDoc, mergepatch.ErrBadPatchFormatForPrimitiveList, mergepatch.ErrBadPatchFormatForRetainKeys, mergepatch.ErrBadPatchFormatForSetElementOrderList, mergepatch.ErrUnsupportedStrategicMergePatchFormat: return errors.NewBadRequest(err.Error()) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go index 822ed3e926e..b190f8d299b 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/rest_test.go @@ -101,7 +101,7 @@ func TestPatchAnonymousField(t *testing.T) { } } -func TestPatchInvalid(t *testing.T) { +func TestStrategicMergePatchInvalid(t *testing.T) { testGV := schema.GroupVersion{Group: "", Version: "v"} scheme.AddKnownTypes(testGV, &testPatchType{}) defaulter := runtime.ObjectDefaulter(scheme) @@ -123,6 +123,61 @@ func TestPatchInvalid(t *testing.T) { } } +func TestJSONPatch(t *testing.T) { + for _, test := range []struct { + name string + patch string + expectedError string + expectedErrorType metav1.StatusReason + }{ + { + name: "valid", + patch: `[{"op": "test", "value": "podA", "path": "/metadata/name"}]`, + }, + { + name: "invalid-syntax", + patch: `invalid json patch`, + expectedError: "invalid character 'i' looking for beginning of value", + expectedErrorType: metav1.StatusReasonBadRequest, + }, + { + name: "invalid-semantics", + patch: `[{"op": "test", "value": "podA", "path": "/invalid/path"}]`, + expectedError: "the server rejected our request due to an error in our request", + expectedErrorType: metav1.StatusReasonInvalid, + }, + } { + p := &patcher{ + patchType: types.JSONPatchType, + patchJS: []byte(test.patch), + } + jp := jsonPatcher{p} + codec := codecs.LegacyCodec(examplev1.SchemeGroupVersion) + pod := &examplev1.Pod{} + pod.Name = "podA" + versionedJS, err := runtime.Encode(codec, pod) 
+ if err != nil { + t.Errorf("%s: unexpected error: %v", test.name, err) + continue + } + _, err = jp.applyJSPatch(versionedJS) + if err != nil { + if len(test.expectedError) == 0 { + t.Errorf("%s: expect no error when applying json patch, but got %v", test.name, err) + continue + } + if err.Error() != test.expectedError { + t.Errorf("%s: expected error %v, but got %v", test.name, test.expectedError, err) + } + if test.expectedErrorType != apierrors.ReasonForError(err) { + t.Errorf("%s: expected error type %v, but got %v", test.name, test.expectedErrorType, apierrors.ReasonForError(err)) + } + } else if len(test.expectedError) > 0 { + t.Errorf("%s: expected err %s", test.name, test.expectedError) + } + } +} + func TestPatchCustomResource(t *testing.T) { testGV := schema.GroupVersion{Group: "mygroup.example.com", Version: "v1beta1"} scheme.AddKnownTypes(testGV, &unstructured.Unstructured{}) diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go index fc35346608d..19d23e1f2eb 100644 --- a/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go +++ b/staging/src/k8s.io/apiserver/pkg/endpoints/handlers/update.go @@ -49,7 +49,7 @@ func UpdateResource(r rest.Updater, scope RequestScope, admit admission.Interfac defer trace.LogIfLong(500 * time.Millisecond) if isDryRun(req.URL) && !utilfeature.DefaultFeatureGate.Enabled(features.DryRun) { - scope.err(errors.NewBadRequest("dryRun is not supported yet"), w, req) + scope.err(errors.NewBadRequest("the dryRun alpha feature is disabled"), w, req) return } diff --git a/staging/src/k8s.io/apiserver/pkg/features/kube_features.go b/staging/src/k8s.io/apiserver/pkg/features/kube_features.go index 3b692572280..e06f8919fae 100644 --- a/staging/src/k8s.io/apiserver/pkg/features/kube_features.go +++ b/staging/src/k8s.io/apiserver/pkg/features/kube_features.go @@ -29,11 +29,19 @@ const ( // owner: @tallclair // alpha: v1.5 + // beta: v1.6 // // 
StreamingProxyRedirects controls whether the apiserver should intercept (and follow) // redirects from the backend (Kubelet) for streaming requests (exec/attach/port-forward). StreamingProxyRedirects utilfeature.Feature = "StreamingProxyRedirects" + // owner: @tallclair + // alpha: v1.10 + // + // ValidateProxyRedirects controls whether the apiserver should validate that redirects are only + // followed to the same host. Only used if StreamingProxyRedirects is enabled. + ValidateProxyRedirects utilfeature.Feature = "ValidateProxyRedirects" + // owner: @tallclair // alpha: v1.7 // beta: v1.8 @@ -83,6 +91,7 @@ func init() { // available throughout Kubernetes binaries. var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ StreamingProxyRedirects: {Default: true, PreRelease: utilfeature.Beta}, + ValidateProxyRedirects: {Default: false, PreRelease: utilfeature.Alpha}, AdvancedAuditing: {Default: true, PreRelease: utilfeature.GA}, APIResponseCompression: {Default: false, PreRelease: utilfeature.Alpha}, Initializers: {Default: false, PreRelease: utilfeature.Alpha}, diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/BUILD b/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/BUILD index 4acecd18dfc..d282606c64a 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/BUILD +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/BUILD @@ -17,6 +17,8 @@ go_test( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/github.com/stretchr/testify/assert:go_default_library", + "//vendor/github.com/stretchr/testify/require:go_default_library", ], ) diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/streamer.go b/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/streamer.go index acab7652e1f..8bd9d9c9f11 100644 --- 
a/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/streamer.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/streamer.go @@ -18,6 +18,7 @@ package rest import ( "context" + "errors" "io" "net/http" "net/url" @@ -29,13 +30,14 @@ import ( ) // LocationStreamer is a resource that streams the contents of a particular -// location URL +// location URL. type LocationStreamer struct { Location *url.URL Transport http.RoundTripper ContentType string Flush bool ResponseChecker HttpResponseChecker + RedirectChecker func(req *http.Request, via []*http.Request) error } // a LocationStreamer must implement a rest.ResourceStreamer @@ -59,7 +61,10 @@ func (s *LocationStreamer) InputStream(ctx context.Context, apiVersion, acceptHe if transport == nil { transport = http.DefaultTransport } - client := &http.Client{Transport: transport} + client := &http.Client{ + Transport: transport, + CheckRedirect: s.RedirectChecker, + } req, err := http.NewRequest("GET", s.Location.String(), nil) // Pass the parent context down to the request to ensure that the resources // will be release properly. @@ -87,3 +92,8 @@ func (s *LocationStreamer) InputStream(ctx context.Context, apiVersion, acceptHe stream = resp.Body return } + +// PreventRedirects is a redirect checker that prevents the client from following a redirect. 
+func PreventRedirects(_ *http.Request, _ []*http.Request) error { + return errors.New("redirects forbidden") +} diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/streamer_test.go b/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/streamer_test.go index 11714cc1566..4d9cd154339 100644 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/streamer_test.go +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/rest/streamer_test.go @@ -28,6 +28,8 @@ import ( "reflect" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -147,3 +149,23 @@ func TestInputStreamInternalServerErrorTransport(t *testing.T) { t.Errorf("StreamInternalServerError does not match. Got: %s. Expected: %s.", err, expectedError) } } + +func TestInputStreamRedirects(t *testing.T) { + const redirectPath = "/redirect" + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if req.URL.Path == redirectPath { + t.Fatal("Redirects should not be followed") + } else { + http.Redirect(w, req, redirectPath, http.StatusFound) + } + })) + loc, err := url.Parse(s.URL) + require.NoError(t, err, "Error parsing server URL") + + streamer := &LocationStreamer{ + Location: loc, + RedirectChecker: PreventRedirects, + } + _, _, _, err = streamer.InputStream(context.Background(), "", "") + assert.Error(t, err, "Redirect should trigger an error") +} diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go index 4c6fd5a39cf..feece9f029a 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config.go @@ -24,6 +24,7 @@ import ( "io" "io/ioutil" "os" + "time" yaml "github.com/ghodss/yaml" @@ -40,6 +41,7 @@ const ( 
aesGCMTransformerPrefixV1 = "k8s:enc:aesgcm:v1:" secretboxTransformerPrefixV1 = "k8s:enc:secretbox:v1:" kmsTransformerPrefixV1 = "k8s:enc:kms:v1:" + kmsPluginConnectionTimeout = 3 * time.Second ) // GetTransformerOverrides returns the transformer overrides by reading and parsing the encryption provider configuration file @@ -160,7 +162,7 @@ func GetPrefixTransformers(config *ResourceConfig) ([]value.PrefixTransformer, e } // Get gRPC client service with endpoint. - envelopeService, err := envelopeServiceFactory(provider.KMS.Endpoint) + envelopeService, err := envelopeServiceFactory(provider.KMS.Endpoint, kmsPluginConnectionTimeout) if err != nil { return nil, fmt.Errorf("could not configure KMS plugin %q, error: %v", provider.KMS.Name, err) } diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go index 957d7dedeb9..8cd6027e904 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/encryptionconfig/config_test.go @@ -21,6 +21,7 @@ import ( "encoding/base64" "strings" "testing" + "time" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/storage/value" @@ -239,7 +240,7 @@ func (t *testEnvelopeService) Encrypt(data []byte) ([]byte, error) { } // The factory method to create mock envelope service. 
-func newMockEnvelopeService(endpoint string) (envelope.Service, error) { +func newMockEnvelopeService(endpoint string, timeout time.Duration) (envelope.Service, error) { return &testEnvelopeService{}, nil } diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go index c9295ed1fad..a39ceeca0da 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service.go @@ -23,6 +23,7 @@ import ( "net" "net/url" "strings" + "sync" "time" "github.com/golang/glog" @@ -39,19 +40,20 @@ const ( // Current version for the protocol interface definition. kmsapiVersion = "v1beta1" - // The timeout that communicate with KMS server. - timeout = 30 * time.Second + versionErrorf = "KMS provider api version %s is not supported, only %s is supported now" ) // The gRPC implementation for envelope.Service. type gRPCService struct { - // gRPC client instance - kmsClient kmsapi.KeyManagementServiceClient - connection *grpc.ClientConn + kmsClient kmsapi.KeyManagementServiceClient + connection *grpc.ClientConn + callTimeout time.Duration + mux sync.RWMutex + versionChecked bool } // NewGRPCService returns an envelope.Service which use gRPC to communicate the remote KMS provider. 
-func NewGRPCService(endpoint string) (Service, error) { +func NewGRPCService(endpoint string, callTimeout time.Duration) (Service, error) { glog.V(4).Infof("Configure KMS provider with endpoint: %s", endpoint) addr, err := parseEndpoint(endpoint) @@ -59,25 +61,28 @@ func NewGRPCService(endpoint string) (Service, error) { return nil, err } - connection, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithTimeout(timeout), grpc.WithDialer(unixDial)) + connection, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.FailFast(false)), grpc.WithDialer( + func(string, time.Duration) (net.Conn, error) { + // Ignoring addr and timeout arguments: + // addr - comes from the closure + // timeout - is ignored since we are connecting in a non-blocking configuration + c, err := net.DialTimeout(unixProtocol, addr, 0) + if err != nil { + glog.Errorf("failed to create connection to unix socket: %s, error: %v", addr, err) + } + return c, err + })) + if err != nil { - return nil, fmt.Errorf("connect remote KMS provider %q failed, error: %v", addr, err) + return nil, fmt.Errorf("failed to create connection to %s, error: %v", endpoint, err) } kmsClient := kmsapi.NewKeyManagementServiceClient(connection) - - err = checkAPIVersion(kmsClient) - if err != nil { - connection.Close() - return nil, fmt.Errorf("failed check version for %q, error: %v", addr, err) - } - - return &gRPCService{kmsClient: kmsClient, connection: connection}, nil -} - -// This dialer explicitly ask gRPC to use unix socket as network. -func unixDial(addr string, timeout time.Duration) (net.Conn, error) { - return net.DialTimeout(unixProtocol, addr, timeout) + return &gRPCService{ + kmsClient: kmsClient, + connection: connection, + callTimeout: callTimeout, + }, nil } // Parse the endpoint to extract schema, host or path. @@ -106,31 +111,37 @@ func parseEndpoint(endpoint string) (string, error) { return u.Path, nil } -// Check the KMS provider API version. 
-// Only matching kmsapiVersion is supported now. -func checkAPIVersion(kmsClient kmsapi.KeyManagementServiceClient) error { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() +func (g *gRPCService) checkAPIVersion(ctx context.Context) error { + g.mux.Lock() + defer g.mux.Unlock() + + if g.versionChecked { + return nil + } request := &kmsapi.VersionRequest{Version: kmsapiVersion} - response, err := kmsClient.Version(ctx, request) + response, err := g.kmsClient.Version(ctx, request) if err != nil { return fmt.Errorf("failed get version from remote KMS provider: %v", err) } if response.Version != kmsapiVersion { - return fmt.Errorf("KMS provider api version %s is not supported, only %s is supported now", - response.Version, kmsapiVersion) + return fmt.Errorf(versionErrorf, response.Version, kmsapiVersion) } + g.versionChecked = true - glog.V(4).Infof("KMS provider %s initialized, version: %s", response.RuntimeName, response.RuntimeVersion) + glog.V(4).Infof("Version of KMS provider is %s", response.Version) return nil } // Decrypt a given data string to obtain the original byte data. func (g *gRPCService) Decrypt(cipher []byte) ([]byte, error) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) + ctx, cancel := context.WithTimeout(context.Background(), g.callTimeout) defer cancel() + if err := g.checkAPIVersion(ctx); err != nil { + return nil, err + } + request := &kmsapi.DecryptRequest{Cipher: cipher, Version: kmsapiVersion} response, err := g.kmsClient.Decrypt(ctx, request) if err != nil { @@ -141,8 +152,11 @@ func (g *gRPCService) Decrypt(cipher []byte) ([]byte, error) { // Encrypt bytes to a string ciphertext. 
func (g *gRPCService) Encrypt(plain []byte) ([]byte, error) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) + ctx, cancel := context.WithTimeout(context.Background(), g.callTimeout) defer cancel() + if err := g.checkAPIVersion(ctx); err != nil { + return nil, err + } request := &kmsapi.EncryptRequest{Plain: plain, Version: kmsapiVersion} response, err := g.kmsClient.Encrypt(ctx, request) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service_unix_test.go b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service_unix_test.go index 6e0aa12bad6..bc40b220c27 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service_unix_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/value/encrypt/envelope/grpc_service_unix_test.go @@ -25,7 +25,9 @@ import ( "fmt" "net" "reflect" + "sync" "testing" + "time" "google.golang.org/grpc" @@ -36,17 +38,143 @@ const ( endpoint = "unix:///@kms-socket.sock" ) -// Normal encryption and decryption operation. -func TestGRPCService(t *testing.T) { - // Start a test gRPC server. - server, err := startTestKMSProvider() +// TestKMSPluginLateStart tests the scenario where kms-plugin pod/container starts after kube-apiserver pod/container. +// Since the Dial to kms-plugin is non-blocking we expect the construction of gRPC service to succeed even when +// kms-plugin is not yet up - dialing happens in the background. 
+func TestKMSPluginLateStart(t *testing.T) { + callTimeout := 3 * time.Second + + service, err := NewGRPCService(endpoint, callTimeout) + if err != nil { + t.Fatalf("failed to create envelope service, error: %v", err) + } + defer destroyService(service) + + time.Sleep(callTimeout / 2) + f, err := startFakeKMSProvider(kmsapiVersion) if err != nil { t.Fatalf("failed to start test KMS provider server, error: %v", err) } - defer stopTestKMSProvider(server) + defer f.server.Stop() + + data := []byte("test data") + _, err = service.Encrypt(data) + if err != nil { + t.Fatalf("failed when execute encrypt, error: %v", err) + } +} + +// TestIntermittentConnectionLoss tests the scenario where the connection with kms-plugin is intermittently lost. +func TestIntermittentConnectionLoss(t *testing.T) { + var ( + wg1 sync.WaitGroup + wg2 sync.WaitGroup + timeout = 30 * time.Second + blackOut = 1 * time.Second + data = []byte("test data") + ) + // Start KMS Plugin + f, err := startFakeKMSProvider(kmsapiVersion) + if err != nil { + t.Fatalf("failed to start test KMS provider server, error: %v", err) + } + + // connect to kms plugin + service, err := NewGRPCService(endpoint, timeout) + if err != nil { + t.Fatalf("failed to create envelope service, error: %v", err) + } + defer destroyService(service) + + _, err = service.Encrypt(data) + if err != nil { + t.Fatalf("failed when execute encrypt, error: %v", err) + } + t.Log("Connected to KMSPlugin") + + // Stop KMS Plugin - simulating connection loss + f.server.Stop() + t.Log("KMS Plugin is stopped") + + wg1.Add(1) + wg2.Add(1) + go func() { + defer wg2.Done() + // Call service to encrypt data. 
+ t.Log("Sending encrypt request") + wg1.Done() + _, err := service.Encrypt(data) + if err != nil { + t.Fatalf("failed when executing encrypt, error: %v", err) + } + }() + + wg1.Wait() + time.Sleep(blackOut) + // Start KMS Plugin + f, err = startFakeKMSProvider(kmsapiVersion) + if err != nil { + t.Fatalf("failed to start test KMS provider server, error: %v", err) + } + defer f.server.Stop() + t.Log("Restarted KMS Plugin") + + wg2.Wait() +} + +func TestUnsupportedVersion(t *testing.T) { + ver := "invalid" + data := []byte("test data") + wantErr := fmt.Errorf(versionErrorf, ver, kmsapiVersion) + + f, err := startFakeKMSProvider(ver) + if err != nil { + t.Fatalf("failed to start test KMS provider server, error: %ver", err) + } + defer f.server.Stop() + + s, err := NewGRPCService(endpoint, 1*time.Second) + if err != nil { + t.Fatal(err) + } + defer destroyService(s) + + // Encrypt + _, err = s.Encrypt(data) + if err == nil || err.Error() != wantErr.Error() { + t.Errorf("got err: %ver, want: %ver", err, wantErr) + } + + destroyService(s) + + s, err = NewGRPCService(endpoint, 1*time.Second) + if err != nil { + t.Fatal(err) + } + defer destroyService(s) + + // Decrypt + _, err = s.Decrypt(data) + if err == nil || err.Error() != wantErr.Error() { + t.Errorf("got err: %ver, want: %ver", err, wantErr) + } +} + +func TestConcurrentAccess(t *testing.T) { + +} + +// Normal encryption and decryption operation. +func TestGRPCService(t *testing.T) { + // Start a test gRPC server. + f, err := startFakeKMSProvider(kmsapiVersion) + if err != nil { + t.Fatalf("failed to start test KMS provider server, error: %v", err) + } + defer f.server.Stop() // Create the gRPC client service. 
- service, err := NewGRPCService(endpoint) + service, err := NewGRPCService(endpoint, 1*time.Second) if err != nil { t.Fatalf("failed to create envelope service, error: %v", err) } @@ -70,19 +198,65 @@ func TestGRPCService(t *testing.T) { } } +// Normal encryption and decryption operation by multiple go-routines. +func TestGRPCServiceConcurrentAccess(t *testing.T) { + // Start a test gRPC server. + f, err := startFakeKMSProvider(kmsapiVersion) + if err != nil { + t.Fatalf("failed to start test KMS provider server, error: %v", err) + } + defer f.server.Stop() + + // Create the gRPC client service. + service, err := NewGRPCService(endpoint, 1*time.Second) + if err != nil { + t.Fatalf("failed to create envelope service, error: %v", err) + } + defer destroyService(service) + + var wg sync.WaitGroup + n := 1000 + wg.Add(n) + for i := 0; i < n; i++ { + go func() { + defer wg.Done() + // Call service to encrypt data. + data := []byte("test data") + cipher, err := service.Encrypt(data) + if err != nil { + t.Errorf("failed when execute encrypt, error: %v", err) + } + + // Call service to decrypt data. + result, err := service.Decrypt(cipher) + if err != nil { + t.Errorf("failed when execute decrypt, error: %v", err) + } + + if !reflect.DeepEqual(data, result) { + t.Errorf("expect: %v, but: %v", data, result) + } + }() + } + + wg.Wait() +} + func destroyService(service Service) { - s := service.(*gRPCService) - s.connection.Close() + if service != nil { + s := service.(*gRPCService) + s.connection.Close() + } } // Test all those invalid configuration for KMS provider. func TestInvalidConfiguration(t *testing.T) { // Start a test gRPC server. 
- server, err := startTestKMSProvider() + f, err := startFakeKMSProvider(kmsapiVersion) if err != nil { t.Fatalf("failed to start test KMS provider server, error: %v", err) } - defer stopTestKMSProvider(server) + defer f.server.Stop() invalidConfigs := []struct { name string @@ -91,16 +265,12 @@ func TestInvalidConfiguration(t *testing.T) { }{ {"emptyConfiguration", kmsapiVersion, ""}, {"invalidScheme", kmsapiVersion, "tcp://localhost:6060"}, - {"unavailableEndpoint", kmsapiVersion, unixProtocol + ":///kms-socket.nonexist"}, - {"invalidAPIVersion", "invalidVersion", endpoint}, } for _, testCase := range invalidConfigs { t.Run(testCase.name, func(t *testing.T) { - setAPIVersion(testCase.apiVersion) - defer setAPIVersion(kmsapiVersion) - - _, err := NewGRPCService(testCase.endpoint) + f.apiVersion = testCase.apiVersion + _, err := NewGRPCService(testCase.endpoint, 1*time.Second) if err == nil { t.Fatalf("should fail to create envelope service for %s.", testCase.name) } @@ -109,7 +279,7 @@ func TestInvalidConfiguration(t *testing.T) { } // Start the gRPC server that listens on unix socket. -func startTestKMSProvider() (*grpc.Server, error) { +func startFakeKMSProvider(version string) (*fakeKMSPlugin, error) { sockFile, err := parseEndpoint(endpoint) if err != nil { return nil, fmt.Errorf("failed to parse endpoint:%q, error %v", endpoint, err) @@ -119,31 +289,25 @@ func startTestKMSProvider() (*grpc.Server, error) { return nil, fmt.Errorf("failed to listen on the unix socket, error: %v", err) } - server := grpc.NewServer() - kmsapi.RegisterKeyManagementServiceServer(server, &base64Server{}) - go server.Serve(listener) - return server, nil -} - -func stopTestKMSProvider(server *grpc.Server) { - server.Stop() + s := grpc.NewServer() + f := &fakeKMSPlugin{apiVersion: version, server: s} + kmsapi.RegisterKeyManagementServiceServer(s, f) + go s.Serve(listener) + return f, nil } // Fake gRPC sever for remote KMS provider. // Use base64 to simulate encrypt and decrypt. 
-type base64Server struct{} - -var testProviderAPIVersion = kmsapiVersion - -func setAPIVersion(apiVersion string) { - testProviderAPIVersion = apiVersion +type fakeKMSPlugin struct { + apiVersion string + server *grpc.Server } -func (s *base64Server) Version(ctx context.Context, request *kmsapi.VersionRequest) (*kmsapi.VersionResponse, error) { - return &kmsapi.VersionResponse{Version: testProviderAPIVersion, RuntimeName: "testKMS", RuntimeVersion: "0.0.1"}, nil +func (s *fakeKMSPlugin) Version(ctx context.Context, request *kmsapi.VersionRequest) (*kmsapi.VersionResponse, error) { + return &kmsapi.VersionResponse{Version: s.apiVersion, RuntimeName: "testKMS", RuntimeVersion: "0.0.1"}, nil } -func (s *base64Server) Decrypt(ctx context.Context, request *kmsapi.DecryptRequest) (*kmsapi.DecryptResponse, error) { +func (s *fakeKMSPlugin) Decrypt(ctx context.Context, request *kmsapi.DecryptRequest) (*kmsapi.DecryptResponse, error) { buf := make([]byte, base64.StdEncoding.DecodedLen(len(request.Cipher))) n, err := base64.StdEncoding.Decode(buf, request.Cipher) if err != nil { @@ -153,7 +317,7 @@ func (s *base64Server) Decrypt(ctx context.Context, request *kmsapi.DecryptReque return &kmsapi.DecryptResponse{Plain: buf[:n]}, nil } -func (s *base64Server) Encrypt(ctx context.Context, request *kmsapi.EncryptRequest) (*kmsapi.EncryptResponse, error) { +func (s *fakeKMSPlugin) Encrypt(ctx context.Context, request *kmsapi.EncryptRequest) (*kmsapi.EncryptResponse, error) { buf := make([]byte, base64.StdEncoding.EncodedLen(len(request.Plain))) base64.StdEncoding.Encode(buf, request.Plain) return &kmsapi.EncryptResponse{Cipher: buf}, nil diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc.go b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc.go index 38cd5bd5dba..551f769d230 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc.go +++ 
b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc.go @@ -342,6 +342,12 @@ func untrustedIssuer(token string) (string, error) { if err := json.Unmarshal(payload, &claims); err != nil { return "", fmt.Errorf("while unmarshaling token: %v", err) } + // Coalesce the legacy GoogleIss with the new one. + // + // http://openid.net/specs/openid-connect-core-1_0.html#GoogleIss + if claims.Issuer == "accounts.google.com" { + return "https://accounts.google.com", nil + } return claims.Issuer, nil } diff --git a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc_test.go b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc_test.go index 53d849bdb01..fc055af6424 100644 --- a/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc_test.go +++ b/staging/src/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc_test.go @@ -1365,6 +1365,28 @@ func TestToken(t *testing.T) { }, wantInitErr: true, }, + { + name: "accounts.google.com issuer", + options: Options{ + IssuerURL: "https://accounts.google.com", + ClientID: "my-client", + UsernameClaim: "email", + now: func() time.Time { return now }, + }, + claims: fmt.Sprintf(`{ + "iss": "accounts.google.com", + "email": "thomas.jefferson@gmail.com", + "aud": "my-client", + "exp": %d + }`, valid.Unix()), + signingKey: loadRSAPrivKey(t, "testdata/rsa_1.pem", jose.RS256), + pubKeys: []*jose.JSONWebKey{ + loadRSAKey(t, "testdata/rsa_1.pem", jose.RS256), + }, + want: &user.DefaultInfo{ + Name: "thomas.jefferson@gmail.com", + }, + }, } for _, test := range tests { t.Run(test.name, test.run) diff --git a/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json b/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json index 9458287ff0b..170cc171518 100644 --- a/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json +++ b/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json @@ -96,7 +96,7 @@ }, { "ImportPath": "github.com/modern-go/reflect2", - "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" 
+ "Rev": "94122c33edd36123c84d5368cfb2b69df93a0ec8" }, { "ImportPath": "github.com/peterbourgon/diskv", diff --git a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go index 6f75af91b43..8fc2227cf1c 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go +++ b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go @@ -19,6 +19,7 @@ package genericclioptions import ( "fmt" "io/ioutil" + "sort" "strings" "github.com/spf13/cobra" @@ -49,6 +50,7 @@ func (f *JSONPathPrintFlags) AllowedFormats() []string { for format := range jsonFormats { formats = append(formats, format) } + sort.Strings(formats) return formats } diff --git a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags_test.go b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags_test.go index bf0c087bc1b..bba946f4039 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags_test.go +++ b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags_test.go @@ -21,6 +21,7 @@ import ( "fmt" "io/ioutil" "os" + "sort" "strings" "testing" @@ -101,6 +102,9 @@ func TestPrinterSupportsExpectedJSONPathFormats(t *testing.T) { printFlags := JSONPathPrintFlags{ TemplateArgument: templateArg, } + if !sort.StringsAreSorted(printFlags.AllowedFormats()) { + t.Fatalf("allowed formats are not sorted") + } p, err := printFlags.ToPrinter(tc.outputFormat) if tc.expectNoMatch { @@ -180,6 +184,9 @@ func TestJSONPathPrinterDefaultsAllowMissingKeysToTrue(t *testing.T) { TemplateArgument: &tc.templateArg, AllowMissingKeys: tc.allowMissingKeys, } + if !sort.StringsAreSorted(printFlags.AllowedFormats()) { + t.Fatalf("allowed formats are not sorted") + } outputFormat := "jsonpath" p, err := printFlags.ToPrinter(outputFormat) diff --git a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers/json.go 
b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers/json.go index 1df9a864665..63f5834d418 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers/json.go +++ b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/printers/json.go @@ -68,10 +68,7 @@ func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error { // YAMLPrinter is an implementation of ResourcePrinter which outputs an object as YAML. // The input object is assumed to be in the internal version of an API and is converted // to the given version first. -type YAMLPrinter struct { - version string - converter runtime.ObjectConvertor -} +type YAMLPrinter struct{} // PrintObj prints the data as YAML. func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error { diff --git a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go index eeae7ac7d6e..08954b24173 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go +++ b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go @@ -19,6 +19,7 @@ package genericclioptions import ( "fmt" "io/ioutil" + "sort" "strings" "github.com/spf13/cobra" @@ -51,6 +52,7 @@ func (f *GoTemplatePrintFlags) AllowedFormats() []string { for format := range templateFormats { formats = append(formats, format) } + sort.Strings(formats) return formats } diff --git a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/template_flags_test.go b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/template_flags_test.go index e1f5ae60e5c..194a335e420 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/template_flags_test.go +++ b/staging/src/k8s.io/cli-runtime/pkg/genericclioptions/template_flags_test.go @@ -21,6 +21,7 @@ import ( "fmt" "io/ioutil" "os" + "sort" "strings" "testing" @@ -101,6 +102,9 @@ func TestPrinterSupportsExpectedTemplateFormats(t *testing.T) { printFlags := 
GoTemplatePrintFlags{ TemplateArgument: templateArg, } + if !sort.StringsAreSorted(printFlags.AllowedFormats()) { + t.Fatalf("allowed formats are not sorted") + } p, err := printFlags.ToPrinter(tc.outputFormat) if tc.expectNoMatch { @@ -174,6 +178,9 @@ func TestTemplatePrinterDefaultsAllowMissingKeysToTrue(t *testing.T) { TemplateArgument: &tc.templateArg, AllowMissingKeys: tc.allowMissingKeys, } + if !sort.StringsAreSorted(printFlags.AllowedFormats()) { + t.Fatalf("allowed formats are not sorted") + } outputFormat := "template" p, err := printFlags.ToPrinter(outputFormat) diff --git a/staging/src/k8s.io/client-go/Godeps/Godeps.json b/staging/src/k8s.io/client-go/Godeps/Godeps.json index 7966502990b..9076b4032cb 100644 --- a/staging/src/k8s.io/client-go/Godeps/Godeps.json +++ b/staging/src/k8s.io/client-go/Godeps/Godeps.json @@ -168,7 +168,7 @@ }, { "ImportPath": "github.com/modern-go/reflect2", - "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" + "Rev": "94122c33edd36123c84d5368cfb2b69df93a0ec8" }, { "ImportPath": "github.com/peterbourgon/diskv", diff --git a/staging/src/k8s.io/client-go/INSTALL.md b/staging/src/k8s.io/client-go/INSTALL.md new file mode 100644 index 00000000000..d3592f490aa --- /dev/null +++ b/staging/src/k8s.io/client-go/INSTALL.md @@ -0,0 +1,162 @@ +# Installing client-go + +## For the casual user + +If you want to write a simple script, don't care about a reproducible client +library install, don't mind getting head (which may be less stable than a +particular release), then simply: + +```sh +$ go get k8s.io/client-go/... +``` + +This will install `k8s.io/client-go` in your `$GOPATH`. `k8s.io/client-go` +includes most of its own dependencies in its `k8s.io/client-go/vendor` path, +except for `k8s.io/apimachinery` and `glog`. `go get` will recursively download +these excluded repos to your `$GOPATH`, if they don't already exist. If +`k8s.io/apimachinery` preexisted in `$GOPATH`, you also need to: + +```sh +$ go get -u k8s.io/apimachinery/... 
+``` + +because the head of client-go is only guaranteed to work with the head of +apimachinery. + +We excluded `k8s.io/apimachinery` and `glog` from `k8s.io/client-go/vendor` to +prevent `go get` users from hitting issues like +[#19](https://github.com/kubernetes/client-go/issues/19) and +[#83](https://github.com/kubernetes/client-go/issues/83). If your project share +other dependencies with client-go, and you hit issues similar to #19 or #83, +then you'll need to look down at the next section. + +Note: the official go policy is that libraries should not vendor their +dependencies. This is unworkable for us, since our dependencies change and HEAD +on every dependency has not necessarily been tested with client-go. In fact, +HEAD from all dependencies may not even compile with client-go! + +## Dependency management for the serious (or reluctant) user + +Reasons why you might need to use a dependency management system: +* You use a dependency that client-go also uses, and don't want two copies of + the dependency compiled into your application. For some dependencies with + singletons or global inits (e.g. `glog`) this wouldn't even compile... +* You want to lock in a particular version (so you don't have to change your + code every time we change a public interface). +* You want your install to be reproducible. For example, for your CI system or + for new team members. + +There are three tools you could in theory use for this. Instructions +for each follows. + +### Godep + +[godep](https://github.com/tools/godep) is an older dependency management tool, which is +used by the main Kubernetes repo and `client-go` to manage dependencies. + +Before proceeding with the below instructions, you should ensure that your +$GOPATH is empty except for containing your own package and its dependencies, +and you have a copy of godep somewhere in your $PATH. + +To install `client-go` and place its dependencies in your `$GOPATH`: + +```sh +go get k8s.io/client-go/... 
+cd $GOPATH/src/k8s.io/client-go +git checkout v9.0.0 # replace v9.0.0 with the required version +# cd 1.5 # only necessary with 1.5 and 1.4 clients. +godep restore ./... +``` + +At this point, `client-go`'s dependencies have been placed in your $GOPATH, but +if you were to build, `client-go` would still see its own copy of its +dependencies in its `vendor` directory. You have two options at this point. + +If you would like to keep dependencies in your own project's vendor directory, +then you can continue like this: + +```sh +cd $GOPATH/src/ +godep save ./... +``` + +Alternatively, if you want to build using the dependencies in your `$GOPATH`, +then `rm -rf vendor/` to remove `client-go`'s copy of its dependencies. + +### Glide + +[Glide](https://github.com/Masterminds/glide) is another popular dependency +management tool for Go. Glide will manage your /vendor directory, but unlike +godep, will not use or modify your $GOPATH (there's no equivalent of +`godep restore` or `godep save`). + +Generally, it's best to avoid Glide's many subcommands, favoring modifying +Glide's manifest file (`glide.yaml`) directly, then running +`glide update --strip-vendor`. First create a `glide.yaml` file at the root of +your project: + +```yaml +package: ( your project's import path ) # e.g. github.com/foo/bar +import: +- package: k8s.io/client-go + version: v9.0.0 # replace v9.0.0 with the required version +``` + +Second, add a Go file that imports `client-go` somewhere in your project, +otherwise `client-go`'s dependencies will not be added to your project's +vendor/. Then run the following command in the same directory as `glide.yaml`: + +```sh +glide update --strip-vendor +``` + +This can also be abbreviated as: + +```sh +glide up -v +``` + +At this point, `k8s.io/client-go` should be added to your project's vendor/. +`client-go`'s dependencies should be flattened and be added to your project's +vendor/ as well. 
+ +Glide will detect the versions of dependencies `client-go` specified in +`client-go`'s Godep.json file, and automatically set the versions of these +imports in your /vendor directory. It will also record the detected version of +all dependencies in the `glide.lock` file. + +Projects that require a different version of a dependency than `client-go` +requests can override the version manually in `glide.yaml`. For example: + +```yaml +package: ( your project's import path ) # e.g. github.com/foo/bar +import: +- package: k8s.io/client-go + version: v9.0.0 # replace v9.0.0 with the required version +# Use a newer version of go-spew even though client-go wants an old one. +- package: github.com/davecgh/go-spew + version: v1.1.0 +``` + +After modifying, run `glide up -v` again to re-populate your /vendor directory. + +Optionally, Glide users can also use [`glide-vc`](https://github.com/sgotti/glide-vc) +after running `glide up -v` to remove unused files from /vendor. + +### Dep (Not supported yet!) + +[dep](https://github.com/golang/dep) is an up-and-coming dependency management +tool, which has the goal of being accepted as part of the standard go toolchain. +However, client-go does **NOT** work well with `dep` yet. To support `dep`, we +need to fix at least two issues: +1. publish native `Gopkg.toml` in client-go and other k8s.io repos, like `k8s.io/apimachinery`; +2. find a way to express transitive constraints (see https://github.com/golang/dep/issues/1124). + +As a workaround, which may or may not be worthwhile, you can specify all +client-go dependencies manually as +[override](https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md#override) +in Gopkg.toml with the versions listed in [Godeps.json](./Godeps/Godeps.json), +and manually update them when you upgrade client-go version. + +We are actively working on the two issues blocking using `dep`. For the +meantime, we recommend using `glide` or `godeps`. 
diff --git a/staging/src/k8s.io/client-go/dynamic/BUILD b/staging/src/k8s.io/client-go/dynamic/BUILD index 97598fb563f..8c6297220cc 100644 --- a/staging/src/k8s.io/client-go/dynamic/BUILD +++ b/staging/src/k8s.io/client-go/dynamic/BUILD @@ -59,6 +59,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//staging/src/k8s.io/client-go/dynamic/dynamiclister:all-srcs", "//staging/src/k8s.io/client-go/dynamic/fake:all-srcs", ], tags = ["automanaged"], diff --git a/staging/src/k8s.io/client-go/dynamic/dynamiclister/BUILD b/staging/src/k8s.io/client-go/dynamic/dynamiclister/BUILD new file mode 100644 index 00000000000..5c65e9ab2fa --- /dev/null +++ b/staging/src/k8s.io/client-go/dynamic/dynamiclister/BUILD @@ -0,0 +1,47 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = [ + "interface.go", + "lister.go", + ], + importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/dynamic/dynamiclister", + importpath = "k8s.io/client-go/dynamic/dynamiclister", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["lister_test.go"], + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + 
"//staging/src/k8s.io/client-go/tools/cache:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/client-go/dynamic/dynamiclister/interface.go b/staging/src/k8s.io/client-go/dynamic/dynamiclister/interface.go new file mode 100644 index 00000000000..c39cbee925b --- /dev/null +++ b/staging/src/k8s.io/client-go/dynamic/dynamiclister/interface.go @@ -0,0 +1,40 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiclister + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" +) + +// Lister helps list resources. +type Lister interface { + // List lists all resources in the indexer. + List(selector labels.Selector) (ret []*unstructured.Unstructured, err error) + // Get retrieves a resource from the indexer with the given name + Get(name string) (*unstructured.Unstructured, error) + // Namespace returns an object that can list and get resources in a given namespace. + Namespace(namespace string) NamespaceLister +} + +// NamespaceLister helps list and get resources. +type NamespaceLister interface { + // List lists all resources in the indexer for a given namespace. 
+ List(selector labels.Selector) (ret []*unstructured.Unstructured, err error) + // Get retrieves a resource from the indexer for a given namespace and name. + Get(name string) (*unstructured.Unstructured, error) +} diff --git a/staging/src/k8s.io/client-go/dynamic/dynamiclister/lister.go b/staging/src/k8s.io/client-go/dynamic/dynamiclister/lister.go new file mode 100644 index 00000000000..a50fc471e90 --- /dev/null +++ b/staging/src/k8s.io/client-go/dynamic/dynamiclister/lister.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiclister + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +var _ Lister = &dynamicLister{} +var _ NamespaceLister = &dynamicNamespaceLister{} + +// dynamicLister implements the Lister interface. +type dynamicLister struct { + indexer cache.Indexer + gvr schema.GroupVersionResource +} + +// New returns a new Lister. +func New(indexer cache.Indexer, gvr schema.GroupVersionResource) Lister { + return &dynamicLister{indexer: indexer, gvr: gvr} +} + +// List lists all resources in the indexer. 
+func (l *dynamicLister) List(selector labels.Selector) (ret []*unstructured.Unstructured, err error) { + err = cache.ListAll(l.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*unstructured.Unstructured)) + }) + return ret, err +} + +// Get retrieves a resource from the indexer with the given name +func (l *dynamicLister) Get(name string) (*unstructured.Unstructured, error) { + obj, exists, err := l.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(l.gvr.GroupResource(), name) + } + return obj.(*unstructured.Unstructured), nil +} + +// Namespace returns an object that can list and get resources from a given namespace. +func (l *dynamicLister) Namespace(namespace string) NamespaceLister { + return &dynamicNamespaceLister{indexer: l.indexer, namespace: namespace, gvr: l.gvr} +} + +// dynamicNamespaceLister implements the NamespaceLister interface. +type dynamicNamespaceLister struct { + indexer cache.Indexer + namespace string + gvr schema.GroupVersionResource +} + +// List lists all resources in the indexer for a given namespace. +func (l *dynamicNamespaceLister) List(selector labels.Selector) (ret []*unstructured.Unstructured, err error) { + err = cache.ListAllByNamespace(l.indexer, l.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*unstructured.Unstructured)) + }) + return ret, err +} + +// Get retrieves a resource from the indexer for a given namespace and name. 
+func (l *dynamicNamespaceLister) Get(name string) (*unstructured.Unstructured, error) { + obj, exists, err := l.indexer.GetByKey(l.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(l.gvr.GroupResource(), name) + } + return obj.(*unstructured.Unstructured), nil +} diff --git a/staging/src/k8s.io/client-go/dynamic/dynamiclister/lister_test.go b/staging/src/k8s.io/client-go/dynamic/dynamiclister/lister_test.go new file mode 100644 index 00000000000..9ebc4794664 --- /dev/null +++ b/staging/src/k8s.io/client-go/dynamic/dynamiclister/lister_test.go @@ -0,0 +1,257 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dynamiclister_test + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/client-go/dynamic/dynamiclister" + "k8s.io/client-go/tools/cache" +) + +func TestNamespaceGetMethod(t *testing.T) { + tests := []struct { + name string + existingObjects []runtime.Object + namespaceToSync string + gvrToSync schema.GroupVersionResource + objectToGet string + expectedObject *unstructured.Unstructured + expectError bool + }{ + { + name: "scenario 1: gets name-foo1 resource from the indexer from ns-foo namespace", + existingObjects: []runtime.Object{ + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo1"), + newUnstructured("group/version", "TheKind", "ns-bar", "name-bar"), + }, + namespaceToSync: "ns-foo", + gvrToSync: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "TheKinds"}, + objectToGet: "name-foo1", + expectedObject: newUnstructured("group/version", "TheKind", "ns-foo", "name-foo1"), + }, + { + name: "scenario 2: gets name-foo-non-existing resource from the indexer from ns-foo namespace", + existingObjects: []runtime.Object{ + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo1"), + newUnstructured("group/version", "TheKind", "ns-bar", "name-bar"), + }, + namespaceToSync: "ns-foo", + gvrToSync: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "TheKinds"}, + objectToGet: "name-foo-non-existing", + expectError: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // test data + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + for _, obj := range test.existingObjects { + err := 
indexer.Add(obj) + if err != nil { + t.Fatal(err) + } + } + // act + target := dynamiclister.New(indexer, test.gvrToSync).Namespace(test.namespaceToSync) + actualObject, err := target.Get(test.objectToGet) + + // validate + if test.expectError { + if err == nil { + t.Fatal("expected to get an error but non was returned") + } + return + } + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(test.expectedObject, actualObject) { + t.Fatalf("unexpected object has been returned expected = %v actual = %v, diff = %v", test.expectedObject, actualObject, diff.ObjectDiff(test.expectedObject, actualObject)) + } + }) + } +} + +func TestNamespaceListMethod(t *testing.T) { + // test data + objs := []runtime.Object{ + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo1"), + newUnstructured("group/version", "TheKind", "ns-bar", "name-bar"), + } + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + for _, obj := range objs { + err := indexer.Add(obj) + if err != nil { + t.Fatal(err) + } + } + expectedOutput := []*unstructured.Unstructured{ + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo1"), + } + namespaceToList := "ns-foo" + + // act + target := dynamiclister.New(indexer, schema.GroupVersionResource{Group: "group", Version: "version", Resource: "TheKinds"}).Namespace(namespaceToList) + actualOutput, err := target.List(labels.Everything()) + + // validate + if err != nil { + t.Fatal(err) + } + assertListOrDie(expectedOutput, actualOutput, t) +} + +func TestListerGetMethod(t *testing.T) { + tests := []struct { + name string + existingObjects []runtime.Object + namespaceToSync string + gvrToSync schema.GroupVersionResource + objectToGet string + expectedObject *unstructured.Unstructured + expectError bool + }{ + { + name: "scenario 1: gets name-foo1 resource from the indexer", + 
existingObjects: []runtime.Object{ + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), + newUnstructured("group/version", "TheKind", "", "name-foo1"), + newUnstructured("group/version", "TheKind", "ns-bar", "name-bar"), + }, + namespaceToSync: "", + gvrToSync: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "TheKinds"}, + objectToGet: "name-foo1", + expectedObject: newUnstructured("group/version", "TheKind", "", "name-foo1"), + }, + { + name: "scenario 2: doesn't get name-foo resource from the indexer from ns-foo namespace", + existingObjects: []runtime.Object{ + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo1"), + newUnstructured("group/version", "TheKind", "ns-bar", "name-bar"), + }, + namespaceToSync: "ns-foo", + gvrToSync: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "TheKinds"}, + objectToGet: "name-foo", + expectError: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // test data + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + for _, obj := range test.existingObjects { + err := indexer.Add(obj) + if err != nil { + t.Fatal(err) + } + } + // act + target := dynamiclister.New(indexer, test.gvrToSync) + actualObject, err := target.Get(test.objectToGet) + + // validate + if test.expectError { + if err == nil { + t.Fatal("expected to get an error but non was returned") + } + return + } + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(test.expectedObject, actualObject) { + t.Fatalf("unexpected object has been returned expected = %v actual = %v, diff = %v", test.expectedObject, actualObject, diff.ObjectDiff(test.expectedObject, actualObject)) + } + }) + } +} + +func TestListerListMethod(t *testing.T) { + // test data + objs := []runtime.Object{ + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), + 
newUnstructured("group/version", "TheKind", "ns-foo", "name-bar"), + } + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + for _, obj := range objs { + err := indexer.Add(obj) + if err != nil { + t.Fatal(err) + } + } + expectedOutput := []*unstructured.Unstructured{ + newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"), + newUnstructured("group/version", "TheKind", "ns-foo", "name-bar"), + } + + // act + target := dynamiclister.New(indexer, schema.GroupVersionResource{Group: "group", Version: "version", Resource: "TheKinds"}) + actualOutput, err := target.List(labels.Everything()) + + // validate + if err != nil { + t.Fatal(err) + } + assertListOrDie(expectedOutput, actualOutput, t) +} + +func newUnstructured(apiVersion, kind, namespace, name string) *unstructured.Unstructured { + return &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": apiVersion, + "kind": kind, + "metadata": map[string]interface{}{ + "namespace": namespace, + "name": name, + }, + }, + } +} + +func assertListOrDie(expected, actual []*unstructured.Unstructured, t *testing.T) { + if len(actual) != len(expected) { + t.Fatalf("unexpected number of items returned, expected = %d, actual = %d", len(expected), len(actual)) + } + for _, expectedObject := range expected { + found := false + for _, actualObject := range actual { + if actualObject.GetName() == expectedObject.GetName() { + if !reflect.DeepEqual(expectedObject, actualObject) { + t.Fatalf("unexpected object has been returned expected = %v actual = %v, diff = %v", expectedObject, actualObject, diff.ObjectDiff(expectedObject, actualObject)) + } + found = true + } + } + if !found { + t.Fatalf("the resource with the name = %s was not found in the returned output", expectedObject.GetName()) + } + } +} diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go 
b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go index 2f0d8e95370..f3b5e93ab0d 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go @@ -26,8 +26,9 @@ func (c *FakeEvictions) Evict(eviction *policy.Eviction) error { action := core.GetActionImpl{} action.Verb = "post" action.Namespace = c.ns - action.Resource = schema.GroupVersionResource{Group: "", Version: "", Resource: "pods"} + action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} action.Subresource = "eviction" + action.Name = eviction.Name _, err := c.Fake.Invokes(action, eviction) return err } diff --git a/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/doc.go index d06482d554d..b99459757e5 100644 --- a/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/doc.go +++ b/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=client.authentication.k8s.io + package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication" diff --git a/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go b/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go index 016adb28a74..19ab7761400 100644 --- a/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go +++ b/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go @@ -20,4 +20,5 @@ limitations under the License. 
// +k8s:defaulter-gen=TypeMeta // +groupName=client.authentication.k8s.io + package v1alpha1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" diff --git a/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go b/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go index fbcd9b7fea2..22d1c588bc7 100644 --- a/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go +++ b/staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go @@ -20,4 +20,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=client.authentication.k8s.io + package v1beta1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" diff --git a/staging/src/k8s.io/client-go/pkg/version/doc.go b/staging/src/k8s.io/client-go/pkg/version/doc.go index 30399fb0201..05e997e1335 100644 --- a/staging/src/k8s.io/client-go/pkg/version/doc.go +++ b/staging/src/k8s.io/client-go/pkg/version/doc.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +k8s:openapi-gen=true + // Package version supplies version information collected at build time to // kubernetes components. 
-// +k8s:openapi-gen=true package version // import "k8s.io/client-go/pkg/version" diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go index 193d0c727bf..2bc6c4474bb 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go @@ -174,7 +174,13 @@ func parseScopes(gcpConfig map[string]string) []string { } func (g *gcpAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { - return &conditionalTransport{&oauth2.Transport{Source: g.tokenSource, Base: rt}, g.persister} + var resetCache map[string]string + if cts, ok := g.tokenSource.(*cachedTokenSource); ok { + resetCache = cts.baseCache() + } else { + resetCache = make(map[string]string) + } + return &conditionalTransport{&oauth2.Transport{Source: g.tokenSource, Base: rt}, g.persister, resetCache} } func (g *gcpAuthProvider) Login() error { return nil } @@ -247,6 +253,19 @@ func (t *cachedTokenSource) update(tok *oauth2.Token) map[string]string { return ret } +// baseCache is the base configuration value for this TokenSource, without any cached ephemeral tokens. 
+func (t *cachedTokenSource) baseCache() map[string]string { + t.lk.Lock() + defer t.lk.Unlock() + ret := map[string]string{} + for k, v := range t.cache { + ret[k] = v + } + delete(ret, "access-token") + delete(ret, "expiry") + return ret +} + type commandTokenSource struct { cmd string args []string @@ -337,6 +356,7 @@ func parseJSONPath(input interface{}, name, template string) (string, error) { type conditionalTransport struct { oauthTransport *oauth2.Transport persister restclient.AuthProviderConfigPersister + resetCache map[string]string } var _ net.RoundTripperWrapper = &conditionalTransport{} @@ -354,8 +374,7 @@ func (t *conditionalTransport) RoundTrip(req *http.Request) (*http.Response, err if res.StatusCode == 401 { glog.V(4).Infof("The credentials that were supplied are invalid for the target cluster") - emptyCache := make(map[string]string) - t.persister.Persist(emptyCache) + t.persister.Persist(t.resetCache) } return res, nil diff --git a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp_test.go b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp_test.go index 32271b252b8..c8fbb351613 100644 --- a/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp_test.go +++ b/staging/src/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp_test.go @@ -442,37 +442,61 @@ func (t *MockTransport) RoundTrip(req *http.Request) (*http.Response, error) { return t.res, nil } -func TestClearingCredentials(t *testing.T) { +func Test_cmdTokenSource_roundTrip(t *testing.T) { + accessToken := "fakeToken" fakeExpiry := time.Now().Add(time.Hour) - - cache := map[string]string{ - "access-token": "fakeToken", - "expiry": fakeExpiry.String(), + fakeExpiryStr := fakeExpiry.Format(time.RFC3339Nano) + fs := &fakeTokenSource{ + token: &oauth2.Token{ + AccessToken: accessToken, + Expiry: fakeExpiry, + }, } - cts := cachedTokenSource{ - source: nil, - accessToken: cache["access-token"], - expiry: fakeExpiry, - persister: nil, - cache: nil, + cmdCache := 
map[string]string{ + "cmd-path": "/path/to/tokensource/cmd", + "cmd-args": "--output=json", + } + cmdCacheUpdated := map[string]string{ + "cmd-path": "/path/to/tokensource/cmd", + "cmd-args": "--output=json", + "access-token": accessToken, + "expiry": fakeExpiryStr, + } + simpleCacheUpdated := map[string]string{ + "access-token": accessToken, + "expiry": fakeExpiryStr, } tests := []struct { - name string - res http.Response - cache map[string]string + name string + res http.Response + baseCache, expectedCache map[string]string }{ { "Unauthorized", http.Response{StatusCode: 401}, make(map[string]string), + make(map[string]string), + }, + { + "Unauthorized, nonempty defaultCache", + http.Response{StatusCode: 401}, + cmdCache, + cmdCache, }, { "Authorized", http.Response{StatusCode: 200}, - cache, + make(map[string]string), + simpleCacheUpdated, + }, + { + "Authorized, nonempty defaultCache", + http.Response{StatusCode: 200}, + cmdCache, + cmdCacheUpdated, }, } @@ -480,17 +504,23 @@ func TestClearingCredentials(t *testing.T) { req := http.Request{Header: http.Header{}} for _, tc := range tests { - authProvider := gcpAuthProvider{&cts, persister} + cts, err := newCachedTokenSource(accessToken, fakeExpiry.String(), persister, fs, tc.baseCache) + if err != nil { + t.Fatalf("unexpected error from newCachedTokenSource: %v", err) + } + authProvider := gcpAuthProvider{cts, persister} fakeTransport := MockTransport{&tc.res} - transport := (authProvider.WrapTransport(&fakeTransport)) - persister.Persist(cache) + // call Token to persist/update cache + if _, err := cts.Token(); err != nil { + t.Fatalf("unexpected error from cachedTokenSource.Token(): %v", err) + } transport.RoundTrip(&req) - if got := persister.read(); !reflect.DeepEqual(got, tc.cache) { - t.Errorf("got cache %v, want %v", got, tc.cache) + if got := persister.read(); !reflect.DeepEqual(got, tc.expectedCache) { + t.Errorf("got cache %v, want %v", got, tc.expectedCache) } } diff --git 
a/staging/src/k8s.io/client-go/rest/request.go b/staging/src/k8s.io/client-go/rest/request.go index 69ce1c7595a..9bb311448ab 100644 --- a/staging/src/k8s.io/client-go/rest/request.go +++ b/staging/src/k8s.io/client-go/rest/request.go @@ -455,17 +455,9 @@ func (r *Request) URL() *url.URL { // finalURLTemplate is similar to URL(), but will make all specific parameter values equal // - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query -// parameters will be reset. This creates a copy of the request so as not to change the -// underlying object. This means some useful request info (like the types of field -// selectors in use) will be lost. -// TODO: preserve field selector keys +// parameters will be reset. This creates a copy of the url so as not to change the +// underlying object. func (r Request) finalURLTemplate() url.URL { - if len(r.resourceName) != 0 { - r.resourceName = "{name}" - } - if r.namespaceSet && len(r.namespace) != 0 { - r.namespace = "{namespace}" - } newParams := url.Values{} v := []string{"{value}"} for k := range r.params { @@ -473,6 +465,59 @@ func (r Request) finalURLTemplate() url.URL { } r.params = newParams url := r.URL() + segments := strings.Split(r.URL().Path, "/") + groupIndex := 0 + index := 0 + if r.URL() != nil && r.baseURL != nil && strings.Contains(r.URL().Path, r.baseURL.Path) { + groupIndex += len(strings.Split(r.baseURL.Path, "/")) + } + if groupIndex >= len(segments) { + return *url + } + + const CoreGroupPrefix = "api" + const NamedGroupPrefix = "apis" + isCoreGroup := segments[groupIndex] == CoreGroupPrefix + isNamedGroup := segments[groupIndex] == NamedGroupPrefix + if isCoreGroup { + // checking the case of core group with /api/v1/... format + index = groupIndex + 2 + } else if isNamedGroup { + // checking the case of named group with /apis/apps/v1/... format + index = groupIndex + 3 + } else { + // this should not happen that the only two possibilities are /api... 
and /apis..., just want to put an + // outlet here in case more API groups are added in future if ever possible: + // https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups + // if a wrong API groups name is encountered, return the {prefix} for url.Path + url.Path = "/{prefix}" + url.RawQuery = "" + return *url + } + //switch segLength := len(segments) - index; segLength { + switch { + // case len(segments) - index == 1: + // resource (with no name) do nothing + case len(segments)-index == 2: + // /$RESOURCE/$NAME: replace $NAME with {name} + segments[index+1] = "{name}" + case len(segments)-index == 3: + if segments[index+2] == "finalize" || segments[index+2] == "status" { + // /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name} + segments[index+1] = "{name}" + } else { + // /namespace/$NAMESPACE/$RESOURCE: replace $NAMESPACE with {namespace} + segments[index+1] = "{namespace}" + } + case len(segments)-index >= 4: + segments[index+1] = "{namespace}" + // /namespace/$NAMESPACE/$RESOURCE/$NAME: replace $NAMESPACE with {namespace}, $NAME with {name} + if segments[index+3] != "finalize" && segments[index+3] != "status" { + // /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name} + segments[index+3] = "{name}" + } + } + url.Path = path.Join(segments...) 
return *url } diff --git a/staging/src/k8s.io/client-go/rest/request_test.go b/staging/src/k8s.io/client-go/rest/request_test.go index f75ee86cbe4..2660c0be5d4 100755 --- a/staging/src/k8s.io/client-go/rest/request_test.go +++ b/staging/src/k8s.io/client-go/rest/request_test.go @@ -340,21 +340,169 @@ func TestResultIntoWithNoBodyReturnsErr(t *testing.T) { } func TestURLTemplate(t *testing.T) { - uri, _ := url.Parse("http://localhost") - r := NewRequest(nil, "POST", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0) - r.Prefix("pre1").Resource("r1").Namespace("ns").Name("nm").Param("p0", "v0") - full := r.URL() - if full.String() != "http://localhost/pre1/namespaces/ns/r1/nm?p0=v0" { - t.Errorf("unexpected initial URL: %s", full) + uri, _ := url.Parse("http://localhost/some/base/url/path") + testCases := []struct { + Request *Request + ExpectedFullURL string + ExpectedFinalURL string + }{ + { + // non dynamic client + Request: NewRequest(nil, "POST", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). + Prefix("api", "v1").Resource("r1").Namespace("ns").Name("nm").Param("p0", "v0"), + ExpectedFullURL: "http://localhost/some/base/url/path/api/v1/namespaces/ns/r1/nm?p0=v0", + ExpectedFinalURL: "http://localhost/some/base/url/path/api/v1/namespaces/%7Bnamespace%7D/r1/%7Bname%7D?p0=%7Bvalue%7D", + }, + { + // non dynamic client with wrong api group + Request: NewRequest(nil, "POST", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). 
+ Prefix("pre1", "v1").Resource("r1").Namespace("ns").Name("nm").Param("p0", "v0"), + ExpectedFullURL: "http://localhost/some/base/url/path/pre1/v1/namespaces/ns/r1/nm?p0=v0", + ExpectedFinalURL: "http://localhost/%7Bprefix%7D", + }, + { + // dynamic client with core group + namespace + resourceResource (with name) + // /api/$RESOURCEVERSION/namespaces/$NAMESPACE/$RESOURCE/%NAME + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). + Prefix("/api/v1/namespaces/ns/r1/name1"), + ExpectedFullURL: "http://localhost/some/base/url/path/api/v1/namespaces/ns/r1/name1", + ExpectedFinalURL: "http://localhost/some/base/url/path/api/v1/namespaces/%7Bnamespace%7D/r1/%7Bname%7D", + }, + { + // dynamic client with named group + namespace + resourceResource (with name) + // /apis/$NAMEDGROUPNAME/$RESOURCEVERSION/namespaces/$NAMESPACE/$RESOURCE/%NAME + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). + Prefix("/apis/g1/v1/namespaces/ns/r1/name1"), + ExpectedFullURL: "http://localhost/some/base/url/path/apis/g1/v1/namespaces/ns/r1/name1", + ExpectedFinalURL: "http://localhost/some/base/url/path/apis/g1/v1/namespaces/%7Bnamespace%7D/r1/%7Bname%7D", + }, + { + // dynamic client with core group + namespace + resourceResource (with NO name) + // /api/$RESOURCEVERSION/namespaces/$NAMESPACE/$RESOURCE + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). 
+ Prefix("/api/v1/namespaces/ns/r1"), + ExpectedFullURL: "http://localhost/some/base/url/path/api/v1/namespaces/ns/r1", + ExpectedFinalURL: "http://localhost/some/base/url/path/api/v1/namespaces/%7Bnamespace%7D/r1", + }, + { + // dynamic client with named group + namespace + resourceResource (with NO name) + // /apis/$NAMEDGROUPNAME/$RESOURCEVERSION/namespaces/$NAMESPACE/$RESOURCE + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). + Prefix("/apis/g1/v1/namespaces/ns/r1"), + ExpectedFullURL: "http://localhost/some/base/url/path/apis/g1/v1/namespaces/ns/r1", + ExpectedFinalURL: "http://localhost/some/base/url/path/apis/g1/v1/namespaces/%7Bnamespace%7D/r1", + }, + { + // dynamic client with core group + resourceResource (with name) + // /api/$RESOURCEVERSION/$RESOURCE/%NAME + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). + Prefix("/api/v1/r1/name1"), + ExpectedFullURL: "http://localhost/some/base/url/path/api/v1/r1/name1", + ExpectedFinalURL: "http://localhost/some/base/url/path/api/v1/r1/%7Bname%7D", + }, + { + // dynamic client with named group + resourceResource (with name) + // /apis/$NAMEDGROUPNAME/$RESOURCEVERSION/$RESOURCE/%NAME + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). 
+ Prefix("/apis/g1/v1/r1/name1"), + ExpectedFullURL: "http://localhost/some/base/url/path/apis/g1/v1/r1/name1", + ExpectedFinalURL: "http://localhost/some/base/url/path/apis/g1/v1/r1/%7Bname%7D", + }, + { + // dynamic client with named group + namespace + resourceResource (with name) + subresource + // /apis/$NAMEDGROUPNAME/$RESOURCEVERSION/namespaces/$NAMESPACE/$RESOURCE/%NAME/$SUBRESOURCE + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). + Prefix("/apis/namespaces/namespaces/namespaces/namespaces/namespaces/namespaces/finalize"), + ExpectedFullURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/namespaces/namespaces/namespaces/finalize", + ExpectedFinalURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/%7Bnamespace%7D/namespaces/%7Bname%7D/finalize", + }, + { + // dynamic client with named group + namespace + resourceResource (with name) + // /apis/$NAMEDGROUPNAME/$RESOURCEVERSION/namespaces/$NAMESPACE/$RESOURCE/%NAME + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). + Prefix("/apis/namespaces/namespaces/namespaces/namespaces/namespaces/namespaces"), + ExpectedFullURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/namespaces/namespaces/namespaces", + ExpectedFinalURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/%7Bnamespace%7D/namespaces/%7Bname%7D", + }, + { + // dynamic client with named group + namespace + resourceResource (with NO name) + subresource + // /apis/$NAMEDGROUPNAME/$RESOURCEVERSION/namespaces/$NAMESPACE/$RESOURCE/%SUBRESOURCE + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). 
+ Prefix("/apis/namespaces/namespaces/namespaces/namespaces/namespaces/finalize"), + ExpectedFullURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/namespaces/namespaces/finalize", + ExpectedFinalURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/%7Bnamespace%7D/namespaces/finalize", + }, + { + // dynamic client with named group + namespace + resourceResource (with NO name) + subresource + // /apis/$NAMEDGROUPNAME/$RESOURCEVERSION/namespaces/$NAMESPACE/$RESOURCE/%SUBRESOURCE + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). + Prefix("/apis/namespaces/namespaces/namespaces/namespaces/namespaces/status"), + ExpectedFullURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/namespaces/namespaces/status", + ExpectedFinalURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/%7Bnamespace%7D/namespaces/status", + }, + { + // dynamic client with named group + namespace + resourceResource (with no name) + // /apis/$NAMEDGROUPNAME/$RESOURCEVERSION/namespaces/$NAMESPACE/$RESOURCE/%NAME + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). + Prefix("/apis/namespaces/namespaces/namespaces/namespaces/namespaces"), + ExpectedFullURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/namespaces/namespaces", + ExpectedFinalURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/%7Bnamespace%7D/namespaces", + }, + { + // dynamic client with named group + resourceResource (with name) + subresource + // /apis/$NAMEDGROUPNAME/$RESOURCEVERSION/namespaces/$NAMESPACE/$RESOURCE/%NAME + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). 
+ Prefix("/apis/namespaces/namespaces/namespaces/namespaces/finalize"), + ExpectedFullURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/namespaces/finalize", + ExpectedFinalURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/%7Bname%7D/finalize", + }, + { + // dynamic client with named group + resourceResource (with name) + subresource + // /apis/$NAMEDGROUPNAME/$RESOURCEVERSION/namespaces/$NAMESPACE/$RESOURCE/%NAME + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). + Prefix("/apis/namespaces/namespaces/namespaces/namespaces/status"), + ExpectedFullURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/namespaces/status", + ExpectedFinalURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/%7Bname%7D/status", + }, + { + // dynamic client with named group + resourceResource (with name) + // /apis/$NAMEDGROUPNAME/$RESOURCEVERSION/$RESOURCE/%NAME + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). + Prefix("/apis/namespaces/namespaces/namespaces/namespaces"), + ExpectedFullURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/namespaces", + ExpectedFinalURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces/%7Bname%7D", + }, + { + // dynamic client with named group + resourceResource (with no name) + // /apis/$NAMEDGROUPNAME/$RESOURCEVERSION/$RESOURCE/%NAME + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). 
+ Prefix("/apis/namespaces/namespaces/namespaces"), + ExpectedFullURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces", + ExpectedFinalURL: "http://localhost/some/base/url/path/apis/namespaces/namespaces/namespaces", + }, + { + // dynamic client with wrong api group + namespace + resourceResource (with name) + subresource + // /apis/$NAMEDGROUPNAME/$RESOURCEVERSION/namespaces/$NAMESPACE/$RESOURCE/%NAME/$SUBRESOURCE + Request: NewRequest(nil, "DELETE", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0). + Prefix("/pre1/namespaces/namespaces/namespaces/namespaces/namespaces/namespaces/finalize"), + ExpectedFullURL: "http://localhost/some/base/url/path/pre1/namespaces/namespaces/namespaces/namespaces/namespaces/namespaces/finalize", + ExpectedFinalURL: "http://localhost/%7Bprefix%7D", + }, } - actualURL := r.finalURLTemplate() - actual := actualURL.String() - expected := "http://localhost/pre1/namespaces/%7Bnamespace%7D/r1/%7Bname%7D?p0=%7Bvalue%7D" - if actual != expected { - t.Errorf("unexpected URL template: %s %s", actual, expected) - } - if r.URL().String() != full.String() { - t.Errorf("creating URL template changed request: %s -> %s", full.String(), r.URL().String()) + for i, testCase := range testCases { + r := testCase.Request + full := r.URL() + if full.String() != testCase.ExpectedFullURL { + t.Errorf("%d: unexpected initial URL: %s %s", i, full, testCase.ExpectedFullURL) + } + actualURL := r.finalURLTemplate() + actual := actualURL.String() + if actual != testCase.ExpectedFinalURL { + t.Errorf("%d: unexpected URL template: %s %s", i, actual, testCase.ExpectedFinalURL) + } + if r.URL().String() != full.String() { + t.Errorf("%d, creating URL template changed request: %s -> %s", i, full.String(), r.URL().String()) + } } } diff --git a/staging/src/k8s.io/client-go/scale/interfaces.go b/staging/src/k8s.io/client-go/scale/interfaces.go index 4668c7417d1..13f2cfb8e78 100644 --- 
a/staging/src/k8s.io/client-go/scale/interfaces.go +++ b/staging/src/k8s.io/client-go/scale/interfaces.go @@ -34,6 +34,6 @@ type ScaleInterface interface { // Get fetches the scale of the given scalable resource. Get(resource schema.GroupResource, name string) (*autoscalingapi.Scale, error) - // Update updates the scale of the the given scalable resource. + // Update updates the scale of the given scalable resource. Update(resource schema.GroupResource, scale *autoscalingapi.Scale) (*autoscalingapi.Scale, error) } diff --git a/staging/src/k8s.io/client-go/tools/cache/heap.go b/staging/src/k8s.io/client-go/tools/cache/heap.go index 78e492455ea..7357ff97a1f 100644 --- a/staging/src/k8s.io/client-go/tools/cache/heap.go +++ b/staging/src/k8s.io/client-go/tools/cache/heap.go @@ -204,7 +204,7 @@ func (h *Heap) AddIfNotPresent(obj interface{}) error { return nil } -// addIfNotPresentLocked assumes the lock is already held and adds the the provided +// addIfNotPresentLocked assumes the lock is already held and adds the provided // item to the queue if it does not already exist. func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) { if _, exists := h.data.items[key]; exists { diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/api/doc.go b/staging/src/k8s.io/client-go/tools/clientcmd/api/doc.go index 0a081871ac8..5871575a669 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/api/doc.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/api/doc.go @@ -15,4 +15,5 @@ limitations under the License. */ // +k8s:deepcopy-gen=package + package api diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/staging/src/k8s.io/client-go/tools/clientcmd/api/v1/doc.go index 9750cf73acc..cbf29ccf24d 100644 --- a/staging/src/k8s.io/client-go/tools/clientcmd/api/v1/doc.go +++ b/staging/src/k8s.io/client-go/tools/clientcmd/api/v1/doc.go @@ -15,4 +15,5 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package + package v1 diff --git a/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go b/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go index 4ff59560379..c12daad022f 100644 --- a/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go +++ b/staging/src/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go @@ -80,7 +80,7 @@ func (cml *ConfigMapLock) Create(ler LeaderElectionRecord) error { // Update will update an existing annotation on a given resource. func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error { if cml.cm == nil { - return errors.New("endpoint not initialized, call get or create first") + return errors.New("configmap not initialized, call get or create first") } recordBytes, err := json.Marshal(ler) if err != nil { diff --git a/staging/src/k8s.io/client-go/tools/portforward/portforward.go b/staging/src/k8s.io/client-go/tools/portforward/portforward.go index 9d7936e7c5f..bc6f43d7d15 100644 --- a/staging/src/k8s.io/client-go/tools/portforward/portforward.go +++ b/staging/src/k8s.io/client-go/tools/portforward/portforward.go @@ -340,3 +340,20 @@ func (pf *PortForwarder) Close() { } } } + +// GetPorts will return the ports that were forwarded; this can be used to +// retrieve the locally-bound port in cases where the input was port 0. This +// function will signal an error if the Ready channel is nil or if the +// listeners are not ready yet; this function will succeed after the Ready +// channel has been closed. 
+func (pf *PortForwarder) GetPorts() ([]ForwardedPort, error) { + if pf.Ready == nil { + return nil, fmt.Errorf("no Ready channel provided") + } + select { + case <-pf.Ready: + return pf.ports, nil + default: + return nil, fmt.Errorf("listeners not ready") + } +} diff --git a/staging/src/k8s.io/client-go/tools/portforward/portforward_test.go b/staging/src/k8s.io/client-go/tools/portforward/portforward_test.go index 2abbf16f29a..c1dd70b3e5e 100644 --- a/staging/src/k8s.io/client-go/tools/portforward/portforward_test.go +++ b/staging/src/k8s.io/client-go/tools/portforward/portforward_test.go @@ -99,8 +99,17 @@ func TestParsePortsAndNew(t *testing.T) { if dialer.dialed { t.Fatalf("%d: expected not dialed", i) } - if e, a := test.expected, pf.ports; !reflect.DeepEqual(e, a) { - t.Fatalf("%d: ports: expected %#v, got %#v", i, e, a) + if _, portErr := pf.GetPorts(); portErr == nil { + t.Fatalf("%d: GetPorts: error expected but got nil", i) + } + + // mock-signal the Ready channel + close(readyChan) + + if ports, portErr := pf.GetPorts(); portErr != nil { + t.Fatalf("%d: GetPorts: unable to retrieve ports: %s", i, portErr) + } else if !reflect.DeepEqual(test.expected, ports) { + t.Fatalf("%d: ports: expected %#v, got %#v", i, test.expected, ports) } if e, a := expectedStopChan, pf.stopChan; e != a { t.Fatalf("%d: stopChan: expected %#v, got %#v", i, e, a) diff --git a/staging/src/k8s.io/client-go/transport/spdy/spdy.go b/staging/src/k8s.io/client-go/transport/spdy/spdy.go index e0eb468ba36..53cc7ee18c5 100644 --- a/staging/src/k8s.io/client-go/transport/spdy/spdy.go +++ b/staging/src/k8s.io/client-go/transport/spdy/spdy.go @@ -38,7 +38,7 @@ func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, er if err != nil { return nil, nil, err } - upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, true) + upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, true, false) wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper) if err 
!= nil { return nil, nil, err diff --git a/staging/src/k8s.io/client-go/util/workqueue/rate_limitting_queue.go b/staging/src/k8s.io/client-go/util/workqueue/rate_limitting_queue.go index 417ac001b84..8321876acf4 100644 --- a/staging/src/k8s.io/client-go/util/workqueue/rate_limitting_queue.go +++ b/staging/src/k8s.io/client-go/util/workqueue/rate_limitting_queue.go @@ -20,10 +20,10 @@ package workqueue type RateLimitingInterface interface { DelayingInterface - // AddRateLimited adds an item to the workqueue after the rate limiter says its ok + // AddRateLimited adds an item to the workqueue after the rate limiter says it's ok AddRateLimited(item interface{}) - // Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing + // Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you // still have to call `Done` on the queue. Forget(item interface{}) @@ -55,7 +55,7 @@ type rateLimitingType struct { rateLimiter RateLimiter } -// AddRateLimited AddAfter's the item based on the time when the rate limiter says its ok +// AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok func (q *rateLimitingType) AddRateLimited(item interface{}) { q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) } diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/doc.go index b221d7eb49c..3285a056fa9 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/doc.go @@ -16,4 +16,5 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +groupName=example.apiserver.code-generator.k8s.io + package example // import "k8s.io/code-generator/_examples/apiserver/apis/example" diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/doc.go index 5b6bd5b306f..6b1fe6c1196 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example/v1/doc.go @@ -18,4 +18,5 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +k8s:conversion-gen=k8s.io/code-generator/_examples/apiserver/apis/example // +groupName=example.apiserver.code-generator.k8s.io + package v1 diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/doc.go index 3864803757f..0edb56dcddb 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=example.test.apiserver.code-generator.k8s.io // +groupGoName=SecondExample + package example2 // import "k8s.io/code-generator/_examples/apiserver/apis/example2" diff --git a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/doc.go b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/doc.go index 36bd4549cd5..211aefc8c4a 100644 --- a/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/v1/doc.go @@ -19,4 +19,5 @@ limitations under the License. 
// +groupName=example.test.apiserver.code-generator.k8s.io // +k8s:conversion-gen=k8s.io/code-generator/_examples/apiserver/apis/example2 // +groupGoName=SecondExample + package v1 diff --git a/staging/src/k8s.io/code-generator/_examples/crd/apis/example/v1/doc.go b/staging/src/k8s.io/code-generator/_examples/crd/apis/example/v1/doc.go index e6614c0da66..673ac55d7b4 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/apis/example/v1/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/apis/example/v1/doc.go @@ -17,4 +17,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:defaulter-gen=TypeMeta // +groupName=example.crd.code-generator.k8s.io + package v1 diff --git a/staging/src/k8s.io/code-generator/_examples/crd/apis/example2/v1/doc.go b/staging/src/k8s.io/code-generator/_examples/crd/apis/example2/v1/doc.go index 6521d83ff39..5d1cbec5efb 100644 --- a/staging/src/k8s.io/code-generator/_examples/crd/apis/example2/v1/doc.go +++ b/staging/src/k8s.io/code-generator/_examples/crd/apis/example2/v1/doc.go @@ -18,4 +18,5 @@ limitations under the License. 
// +k8s:defaulter-gen=TypeMeta // +groupName=example.test.crd.code-generator.k8s.io // +groupGoName=SecondExample + package v1 diff --git a/staging/src/k8s.io/csi-api/Godeps/Godeps.json b/staging/src/k8s.io/csi-api/Godeps/Godeps.json index f9cc4e25f1c..81318ce2ace 100644 --- a/staging/src/k8s.io/csi-api/Godeps/Godeps.json +++ b/staging/src/k8s.io/csi-api/Godeps/Godeps.json @@ -92,7 +92,7 @@ }, { "ImportPath": "github.com/modern-go/reflect2", - "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" + "Rev": "94122c33edd36123c84d5368cfb2b69df93a0ec8" }, { "ImportPath": "github.com/peterbourgon/diskv", @@ -290,6 +290,18 @@ "ImportPath": "k8s.io/api/storage/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, + { + "ImportPath": "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, + { + "ImportPath": "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/pkg/api/errors", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/staging/src/k8s.io/csi-api/pkg/crd/BUILD b/staging/src/k8s.io/csi-api/pkg/crd/BUILD new file mode 100644 index 00000000000..3a9a64a5c88 --- /dev/null +++ b/staging/src/k8s.io/csi-api/pkg/crd/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["crd.go"], + importmap = "k8s.io/kubernetes/vendor/k8s.io/csi-api/pkg/crd", + importpath = "k8s.io/csi-api/pkg/crd", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + 
"//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["crd_test.go"], + data = glob(["testdata/**"]), + embed = [":go_default_library"], + deps = [ + "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", + "//vendor/github.com/ghodss/yaml:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/csi-api/pkg/crd/crd.go b/staging/src/k8s.io/csi-api/pkg/crd/crd.go new file mode 100644 index 00000000000..4a7936e4140 --- /dev/null +++ b/staging/src/k8s.io/csi-api/pkg/crd/crd.go @@ -0,0 +1,119 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package crd + +import ( + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + csiapiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1" + "reflect" +) + +// NOTE: the CRD functions here and the associated unit tests are non-ideal temporary measures in +// release 1.12 in order to aid manual CRD installation. This installation will be automated in +// subsequent releases and as a result this package will be removed. + +// CSIDriverCRD returns the CustomResourceDefinition for CSIDriver object. +func CSIDriverCRD() *apiextensionsv1beta1.CustomResourceDefinition { + return &apiextensionsv1beta1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: csiapiv1alpha1.CsiDriverResourcePlural + "." + csiapiv1alpha1.GroupName, + }, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: csiapiv1alpha1.GroupName, + Version: csiapiv1alpha1.SchemeGroupVersion.Version, + Scope: apiextensionsv1beta1.ClusterScoped, + Validation: &apiextensionsv1beta1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1beta1.JSONSchemaProps{ + Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ + "spec": { + Description: "Specification of the CSI Driver.", + Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ + "attachRequired": { + Description: "Indicates this CSI volume driver requires an attach operation," + + " and that Kubernetes should call attach and wait for any attach operation to" + + " complete before proceeding to mount.", + Type: "boolean", + }, + "podInfoOnMountVersion": { + Description: "Indicates this CSI volume driver requires additional pod" + + " information (like podName, podUID, etc.) 
during mount operations.", + Type: "string", + }, + }, + }, + }, + }, + }, + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Plural: csiapiv1alpha1.CsiDriverResourcePlural, + Kind: reflect.TypeOf(csiapiv1alpha1.CSIDriver{}).Name(), + }, + }, + } +} + +// CSINodeInfoCRD returns the CustomResourceDefinition for CSINodeInfo object. +func CSINodeInfoCRD() *apiextensionsv1beta1.CustomResourceDefinition { + return &apiextensionsv1beta1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: csiapiv1alpha1.CsiNodeInfoResourcePlural + "." + csiapiv1alpha1.GroupName, + }, + Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ + Group: csiapiv1alpha1.GroupName, + Version: csiapiv1alpha1.SchemeGroupVersion.Version, + Scope: apiextensionsv1beta1.ClusterScoped, + Validation: &apiextensionsv1beta1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1beta1.JSONSchemaProps{ + Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ + "csiDrivers": { + Description: "List of CSI drivers running on the node and their properties.", + Type: "array", + Items: &apiextensionsv1beta1.JSONSchemaPropsOrArray{ + Schema: &apiextensionsv1beta1.JSONSchemaProps{ + Properties: map[string]apiextensionsv1beta1.JSONSchemaProps{ + "driver": { + Description: "The CSI driver that this object refers to.", + Type: "string", + }, + "nodeID": { + Description: "The node from the driver point of view.", + Type: "string", + }, + "topologyKeys": { + Description: "List of keys supported by the driver.", + Type: "array", + Items: &apiextensionsv1beta1.JSONSchemaPropsOrArray{ + Schema: &apiextensionsv1beta1.JSONSchemaProps{ + Type: "string", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ + Plural: csiapiv1alpha1.CsiNodeInfoResourcePlural, + Kind: reflect.TypeOf(csiapiv1alpha1.CSINodeInfo{}).Name(), + }, + }, + } +} diff --git a/staging/src/k8s.io/csi-api/pkg/crd/crd_test.go 
b/staging/src/k8s.io/csi-api/pkg/crd/crd_test.go new file mode 100644 index 00000000000..369a3b458b5 --- /dev/null +++ b/staging/src/k8s.io/csi-api/pkg/crd/crd_test.go @@ -0,0 +1,69 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package crd_test + +import ( + "path/filepath" + "testing" + + "github.com/ghodss/yaml" + "io/ioutil" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsscheme "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/csi-api/pkg/crd" + "os" +) + +func TestBootstrapCRDs(t *testing.T) { + testObjects(t, crd.CSIDriverCRD(), "csidriver.yaml") + testObjects(t, crd.CSINodeInfoCRD(), "csinodeinfo.yaml") +} + +func testObjects(t *testing.T, crd *apiextensionsv1beta1.CustomResourceDefinition, fixtureFilename string) { + filename := filepath.Join("testdata", fixtureFilename) + expectedYAML, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatal(err) + } + + jsonData, err := runtime.Encode(apiextensionsscheme.Codecs.LegacyCodec(apiextensionsv1beta1.SchemeGroupVersion), crd) + if err != nil { + t.Fatal(err) + } + yamlData, err := yaml.JSONToYAML(jsonData) + if err != nil { + t.Fatal(err) + } + if string(yamlData) != string(expectedYAML) { + t.Errorf("Bootstrap CRD data does not match the test fixture in %s", filename) + + const updateEnvVar = 
"UPDATE_CSI_CRD_FIXTURE_DATA" + if os.Getenv(updateEnvVar) == "true" { + if err := ioutil.WriteFile(filename, []byte(yamlData), os.FileMode(0755)); err == nil { + t.Logf("Updated data in %s", filename) + t.Logf("Verify the diff, commit changes, and rerun the tests") + } else { + t.Logf("Could not update data in %s: %v", filename, err) + } + } else { + t.Logf("Diff between data in code and fixture data in %s:\n-------------\n%s", filename, diff.StringDiff(string(yamlData), string(expectedYAML))) + t.Logf("If the change is expected, re-run with %s=true to update the fixtures", updateEnvVar) + } + } +} diff --git a/staging/src/k8s.io/csi-api/pkg/crd/testdata/csidriver.yaml b/staging/src/k8s.io/csi-api/pkg/crd/testdata/csidriver.yaml new file mode 100644 index 00000000000..d950cbf494a --- /dev/null +++ b/staging/src/k8s.io/csi-api/pkg/crd/testdata/csidriver.yaml @@ -0,0 +1,33 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: csidrivers.csi.storage.k8s.io +spec: + group: csi.storage.k8s.io + names: + kind: CSIDriver + plural: csidrivers + scope: Cluster + validation: + openAPIV3Schema: + properties: + spec: + description: Specification of the CSI Driver. + properties: + attachRequired: + description: Indicates this CSI volume driver requires an attach operation, + and that Kubernetes should call attach and wait for any attach operation + to complete before proceeding to mount. + type: boolean + podInfoOnMountVersion: + description: Indicates this CSI volume driver requires additional pod + information (like podName, podUID, etc.) during mount operations. 
+ type: string + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/staging/src/k8s.io/csi-api/pkg/crd/testdata/csinodeinfo.yaml b/staging/src/k8s.io/csi-api/pkg/crd/testdata/csinodeinfo.yaml new file mode 100644 index 00000000000..6ea408d5e77 --- /dev/null +++ b/staging/src/k8s.io/csi-api/pkg/crd/testdata/csinodeinfo.yaml @@ -0,0 +1,37 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: csinodeinfos.csi.storage.k8s.io +spec: + group: csi.storage.k8s.io + names: + kind: CSINodeInfo + plural: csinodeinfos + scope: Cluster + validation: + openAPIV3Schema: + properties: + csiDrivers: + description: List of CSI drivers running on the node and their properties. + items: + properties: + driver: + description: The CSI driver that this object refers to. + type: string + nodeID: + description: The node from the driver point of view. + type: string + topologyKeys: + description: List of keys supported by the driver. 
+ items: + type: string + type: array + type: array + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index a4ae3ae5fde..4c5901100b6 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -260,7 +260,7 @@ }, { "ImportPath": "github.com/modern-go/reflect2", - "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" + "Rev": "94122c33edd36123c84d5368cfb2b69df93a0ec8" }, { "ImportPath": "github.com/mxk/go-flowrate/flowrate", diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go index def606a7e53..3bd2bc737c8 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +groupName=apiregistration.k8s.io // Package api is the internal version of the API. -// +groupName=apiregistration.k8s.io package apiregistration // import "k8s.io/kube-aggregator/pkg/apis/apiregistration" diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go index 33f07794e3a..19f18ed4c2c 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go @@ -17,8 +17,9 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/kube-aggregator/pkg/apis/apiregistration // +k8s:openapi-gen=true +// +groupName=apiregistration.k8s.io -// Package v1beta1 contains the API Registration API, which is responsible for +// Package v1 contains the API Registration API, which is responsible for // registering an API `Group`/`Version` with another kubernetes like API server. // The `APIService` holds information about the other API server in // `APIServiceSpec` type as well as general `TypeMeta` and `ObjectMeta`. The @@ -31,6 +32,4 @@ limitations under the License. // The return status is a set of conditions for this aggregation. Currently // there is only one condition named "Available", if true, it means the // api/server requests will be redirected to specified API server. -// -// +groupName=apiregistration.k8s.io package v1 // import "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go index d5de2002bb1..ad50d368306 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go @@ -17,6 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/kube-aggregator/pkg/apis/apiregistration // +k8s:openapi-gen=true +// +groupName=apiregistration.k8s.io // Package v1beta1 contains the API Registration API, which is responsible for // registering an API `Group`/`Version` with another kubernetes like API server. @@ -31,6 +32,4 @@ limitations under the License. // The return status is a set of conditions for this aggregation. Currently // there is only one condition named "Available", if true, it means the // api/server requests will be redirected to specified API server. 
-// -// +groupName=apiregistration.k8s.io package v1beta1 // import "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go index 5e0d652206e..991d318e13a 100644 --- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go +++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go @@ -161,7 +161,8 @@ func maybeWrapForConnectionUpgrades(restConfig *restclient.Config, rt http.Round return nil, true, err } followRedirects := utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StreamingProxyRedirects) - upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, followRedirects) + requireSameHostRedirects := utilfeature.DefaultFeatureGate.Enabled(genericfeatures.ValidateProxyRedirects) + upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, followRedirects, requireSameHostRedirects) wrappedRT, err := restclient.HTTPWrappersForConfig(restConfig, upgradeRoundTripper) if err != nil { return nil, true, err diff --git a/staging/src/k8s.io/metrics/Godeps/Godeps.json b/staging/src/k8s.io/metrics/Godeps/Godeps.json index 5a2e5ba459b..b764ae31114 100644 --- a/staging/src/k8s.io/metrics/Godeps/Godeps.json +++ b/staging/src/k8s.io/metrics/Godeps/Godeps.json @@ -84,7 +84,7 @@ }, { "ImportPath": "github.com/modern-go/reflect2", - "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" + "Rev": "94122c33edd36123c84d5368cfb2b69df93a0ec8" }, { "ImportPath": "github.com/peterbourgon/diskv", diff --git a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/doc.go b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/doc.go index 73997cac16b..6810baab230 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/doc.go +++ b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/doc.go @@ -16,4 +16,5 @@ limitations under the License. 
// +k8s:deepcopy-gen=package // +groupName=custom.metrics.k8s.io + package custom_metrics diff --git a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/conversion.go b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/conversion.go index ce624011693..0c5cfbaf9e7 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/conversion.go +++ b/staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1/conversion.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/metrics/pkg/apis/custom_metrics" @@ -36,12 +37,38 @@ func addConversionFuncs(scheme *runtime.Scheme) error { } func Convert_v1beta1_MetricValue_To_custom_metrics_MetricValue(in *MetricValue, out *custom_metrics.MetricValue, s conversion.Scope) error { + out.TypeMeta = in.TypeMeta + out.DescribedObject = custom_metrics.ObjectReference{ + Kind: in.DescribedObject.Kind, + Namespace: in.DescribedObject.Namespace, + Name: in.DescribedObject.Name, + UID: in.DescribedObject.UID, + APIVersion: in.DescribedObject.APIVersion, + ResourceVersion: in.DescribedObject.ResourceVersion, + FieldPath: in.DescribedObject.FieldPath, + } + out.Timestamp = in.Timestamp + out.WindowSeconds = in.WindowSeconds + out.Value = in.Value out.Metric.Name = in.MetricName out.Metric.Selector = in.Selector return nil } func Convert_custom_metrics_MetricValue_To_v1beta1_MetricValue(in *custom_metrics.MetricValue, out *MetricValue, s conversion.Scope) error { + out.TypeMeta = in.TypeMeta + out.DescribedObject = v1.ObjectReference{ + Kind: in.DescribedObject.Kind, + Namespace: in.DescribedObject.Namespace, + Name: in.DescribedObject.Name, + UID: in.DescribedObject.UID, + APIVersion: in.DescribedObject.APIVersion, + ResourceVersion: in.DescribedObject.ResourceVersion, + FieldPath: in.DescribedObject.FieldPath, + } + out.Timestamp = in.Timestamp + out.WindowSeconds = in.WindowSeconds + 
out.Value = in.Value out.MetricName = in.Metric.Name out.Selector = in.Metric.Selector return nil diff --git a/staging/src/k8s.io/metrics/pkg/apis/external_metrics/doc.go b/staging/src/k8s.io/metrics/pkg/apis/external_metrics/doc.go index c450a1e3b64..5c6ecec9732 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/external_metrics/doc.go +++ b/staging/src/k8s.io/metrics/pkg/apis/external_metrics/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=external.metrics.k8s.io + package external_metrics diff --git a/staging/src/k8s.io/metrics/pkg/apis/metrics/doc.go b/staging/src/k8s.io/metrics/pkg/apis/metrics/doc.go index 9437a13f8af..0f970b0cebc 100644 --- a/staging/src/k8s.io/metrics/pkg/apis/metrics/doc.go +++ b/staging/src/k8s.io/metrics/pkg/apis/metrics/doc.go @@ -16,4 +16,5 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=metrics.k8s.io + package metrics diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index 94fb0604374..7003a50e3c3 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -252,7 +252,7 @@ }, { "ImportPath": "github.com/modern-go/reflect2", - "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" + "Rev": "94122c33edd36123c84d5368cfb2b69df93a0ec8" }, { "ImportPath": "github.com/pborman/uuid", diff --git a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/doc.go index 782c51d19c0..3b200cf7392 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/doc.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +groupName=wardle.k8s.io // Package api is the internal version of the API. 
-// +groupName=wardle.k8s.io package wardle diff --git a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/doc.go index 22f06f63c90..8b5d3d74241 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/doc.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/sample-apiserver/pkg/apis/wardle // +k8s:defaulter-gen=TypeMeta +// +groupName=wardle.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. -// +groupName=wardle.k8s.io package v1alpha1 diff --git a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1beta1/doc.go b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1beta1/doc.go index aa2f7a5b12b..788d82a99c6 100644 --- a/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1beta1/doc.go +++ b/staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1beta1/doc.go @@ -17,7 +17,7 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/sample-apiserver/pkg/apis/wardle // +k8s:defaulter-gen=TypeMeta +// +groupName=wardle.k8s.io // Package v1beta1 is the v1beta1 version of the API. 
-// +groupName=wardle.k8s.io package v1beta1 diff --git a/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json b/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json index 2e5854d5c61..3b77611e6c0 100644 --- a/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json @@ -92,7 +92,7 @@ }, { "ImportPath": "github.com/modern-go/reflect2", - "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" + "Rev": "94122c33edd36123c84d5368cfb2b69df93a0ec8" }, { "ImportPath": "github.com/peterbourgon/diskv", diff --git a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json index 5100e609c8a..afe92d652a5 100644 --- a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json @@ -100,7 +100,7 @@ }, { "ImportPath": "github.com/modern-go/reflect2", - "Rev": "05fbef0ca5da472bbf96c9322b84a53edc03c9fd" + "Rev": "94122c33edd36123c84d5368cfb2b69df93a0ec8" }, { "ImportPath": "github.com/peterbourgon/diskv", diff --git a/staging/src/k8s.io/sample-controller/controller_test.go b/staging/src/k8s.io/sample-controller/controller_test.go index 8eea816131e..f25459c6c90 100644 --- a/staging/src/k8s.io/sample-controller/controller_test.go +++ b/staging/src/k8s.io/sample-controller/controller_test.go @@ -198,7 +198,7 @@ func checkAction(expected, actual core.Action, t *testing.T) { expPatch := e.GetPatch() patch := a.GetPatch() - if !reflect.DeepEqual(expPatch, expPatch) { + if !reflect.DeepEqual(expPatch, patch) { t.Errorf("Action %s %s has wrong patch\nDiff:\n %s", a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintDiff(expPatch, patch)) } diff --git a/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/doc.go b/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/doc.go index b445526984d..e6c135eed8d 100644 --- 
a/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/doc.go +++ b/staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package +// +groupName=samplecontroller.k8s.io // Package v1alpha1 is the v1alpha1 version of the API. -// +groupName=samplecontroller.k8s.io package v1alpha1 diff --git a/test/cmd/diff.sh b/test/cmd/diff.sh index 4edd8d8121d..d52a75bdfce 100755 --- a/test/cmd/diff.sh +++ b/test/cmd/diff.sh @@ -18,36 +18,22 @@ set -o errexit set -o nounset set -o pipefail -# Runs tests for kubectl alpha diff +# Runs tests for kubectl diff run_kubectl_diff_tests() { set -o nounset set -o errexit create_and_use_new_namespace - kube::log::status "Testing kubectl alpha diff" + kube::log::status "Testing kubectl diff" # Test that it works when the live object doesn't exist - output_message=$(kubectl alpha diff LOCAL LIVE -f hack/testdata/pod.yaml) + output_message=$(kubectl diff -f hack/testdata/pod.yaml) kube::test::if_has_string "${output_message}" 'test-pod' kubectl apply -f hack/testdata/pod.yaml - # Ensure that selfLink has been added, and shown in the diff - output_message=$(kubectl alpha diff -f hack/testdata/pod.yaml) - kube::test::if_has_string "${output_message}" 'selfLink' - output_message=$(kubectl alpha diff LOCAL LIVE -f hack/testdata/pod.yaml) - kube::test::if_has_string "${output_message}" 'selfLink' - output_message=$(kubectl alpha diff LOCAL MERGED -f hack/testdata/pod.yaml) - kube::test::if_has_string "${output_message}" 'selfLink' - - output_message=$(kubectl alpha diff MERGED MERGED -f hack/testdata/pod.yaml) - kube::test::if_empty_string "${output_message}" - output_message=$(kubectl alpha diff LIVE LIVE -f hack/testdata/pod.yaml) - kube::test::if_empty_string "${output_message}" - output_message=$(kubectl alpha diff LAST LAST -f hack/testdata/pod.yaml) - kube::test::if_empty_string "${output_message}" - output_message=$(kubectl 
alpha diff LOCAL LOCAL -f hack/testdata/pod.yaml) - kube::test::if_empty_string "${output_message}" + output_message=$(kubectl diff -f hack/testdata/pod-changed.yaml) + kube::test::if_has_string "${output_message}" 'k8s.gcr.io/pause:3.0' kubectl delete -f hack/testdata/pod.yaml diff --git a/test/cmd/generic-resources.sh b/test/cmd/generic-resources.sh index 14872a19047..66eb82f5c06 100755 --- a/test/cmd/generic-resources.sh +++ b/test/cmd/generic-resources.sh @@ -430,7 +430,7 @@ run_recursive_resources_tests() { ## Attempt to rollback the replication controllers to revision 1 recursively output_message=$(! kubectl rollout undo -f hack/testdata/recursive/rc --recursive --to-revision=1 2>&1 "${kube_flags[@]}") # Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error - kube::test::if_has_string "${output_message}" 'no rollbacker has been implemented for {"" "ReplicationController"}' + kube::test::if_has_string "${output_message}" 'no rollbacker has been implemented for "ReplicationController"' kube::test::if_has_string "${output_message}" "Object 'Kind' is missing" ## Attempt to pause the replication controllers recursively output_message=$(! 
kubectl rollout pause -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}") diff --git a/test/conformance/testdata/conformance.txt b/test/conformance/testdata/conformance.txt index 4fd04cd1530..188c159fe55 100755 --- a/test/conformance/testdata/conformance.txt +++ b/test/conformance/testdata/conformance.txt @@ -134,6 +134,7 @@ test/e2e/common/projected.go: "should provide container's memory request" test/e2e/common/projected.go: "should provide node allocatable (cpu) as default cpu limit if the limit is not set" test/e2e/common/projected.go: "should provide node allocatable (memory) as default memory limit if the limit is not set" test/e2e/common/projected.go: "should project all components that make up the projection API" +test/e2e/common/runtime.go: "should run with the expected status" test/e2e/common/secrets.go: "should be consumable from pods in env vars" test/e2e/common/secrets.go: "should be consumable via the environment" test/e2e/common/secrets_volume.go: "should be consumable from pods in volume" @@ -182,6 +183,8 @@ test/e2e/node/pre_stop.go: "should call prestop when killing a pod" test/e2e/scheduling/predicates.go: "validates resource limits of pods that are allowed to run" test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if not matching" test/e2e/scheduling/predicates.go: "validates that NodeSelector is respected if matching" +test/e2e/storage/empty_dir_wrapper.go: "should not conflict" +test/e2e/storage/empty_dir_wrapper.go: "should not cause race condition when used for configmaps" test/e2e/storage/subpath.go: "should support subpaths with secret pod" test/e2e/storage/subpath.go: "should support subpaths with configmap pod" test/e2e/storage/subpath.go: "should support subpaths with configmap pod with mountPath of existing file" diff --git a/test/e2e/apimachinery/BUILD b/test/e2e/apimachinery/BUILD index 39e5328d447..b60bef32540 100644 --- a/test/e2e/apimachinery/BUILD +++ b/test/e2e/apimachinery/BUILD @@ -28,7 
+28,6 @@ go_library( "//pkg/api/v1/pod:go_default_library", "//pkg/apis/rbac:go_default_library", "//pkg/printers:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/admissionregistration/v1alpha1:go_default_library", "//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", @@ -54,6 +53,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/user:go_default_library", diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index 173b6f56180..58a9fd9e22f 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -32,6 +32,7 @@ import ( unstructuredv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/intstr" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/client-go/discovery" @@ -39,7 +40,6 @@ import ( apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" rbacapi "k8s.io/kubernetes/pkg/apis/rbac" - utilversion "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1" @@ -87,7 +87,7 @@ func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientse 
// delete the APIService first to avoid causing discovery errors _ = aggrclient.ApiregistrationV1beta1().APIServices().Delete("v1alpha1.wardle.k8s.io", nil) - _ = client.AppsV1().Deployments(namespace).Delete("sample-apiserver", nil) + _ = client.AppsV1().Deployments(namespace).Delete("sample-apiserver-deployment", nil) _ = client.CoreV1().Secrets(namespace).Delete("sample-apiserver-secret", nil) _ = client.CoreV1().Services(namespace).Delete("sample-api", nil) _ = client.CoreV1().ServiceAccounts(namespace).Delete("sample-apiserver", nil) diff --git a/test/e2e/apimachinery/custom_resource_definition.go b/test/e2e/apimachinery/custom_resource_definition.go index fea9287e60c..fb0469487e6 100644 --- a/test/e2e/apimachinery/custom_resource_definition.go +++ b/test/e2e/apimachinery/custom_resource_definition.go @@ -20,7 +20,7 @@ import ( "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apiextensions-apiserver/test/integration/fixtures" - utilversion "k8s.io/kubernetes/pkg/util/version" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" . 
"github.com/onsi/ginkgo" diff --git a/test/e2e/apimachinery/table_conversion.go b/test/e2e/apimachinery/table_conversion.go index f431bb28943..2a8b9893bee 100644 --- a/test/e2e/apimachinery/table_conversion.go +++ b/test/e2e/apimachinery/table_conversion.go @@ -31,8 +31,8 @@ import ( metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/client-go/util/workqueue" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/pkg/printers" - utilversion "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" ) diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index ef942e18ad1..f339fd83781 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -33,10 +33,10 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" clientset "k8s.io/client-go/kubernetes" - utilversion "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" @@ -52,17 +52,18 @@ const ( roleBindingName = "webhook-auth-reader" // The webhook configuration names should not be reused between test instances. 
- crWebhookConfigName = "e2e-test-webhook-config-cr" - webhookConfigName = "e2e-test-webhook-config" - attachingPodWebhookConfigName = "e2e-test-webhook-config-attaching-pod" - mutatingWebhookConfigName = "e2e-test-mutating-webhook-config" - podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod" - crMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-cr" - webhookFailClosedConfigName = "e2e-test-webhook-fail-closed" - webhookForWebhooksConfigName = "e2e-test-webhook-for-webhooks-config" - removableValidatingHookName = "e2e-test-should-be-removable-validating-webhook-config" - removableMutatingHookName = "e2e-test-should-be-removable-mutating-webhook-config" - crdWebhookConfigName = "e2e-test-webhook-config-crd" + crWebhookConfigName = "e2e-test-webhook-config-cr" + webhookConfigName = "e2e-test-webhook-config" + attachingPodWebhookConfigName = "e2e-test-webhook-config-attaching-pod" + mutatingWebhookConfigName = "e2e-test-mutating-webhook-config" + podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod" + crMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-cr" + webhookFailClosedConfigName = "e2e-test-webhook-fail-closed" + validatingWebhookForWebhooksConfigName = "e2e-test-validating-webhook-for-webhooks-config" + mutatingWebhookForWebhooksConfigName = "e2e-test-mutating-webhook-for-webhooks-config" + dummyValidatingWebhookConfigName = "e2e-test-dummy-validating-webhook-config" + dummyMutatingWebhookConfigName = "e2e-test-dummy-mutating-webhook-config" + crdWebhookConfigName = "e2e-test-webhook-config-crd" skipNamespaceLabelKey = "skip-webhook-admission" skipNamespaceLabelValue = "yes" @@ -75,6 +76,8 @@ const ( failNamespaceLabelKey = "fail-closed-webhook" failNamespaceLabelValue = "yes" failNamespaceName = "fail-closed-namesapce" + addedLabelKey = "added-label" + addedLabelValue = "yes" ) var serverWebhookVersion = utilversion.MustParseSemantic("v1.8.0") @@ -154,10 +157,12 @@ var _ = SIGDescribe("AdmissionWebhook", func() { 
testMutatingPodWebhook(f) }) - It("Should not be able to prevent deleting validating-webhook-configurations or mutating-webhook-configurations", func() { - webhookCleanup := registerWebhookForWebhookConfigurations(f, context) - defer webhookCleanup() - testWebhookForWebhookConfigurations(f) + It("Should not be able to mutate or prevent deletion of webhook configuration objects", func() { + validatingWebhookCleanup := registerValidatingWebhookForWebhookConfigurations(f, context) + defer validatingWebhookCleanup() + mutatingWebhookCleanup := registerMutatingWebhookForWebhookConfigurations(f, context) + defer mutatingWebhookCleanup() + testWebhooksForWebhookConfigurations(f) }) It("Should mutate custom resource", func() { @@ -801,16 +806,18 @@ func testFailClosedWebhook(f *framework.Framework) { } } -func registerWebhookForWebhookConfigurations(f *framework.Framework, context *certContext) func() { +func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, context *certContext) func() { var err error client := f.ClientSet - By("Registering a webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API") + By("Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API") namespace := f.Namespace.Name - configName := webhookForWebhooksConfigName + configName := validatingWebhookForWebhooksConfigName failurePolicy := v1beta1.Fail - // This webhook will deny all requests to Delete admissionregistration objects + // This webhook denies all requests to Delete validating webhook configuration and + // mutating webhook configuration objects. It should never be called, however, because + // dynamic admission webhooks should not be called on requests involving webhook configuration objects. 
_, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: configName, @@ -841,7 +848,6 @@ func registerWebhookForWebhookConfigurations(f *framework.Framework, context *ce }, }, }) - framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace) // The webhook configuration is honored in 10s. @@ -852,23 +858,76 @@ func registerWebhookForWebhookConfigurations(f *framework.Framework, context *ce } } -// This test assumes that the deletion-rejecting webhook defined in -// registerWebhookForWebhookConfigurations is in place. -func testWebhookForWebhookConfigurations(f *framework.Framework) { +func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, context *certContext) func() { var err error client := f.ClientSet - By("Creating a validating-webhook-configuration object") + By("Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API") + + namespace := f.Namespace.Name + configName := mutatingWebhookForWebhooksConfigName + failurePolicy := v1beta1.Fail + + // This webhook adds a label to all requests create to validating webhook configuration and + // mutating webhook configuration objects. It should never be called, however, because + // dynamic admission webhooks should not be called on requests involving webhook configuration objects. 
+ _, err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: configName, + }, + Webhooks: []v1beta1.Webhook{ + { + Name: "add-label-to-webhook-configurations.k8s.io", + Rules: []v1beta1.RuleWithOperations{{ + Operations: []v1beta1.OperationType{v1beta1.Create}, + Rule: v1beta1.Rule{ + APIGroups: []string{"admissionregistration.k8s.io"}, + APIVersions: []string{"*"}, + Resources: []string{ + "validatingwebhookconfigurations", + "mutatingwebhookconfigurations", + }, + }, + }}, + ClientConfig: v1beta1.WebhookClientConfig{ + Service: &v1beta1.ServiceReference{ + Namespace: namespace, + Name: serviceName, + Path: strPtr("/add-label"), + }, + CABundle: context.signingCert, + }, + FailurePolicy: &failurePolicy, + }, + }, + }) + framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace) + + // The webhook configuration is honored in 10s. + time.Sleep(10 * time.Second) + return func() { + err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(configName, nil) + framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace) + } +} + +// This test assumes that the deletion-rejecting webhook defined in +// registerValidatingWebhookForWebhookConfigurations and the webhook-config-mutating +// webhook defined in registerMutatingWebhookForWebhookConfigurations already exist. 
+func testWebhooksForWebhookConfigurations(f *framework.Framework) { + var err error + client := f.ClientSet + By("Creating a dummy validating-webhook-configuration object") namespace := f.Namespace.Name failurePolicy := v1beta1.Ignore - _, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{ + mutatedValidatingWebhookConfiguration, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ - Name: removableValidatingHookName, + Name: dummyValidatingWebhookConfigName, }, Webhooks: []v1beta1.Webhook{ { - Name: "should-be-removable-validating-webhook.k8s.io", + Name: "dummy-validating-webhook.k8s.io", Rules: []v1beta1.RuleWithOperations{{ Operations: []v1beta1.OperationType{v1beta1.Create}, // This will not match any real resources so this webhook should never be called. @@ -894,25 +953,28 @@ func testWebhookForWebhookConfigurations(f *framework.Framework) { }, }, }) - framework.ExpectNoError(err, "registering webhook config %s with namespace %s", removableValidatingHookName, namespace) + framework.ExpectNoError(err, "registering webhook config %s with namespace %s", dummyValidatingWebhookConfigName, namespace) + if mutatedValidatingWebhookConfiguration.ObjectMeta.Labels != nil && mutatedValidatingWebhookConfiguration.ObjectMeta.Labels[addedLabelKey] == addedLabelValue { + framework.Failf("expected %s not to be mutated by mutating webhooks but it was", dummyValidatingWebhookConfigName) + } // The webhook configuration is honored in 10s. 
time.Sleep(10 * time.Second) By("Deleting the validating-webhook-configuration, which should be possible to remove") - err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(removableValidatingHookName, nil) - framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", removableValidatingHookName, namespace) + err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(dummyValidatingWebhookConfigName, nil) + framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", dummyValidatingWebhookConfigName, namespace) - By("Creating a mutating-webhook-configuration object") + By("Creating a dummy mutating-webhook-configuration object") - _, err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{ + mutatedMutatingWebhookConfiguration, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ - Name: removableMutatingHookName, + Name: dummyMutatingWebhookConfigName, }, Webhooks: []v1beta1.Webhook{ { - Name: "should-be-removable-mutating-webhook.k8s.io", + Name: "dummy-mutating-webhook.k8s.io", Rules: []v1beta1.RuleWithOperations{{ Operations: []v1beta1.OperationType{v1beta1.Create}, // This will not match any real resources so this webhook should never be called. 
@@ -938,15 +1000,18 @@ func testWebhookForWebhookConfigurations(f *framework.Framework) { }, }, }) - framework.ExpectNoError(err, "registering webhook config %s with namespace %s", removableMutatingHookName, namespace) + framework.ExpectNoError(err, "registering webhook config %s with namespace %s", dummyMutatingWebhookConfigName, namespace) + if mutatedMutatingWebhookConfiguration.ObjectMeta.Labels != nil && mutatedMutatingWebhookConfiguration.ObjectMeta.Labels[addedLabelKey] == addedLabelValue { + framework.Failf("expected %s not to be mutated by mutating webhooks but it was", dummyMutatingWebhookConfigName) + } // The webhook configuration is honored in 10s. time.Sleep(10 * time.Second) By("Deleting the mutating-webhook-configuration, which should be possible to remove") - err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(removableMutatingHookName, nil) - framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", removableMutatingHookName, namespace) + err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(dummyMutatingWebhookConfigName, nil) + framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", dummyMutatingWebhookConfigName, namespace) } func createNamespace(f *framework.Framework, ns *v1.Namespace) error { diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index e7df3761cb9..31c541711fe 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -180,8 +180,8 @@ var _ = SIGDescribe("CronJob", func() { Expect(err).NotTo(HaveOccurred()) By("Ensuring no unexpected event has happened") - err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"}) - Expect(err).NotTo(HaveOccurred()) + err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"}) + Expect(err).To(HaveOccurred()) By("Removing cronjob") err = 
deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) @@ -219,8 +219,8 @@ var _ = SIGDescribe("CronJob", func() { Expect(err).NotTo(HaveOccurred()) By("Ensuring MissingJob event has occurred") - err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"}) - Expect(err).To(HaveOccurred()) + err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"}) + Expect(err).NotTo(HaveOccurred()) By("Removing cronjob") err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name) @@ -426,24 +426,26 @@ func waitForAnyFinishedJob(c clientset.Interface, ns string) error { }) } -// checkNoEventWithReason checks no events with a reason within a list has occurred -func checkNoEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error { - sj, err := c.BatchV1beta1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("Error in getting cronjob %s/%s: %v", ns, cronJobName, err) - } - events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, sj) - if err != nil { - return fmt.Errorf("Error in listing events: %s", err) - } - for _, e := range events.Items { - for _, reason := range reasons { - if e.Reason == reason { - return fmt.Errorf("Found event with reason %s: %#v", reason, e) +// waitForEventWithReason waits for events with a reason within a list has occurred +func waitForEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error { + return wait.Poll(framework.Poll, 30*time.Second, func() (bool, error) { + sj, err := c.BatchV1beta1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{}) + if err != nil { + return false, err + } + events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, sj) + if err != nil { + return false, err + } + for _, e := range events.Items { + for _, reason := range reasons { + if e.Reason == reason { + return true, nil + } } } - } - return nil + return false, nil + }) } // 
filterNotDeletedJobs returns the job list without any jobs that are pending diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 62e493af1cc..e9e0e542f08 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -115,8 +115,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { nodeCount = len(nodes.Items) coreCount = 0 for _, node := range nodes.Items { - quentity := node.Status.Capacity[v1.ResourceCPU] - coreCount += quentity.Value() + quantity := node.Status.Allocatable[v1.ResourceCPU] + coreCount += quantity.Value() } By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount)) Expect(nodeCount).NotTo(BeZero()) @@ -369,6 +369,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { defer deleteNodePool(extraPoolName) extraNodes := getPoolInitialSize(extraPoolName) framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout)) + // We wait for nodes to become schedulable to make sure the new nodes + // will be returned by getPoolNodes below. 
+ framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout)) glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).") By("Getting memory available on new nodes, so we can account for it when creating RC") @@ -376,7 +379,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { Expect(len(nodes)).Should(Equal(extraNodes)) extraMemMb := 0 for _, node := range nodes { - mem := node.Status.Capacity[v1.ResourceMemory] + mem := node.Status.Allocatable[v1.ResourceMemory] extraMemMb += int((&mem).Value() / 1024 / 1024) } diff --git a/test/e2e/common/BUILD b/test/e2e/common/BUILD index 74f14a9bd3f..779fff5ef45 100644 --- a/test/e2e/common/BUILD +++ b/test/e2e/common/BUILD @@ -22,6 +22,7 @@ go_library( "expansion.go", "host_path.go", "init_container.go", + "kubelet.go", "kubelet_etc_hosts.go", "lifecycle_hook.go", "networking.go", @@ -44,14 +45,12 @@ go_library( "//pkg/apis/core:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", "//pkg/client/conditions:go_default_library", - "//pkg/features:go_default_library", "//pkg/kubelet:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/images:go_default_library", "//pkg/kubelet/sysctl:go_default_library", "//pkg/security/apparmor:go_default_library", "//pkg/util/slice:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/autoscaling/v1:go_default_library", "//staging/src/k8s.io/api/batch/v1:go_default_library", "//staging/src/k8s.io/api/coordination/v1beta1:go_default_library", @@ -67,9 +66,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/scale:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", diff --git a/test/e2e/common/downward_api.go b/test/e2e/common/downward_api.go index 2f779e9a77b..3776dd0bbc7 100644 --- a/test/e2e/common/downward_api.go +++ b/test/e2e/common/downward_api.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" - utilversion "k8s.io/kubernetes/pkg/util/version" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" diff --git a/test/e2e_node/kubelet_test.go b/test/e2e/common/kubelet.go similarity index 86% rename from test/e2e_node/kubelet_test.go rename to test/e2e/common/kubelet.go index 72c50695186..09575cda512 100644 --- a/test/e2e_node/kubelet_test.go +++ b/test/e2e/common/kubelet.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package e2e_node +package common import ( "bytes" @@ -39,12 +39,13 @@ var _ = framework.KubeDescribe("Kubelet", func() { }) Context("when scheduling a busybox command in a pod", func() { podName := "busybox-scheduling-" + string(uuid.NewUUID()) + /* Release : v1.9 Testname: Kubelet, log output, default Description: By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs. 
*/ - It("it should print the output to logs [NodeConformance]", func() { + It("should print the output to logs [NodeConformance]", func() { podClient.CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, @@ -54,7 +55,7 @@ var _ = framework.KubeDescribe("Kubelet", func() { RestartPolicy: v1.RestartPolicyNever, Containers: []v1.Container{ { - Image: busyboxImage, + Image: framework.BusyBoxImage, Name: podName, Command: []string{"sh", "-c", "echo 'Hello World' ; sleep 240"}, }, @@ -88,7 +89,7 @@ var _ = framework.KubeDescribe("Kubelet", func() { RestartPolicy: v1.RestartPolicyNever, Containers: []v1.Container{ { - Image: busyboxImage, + Image: framework.BusyBoxImage, Name: podName, Command: []string{"/bin/false"}, }, @@ -97,7 +98,7 @@ var _ = framework.KubeDescribe("Kubelet", func() { }) }) - It("should have an error terminated reason [NodeConformance]", func() { + It("should have an terminated reason [NodeConformance]", func() { Eventually(func() error { podData, err := podClient.Get(podName, metav1.GetOptions{}) if err != nil { @@ -110,8 +111,8 @@ var _ = framework.KubeDescribe("Kubelet", func() { if contTerminatedState == nil { return fmt.Errorf("expected state to be terminated. Got pod status: %+v", podData.Status) } - if contTerminatedState.Reason != "Error" { - return fmt.Errorf("expected terminated state reason to be error. Got %+v", contTerminatedState) + if contTerminatedState.ExitCode == 0 || contTerminatedState.Reason == "" { + return fmt.Errorf("expected non-zero exitCode and non-empty terminated state reason. 
Got exitCode: %+v and terminated state reason: %+v", contTerminatedState.ExitCode, contTerminatedState.Reason) } return nil }, time.Minute, time.Second*4).Should(BeNil()) @@ -125,7 +126,7 @@ var _ = framework.KubeDescribe("Kubelet", func() { Context("when scheduling a busybox Pod with hostAliases", func() { podName := "busybox-host-aliases" + string(uuid.NewUUID()) - It("it should write entries to /etc/hosts [NodeConformance]", func() { + It("should write entries to /etc/hosts [NodeConformance]", func() { podClient.CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, @@ -135,7 +136,7 @@ var _ = framework.KubeDescribe("Kubelet", func() { RestartPolicy: v1.RestartPolicyNever, Containers: []v1.Container{ { - Image: busyboxImage, + Image: framework.BusyBoxImage, Name: podName, Command: []string{"/bin/sh", "-c", "cat /etc/hosts; sleep 6000"}, }, @@ -169,12 +170,8 @@ var _ = framework.KubeDescribe("Kubelet", func() { }) Context("when scheduling a read only busybox container", func() { podName := "busybox-readonly-fs" + string(uuid.NewUUID()) - /* - Release : v1.9 - Testname: Kubelet, Pod with read only root file system - Description: Create a Pod with security context set with ReadOnlyRootFileSystem set to true. The Pod then tries to write to the /file on the root, write operation to the root filesystem MUST fail as expected. 
- */ - It("it should not write to root filesystem [NodeConformance]", func() { + + It("should not write to root filesystem [NodeConformance]", func() { isReadOnly := true podClient.CreateSync(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -185,7 +182,7 @@ var _ = framework.KubeDescribe("Kubelet", func() { RestartPolicy: v1.RestartPolicyNever, Containers: []v1.Container{ { - Image: busyboxImage, + Image: framework.BusyBoxImage, Name: podName, Command: []string{"/bin/sh", "-c", "echo test > /file; sleep 240"}, SecurityContext: &v1.SecurityContext{ diff --git a/test/e2e/common/runtime.go b/test/e2e/common/runtime.go index c9433f64eee..06030f6d40e 100644 --- a/test/e2e/common/runtime.go +++ b/test/e2e/common/runtime.go @@ -37,7 +37,12 @@ var _ = framework.KubeDescribe("Container Runtime", func() { Describe("blackbox test", func() { Context("when starting a container that exits", func() { - It("should run with the expected status [NodeConformance]", func() { + /* + Release : v1.13 + Testname: Container Runtime, Restart Policy, Pod Phases + Description: If the restart policy is set to ‘Always’, Pod MUST be restarted when terminated, If restart policy is ‘OnFailure’, Pod MUST be started only if it is terminated with non-zero exit code. If the restart policy is ‘Never’, Pod MUST never be restarted. All these three test cases MUST verify the restart counts accordingly. 
+ */ + framework.ConformanceIt("should run with the expected status [NodeConformance]", func() { restartCountVolumeName := "restart-count" restartCountVolumePath := "/restart-count" testContainer := v1.Container{ @@ -283,13 +288,13 @@ while true; do sleep 1; done }, { description: "should not be able to pull from private registry without secret", - image: "gcr.io/authenticated-image-pulling/alpine:3.1", + image: "gcr.io/authenticated-image-pulling/alpine:3.7", phase: v1.PodPending, waiting: true, }, { description: "should be able to pull from private registry with secret", - image: "gcr.io/authenticated-image-pulling/alpine:3.1", + image: "gcr.io/authenticated-image-pulling/alpine:3.7", secret: true, phase: v1.PodRunning, waiting: false, diff --git a/test/e2e/common/ttlafterfinished.go b/test/e2e/common/ttlafterfinished.go index 1c62997bd23..24c4694e982 100644 --- a/test/e2e/common/ttlafterfinished.go +++ b/test/e2e/common/ttlafterfinished.go @@ -17,13 +17,12 @@ limitations under the License. 
package common import ( + "fmt" "time" batch "k8s.io/api/batch/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/slice" "k8s.io/kubernetes/test/e2e/framework" @@ -36,10 +35,8 @@ const dummyFinalizer = "k8s.io/dummy-finalizer" var _ = framework.KubeDescribe("TTLAfterFinished", func() { f := framework.NewDefaultFramework("ttlafterfinished") - It("Job should be deleted once it finishes after TTL seconds [Feature:TTLAfterFinished]", func() { - if !utilfeature.DefaultFeatureGate.Enabled(features.TTLAfterFinished) { - framework.Skipf("Skip because %s feature is not enabled; run test with --feature-gates=%s=true", features.TTLAfterFinished, features.TTLAfterFinished) - } + alphaFeatureStr := "[Feature:TTLAfterFinished]" + It(fmt.Sprintf("Job should be deleted once it finishes after TTL seconds %s", alphaFeatureStr), func() { testFinishedJob(f) }) }) diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index 39c32e14013..ce428b297bf 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -81,7 +81,6 @@ go_library( "//pkg/util/file:go_default_library", "//pkg/util/system:go_default_library", "//pkg/util/taints:go_default_library", - "//pkg/util/version:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/apps/v1beta2:go_default_library", @@ -109,6 +108,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/version:go_default_library", diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index c51efa17b28..0b8f64cf9d8 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -26,6 +26,7 @@ import ( "time" "k8s.io/api/core/v1" + apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -67,6 +68,7 @@ type Framework struct { ClientSet clientset.Interface KubemarkExternalClusterClientSet clientset.Interface + APIExtensionsClientSet apiextensionsclient.Interface CSIClientSet csi.Interface InternalClientset *internalclientset.Clientset @@ -176,6 +178,8 @@ func (f *Framework) BeforeEach() { } f.ClientSet, err = clientset.NewForConfig(config) Expect(err).NotTo(HaveOccurred()) + f.APIExtensionsClientSet, err = apiextensionsclient.NewForConfig(config) + Expect(err).NotTo(HaveOccurred()) f.InternalClientset, err = internalclientset.NewForConfig(config) Expect(err).NotTo(HaveOccurred()) f.AggregatorClient, err = aggregatorclient.NewForConfig(config) diff --git a/test/e2e/framework/nodes_util.go b/test/e2e/framework/nodes_util.go index 6b8ffeb7c11..d0731cbff5d 100644 --- a/test/e2e/framework/nodes_util.go +++ b/test/e2e/framework/nodes_util.go @@ -63,7 +63,7 @@ func etcdUpgradeGCE(target_storage, target_version string) error { os.Environ(), "TEST_ETCD_VERSION="+target_version, "STORAGE_BACKEND="+target_storage, - "TEST_ETCD_IMAGE=3.2.24-0") + "TEST_ETCD_IMAGE=3.2.24-1") _, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M") return err @@ -103,7 +103,7 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error { env = append(env, "TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion, "STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage, - "TEST_ETCD_IMAGE=3.2.24-0") + "TEST_ETCD_IMAGE=3.2.24-1") } else { // In e2e tests, 
we skip the confirmation prompt about // implicit etcd upgrades to simulate the user entering "y". diff --git a/test/e2e/framework/pv_util.go b/test/e2e/framework/pv_util.go index cedb857f213..be9a157d933 100644 --- a/test/e2e/framework/pv_util.go +++ b/test/e2e/framework/pv_util.go @@ -507,6 +507,9 @@ func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error { // Deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod // not existing. func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) error { + if pod == nil { + return nil + } return DeletePodWithWaitByName(f, c, pod.GetName(), pod.GetNamespace()) } diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index e7d06b2ed1a..c1edbf7d0d2 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -233,7 +233,7 @@ func RegisterCommonFlags() { flag.StringVar(&TestContext.SystemdServices, "systemd-services", "docker", "The comma separated list of systemd services the framework will dump logs for.") flag.StringVar(&TestContext.ImageServiceEndpoint, "image-service-endpoint", "", "The image service endpoint of cluster VM instances.") flag.StringVar(&TestContext.DockershimCheckpointDir, "dockershim-checkpoint-dir", "/var/lib/dockershim/sandbox", "The directory for dockershim to store sandbox checkpoints.") - flag.StringVar(&TestContext.KubernetesAnywherePath, "kubernetes-anywhere-path", "/workspace/kubernetes-anywhere", "Which directory kubernetes-anywhere is installed to.") + flag.StringVar(&TestContext.KubernetesAnywherePath, "kubernetes-anywhere-path", "/workspace/k8s.io/kubernetes-anywhere", "Which directory kubernetes-anywhere is installed to.") } // Register flags specific to the cluster e2e test suite. 
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 1ef4a3e6836..66eb8728019 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -63,6 +63,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" + utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" utilyaml "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/apimachinery/pkg/watch" @@ -97,7 +98,6 @@ import ( sshutil "k8s.io/kubernetes/pkg/ssh" "k8s.io/kubernetes/pkg/util/system" taintutils "k8s.io/kubernetes/pkg/util/taints" - utilversion "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -4153,7 +4153,9 @@ func CheckNodesReady(c clientset.Interface, size int, timeout time.Duration) ([] // Filter out not-ready nodes. FilterNodes(nodes, func(node v1.Node) bool { - return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) + nodeReady := IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) + networkReady := IsNodeConditionUnset(&node, v1.NodeNetworkUnavailable) || IsNodeConditionSetAsExpected(&node, v1.NodeNetworkUnavailable, false) + return nodeReady && networkReady }) numReady := len(nodes.Items) diff --git a/test/e2e/instrumentation/monitoring/BUILD b/test/e2e/instrumentation/monitoring/BUILD index 8f647a964c3..94327d8f934 100644 --- a/test/e2e/instrumentation/monitoring/BUILD +++ b/test/e2e/instrumentation/monitoring/BUILD @@ -22,7 +22,6 @@ go_library( "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", "//staging/src/k8s.io/api/rbac/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -30,8 +29,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/selection:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/discovery:go_default_library", + "//staging/src/k8s.io/client-go/discovery/cached:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1:go_default_library", + "//staging/src/k8s.io/client-go/restmapper:go_default_library", "//staging/src/k8s.io/metrics/pkg/client/custom_metrics:go_default_library", "//staging/src/k8s.io/metrics/pkg/client/external_metrics:go_default_library", "//test/e2e/common:go_default_library", diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go index 36bed14bc69..9c77aa43a40 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go @@ -29,13 +29,13 @@ import ( gcm "google.golang.org/api/monitoring/v3" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/selection" "k8s.io/client-go/discovery" + cacheddiscovery "k8s.io/client-go/discovery/cached" + "k8s.io/client-go/restmapper" "k8s.io/kubernetes/test/e2e/framework" - cmv1beta1 "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1" customclient "k8s.io/metrics/pkg/client/custom_metrics" externalclient "k8s.io/metrics/pkg/client/external_metrics" ) @@ -60,8 +60,10 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { framework.Failf("Failed to load config: %s", err) } discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(config) + cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoveryClient) + restMapper := 
restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient) + restMapper.Reset() apiVersionsGetter := customclient.NewAvailableAPIsGetter(discoveryClient) - restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{cmv1beta1.SchemeGroupVersion}) customMetricsClient := customclient.NewForConfig(config, restMapper, apiVersionsGetter) testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForOldResourceModel) }) @@ -73,8 +75,10 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { framework.Failf("Failed to load config: %s", err) } discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(config) + cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoveryClient) + restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient) + restMapper.Reset() apiVersionsGetter := customclient.NewAvailableAPIsGetter(discoveryClient) - restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{cmv1beta1.SchemeGroupVersion}) customMetricsClient := customclient.NewForConfig(config, restMapper, apiVersionsGetter) testCustomMetrics(f, kubeClient, customMetricsClient, discoveryClient, AdapterForNewResourceModel) }) diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index 25f44a0fd1e..84252336c6c 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -458,7 +458,7 @@ var _ = SIGDescribe("Kubectl client", func() { } // Verify the proxy server logs saw the connection - expectedProxyLog := fmt.Sprintf("Accepting CONNECT to %s", strings.TrimRight(strings.TrimLeft(framework.TestContext.Host, "https://"), "/api")) + expectedProxyLog := fmt.Sprintf("Accepting CONNECT to %s", strings.TrimSuffix(strings.TrimPrefix(framework.TestContext.Host, "https://"), "/api")) proxyLog := proxyLogs.String() if !strings.Contains(proxyLog, expectedProxyLog) { @@ -799,11 +799,11 @@ metadata: By("apply file doesn't have replicas") framework.RunKubectlOrDieInput(deployment2Yaml, "apply", 
"set-last-applied", "-f", "-", nsFlag) - By("check last-applied has been updated, annotations doesn't replicas") + By("check last-applied has been updated, annotations doesn't have replicas") output = framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json") requiredString = "\"replicas\": 2" if strings.Contains(output, requiredString) { - framework.Failf("Missing %s in kubectl view-last-applied", requiredString) + framework.Failf("Presenting %s in kubectl view-last-applied", requiredString) } By("scale set replicas to 3") diff --git a/test/e2e/lifecycle/BUILD b/test/e2e/lifecycle/BUILD index acaf83c0453..89585641a60 100644 --- a/test/e2e/lifecycle/BUILD +++ b/test/e2e/lifecycle/BUILD @@ -22,12 +22,12 @@ go_library( "//pkg/apis/core:go_default_library", "//pkg/kubelet/pod:go_default_library", "//pkg/master/ports:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/discovery:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", diff --git a/test/e2e/lifecycle/cluster_upgrade.go b/test/e2e/lifecycle/cluster_upgrade.go index c82c7e011cd..d8d4a48bf5b 100644 --- a/test/e2e/lifecycle/cluster_upgrade.go +++ b/test/e2e/lifecycle/cluster_upgrade.go @@ -26,8 +26,8 @@ import ( "sync" "time" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/discovery" - "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/chaosmonkey" "k8s.io/kubernetes/test/e2e/framework" 
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" diff --git a/test/e2e/network/dns_configmap.go b/test/e2e/network/dns_configmap.go index f730d54e8ee..c4203821a4e 100644 --- a/test/e2e/network/dns_configmap.go +++ b/test/e2e/network/dns_configmap.go @@ -41,7 +41,7 @@ var ( moreForeverTestTimeout = 2 * 60 * time.Second ) -var _ = SIGDescribe("DNS configMap federations", func() { +var _ = SIGDescribe("DNS configMap federations [Feature:Federation]", func() { t := &dnsFederationsConfigMapTest{dnsTestCommon: newDnsTestCommon()} diff --git a/test/e2e/network/example_cluster_dns.go b/test/e2e/network/example_cluster_dns.go index d7c2503c1de..9bbaa824b27 100644 --- a/test/e2e/network/example_cluster_dns.go +++ b/test/e2e/network/example_cluster_dns.go @@ -57,7 +57,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { It("should create pod that uses dns", func() { mkpath := func(file string) string { - return filepath.Join(framework.TestContext.RepoRoot, "examples/cluster-dns", file) + return filepath.Join(os.Getenv("GOPATH"), "src/k8s.io/examples/staging/cluster-dns", file) } // contrary to the example, this test does not use contexts, for simplicity diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index acfaae982d2..c70c45ed8e8 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -1571,6 +1571,9 @@ var _ = SIGDescribe("Services", func() { // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. It("should have session affinity work for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() { + // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. 
+ framework.SkipIfProviderIs("aws") + svc := getServeHostnameService("service") svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal @@ -1579,6 +1582,9 @@ var _ = SIGDescribe("Services", func() { // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. It("should be able to switch session affinity for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() { + // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. + framework.SkipIfProviderIs("aws") + svc := getServeHostnameService("service") svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal @@ -1587,6 +1593,9 @@ var _ = SIGDescribe("Services", func() { // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. It("should have session affinity work for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() { + // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. + framework.SkipIfProviderIs("aws") + svc := getServeHostnameService("service") svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster @@ -1595,6 +1604,9 @@ var _ = SIGDescribe("Services", func() { // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. It("should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() { + // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. 
+ framework.SkipIfProviderIs("aws") + svc := getServeHostnameService("service") svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD index d8c211b9c0f..1dcf7f06623 100644 --- a/test/e2e/scheduling/BUILD +++ b/test/e2e/scheduling/BUILD @@ -22,9 +22,8 @@ go_library( "//pkg/apis/extensions:go_default_library", "//pkg/apis/scheduling:go_default_library", "//pkg/kubelet/apis:go_default_library", - "//pkg/quota/evaluator/core:go_default_library", + "//pkg/quota/v1/evaluator/core:go_default_library", "//pkg/scheduler/algorithm/priorities/util:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", "//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library", @@ -35,6 +34,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index 4c01a633af6..4bceb42b77f 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -98,7 +98,7 @@ func getGPUsAvailable(f *framework.Framework) int64 { framework.ExpectNoError(err, "getting node list") var gpusAvailable int64 for _, node := range nodeList.Items { - if val, ok := node.Status.Capacity[gpuResourceName]; ok { + if val, ok := node.Status.Allocatable[gpuResourceName]; ok { gpusAvailable += (&val).Value() } } diff 
--git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index b2aca97c1d7..ed47b7af051 100644 --- a/test/e2e/scheduling/predicates.go +++ b/test/e2e/scheduling/predicates.go @@ -26,8 +26,8 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" + utilversion "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" - utilversion "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" testutils "k8s.io/kubernetes/test/utils" diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index 1ee85ede306..0f47f656a8a 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -34,7 +34,7 @@ import ( _ "github.com/stretchr/testify/assert" ) -var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func() { +var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { var cs clientset.Interface var nodeList *v1.NodeList var ns string @@ -315,7 +315,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial] [Feature:PodPreemption]", func }) }) -var _ = SIGDescribe("PodPriorityResolution [Serial] [Feature:PodPreemption]", func() { +var _ = SIGDescribe("PodPriorityResolution [Serial]", func() { var cs clientset.Interface var ns string f := framework.NewDefaultFramework("sched-pod-priority") diff --git a/test/e2e/scheduling/resource_quota.go b/test/e2e/scheduling/resource_quota.go index 375fd61562e..40f5b16f3cd 100644 --- a/test/e2e/scheduling/resource_quota.go +++ b/test/e2e/scheduling/resource_quota.go @@ -29,7 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/quota/evaluator/core" + "k8s.io/kubernetes/pkg/quota/v1/evaluator/core" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" diff --git 
a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index 53c30eaf353..395a2a7ac5e 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -34,7 +34,6 @@ go_library( "//pkg/kubelet/apis:go_default_library", "//pkg/kubelet/metrics:go_default_library", "//pkg/util/slice:go_default_library", - "//pkg/util/version:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -44,6 +43,8 @@ go_library( "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1beta1:go_default_library", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library", + "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -56,6 +57,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/version:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", @@ -64,6 +66,7 @@ go_library( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library", "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", + "//staging/src/k8s.io/csi-api/pkg/crd:go_default_library", 
"//test/e2e/framework:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/generated:go_default_library", diff --git a/test/e2e/storage/csi_objects.go b/test/e2e/storage/csi_objects.go index aadf4dfe608..4b719517d7f 100644 --- a/test/e2e/storage/csi_objects.go +++ b/test/e2e/storage/csi_objects.go @@ -21,12 +21,17 @@ package storage import ( "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" "time" "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" @@ -35,6 +40,10 @@ import ( "k8s.io/kubernetes/test/e2e/manifest" . "github.com/onsi/ginkgo" + + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + csicrd "k8s.io/csi-api/pkg/crd" ) var csiImageVersions = map[string]string{ @@ -117,7 +126,8 @@ func csiServiceAccount( serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace) sa := &v1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ - Name: serviceAccountName, + Name: serviceAccountName, + Namespace: config.Namespace, }, } @@ -154,14 +164,13 @@ func csiClusterRoleBindings( By(fmt.Sprintf("%v cluster roles %v to the CSI service account %v", bindingString, clusterRolesNames, sa.GetName())) clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings() for _, clusterRoleName := range clusterRolesNames { - binding := &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: config.Prefix + "-" + clusterRoleName + "-" + config.Namespace + "-role-binding", + Name: clusterRoleName + "-" + config.Namespace + "-" + string(uuid.NewUUID()), }, Subjects: []rbacv1.Subject{ { - Kind: "ServiceAccount", + Kind: rbacv1.ServiceAccountKind, Name: sa.GetName(), Namespace: sa.GetNamespace(), }, @@ -427,3 +436,87 @@ 
func deployGCEPDCSIDriver( framework.ExpectNoError(err, "Failed to create DaemonSet: %v", nodeds.Name) } + +func createCSICRDs(c apiextensionsclient.Interface) { + By("Creating CSI CRDs") + crds := []*apiextensionsv1beta1.CustomResourceDefinition{ + csicrd.CSIDriverCRD(), + csicrd.CSINodeInfoCRD(), + } + + for _, crd := range crds { + _, err := c.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) + framework.ExpectNoError(err, "Failed to create CSI CRD %q: %v", crd.Name, err) + } +} + +func deleteCSICRDs(c apiextensionsclient.Interface) { + By("Deleting CSI CRDs") + csiDriverCRDName := csicrd.CSIDriverCRD().Name + csiNodeInfoCRDName := csicrd.CSINodeInfoCRD().Name + err := c.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(csiDriverCRDName, &metav1.DeleteOptions{}) + framework.ExpectNoError(err, "Failed to delete CSI CRD %q: %v", csiDriverCRDName, err) + err = c.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(csiNodeInfoCRDName, &metav1.DeleteOptions{}) + framework.ExpectNoError(err, "Failed to delete CSI CRD %q: %v", csiNodeInfoCRDName, err) +} + +func shredFile(filePath string) { + if _, err := os.Stat(filePath); os.IsNotExist(err) { + framework.Logf("File %v was not found, skipping shredding", filePath) + return + } + framework.Logf("Shredding file %v", filePath) + _, _, err := framework.RunCmd("shred", "--remove", filePath) + if err != nil { + framework.Logf("Failed to shred file %v: %v", filePath, err) + } + if _, err := os.Stat(filePath); os.IsNotExist(err) { + framework.Logf("File %v successfully shredded", filePath) + return + } + // Shred failed Try to remove the file for good meausure + err = os.Remove(filePath) + framework.ExpectNoError(err, "Failed to remove service account file %s", filePath) + +} + +// createGCESecrets downloads the GCP IAM Key for the default compute service account +// and puts it in a secret for the GCE PD CSI Driver to consume +func createGCESecrets(client clientset.Interface, config 
framework.VolumeTestConfig) { + saEnv := "E2E_GOOGLE_APPLICATION_CREDENTIALS" + saFile := fmt.Sprintf("/tmp/%s/cloud-sa.json", string(uuid.NewUUID())) + + os.MkdirAll(path.Dir(saFile), 0750) + defer os.Remove(path.Dir(saFile)) + + premadeSAFile, ok := os.LookupEnv(saEnv) + if !ok { + framework.Logf("Could not find env var %v, please either create cloud-sa"+ + " secret manually or rerun test after setting %v to the filepath of"+ + " the GCP Service Account to give to the GCE Persistent Disk CSI Driver", saEnv, saEnv) + return + } + + framework.Logf("Found CI service account key at %v", premadeSAFile) + // Need to copy it saFile + stdout, stderr, err := framework.RunCmd("cp", premadeSAFile, saFile) + framework.ExpectNoError(err, "error copying service account key: %s\nstdout: %s\nstderr: %s", err, stdout, stderr) + defer shredFile(saFile) + // Create Secret with this Service Account + fileBytes, err := ioutil.ReadFile(saFile) + framework.ExpectNoError(err, "Failed to read file %v", saFile) + + s := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cloud-sa", + Namespace: config.Namespace, + }, + Type: v1.SecretTypeOpaque, + Data: map[string][]byte{ + filepath.Base(saFile): fileBytes, + }, + } + + _, err = client.CoreV1().Secrets(config.Namespace).Create(s) + framework.ExpectNoError(err, "Failed to create Secret %v", s.GetName()) +} diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go index acb989d6ef4..4c3d428dbb8 100644 --- a/test/e2e/storage/csi_volumes.go +++ b/test/e2e/storage/csi_volumes.go @@ -24,10 +24,11 @@ import ( "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" + apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" - csi "k8s.io/csi-api/pkg/apis/csi/v1alpha1" + csiv1alpha1 "k8s.io/csi-api/pkg/apis/csi/v1alpha1" csiclient "k8s.io/csi-api/pkg/client/clientset/versioned" 
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/test/e2e/framework" @@ -54,23 +55,24 @@ type csiTestDriver interface { var csiTestDrivers = map[string]func(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver{ "hostPath": initCSIHostpath, - // Feature tag to skip test in CI, pending fix of #62237 - "[Feature: GCE PD CSI Plugin] gcePD": initCSIgcePD, + "gcePD": initCSIgcePD, } -var _ = utils.SIGDescribe("CSI Volumes", func() { +var _ = utils.SIGDescribe("[Serial] CSI Volumes", func() { f := framework.NewDefaultFramework("csi-mock-plugin") var ( - cs clientset.Interface - csics csiclient.Interface - ns *v1.Namespace - node v1.Node - config framework.VolumeTestConfig + cs clientset.Interface + crdclient apiextensionsclient.Interface + csics csiclient.Interface + ns *v1.Namespace + node v1.Node + config framework.VolumeTestConfig ) BeforeEach(func() { cs = f.ClientSet + crdclient = f.APIExtensionsClientSet csics = f.CSIClientSet ns = f.Namespace nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) @@ -83,13 +85,18 @@ var _ = utils.SIGDescribe("CSI Volumes", func() { WaitForCompletion: true, } csiDriverRegistrarClusterRole(config) + createCSICRDs(crdclient) + }) + + AfterEach(func() { + deleteCSICRDs(crdclient) }) for driverName, initCSIDriver := range csiTestDrivers { curDriverName := driverName curInitCSIDriver := initCSIDriver - Context(fmt.Sprintf("CSI plugin test using CSI driver: %s", curDriverName), func() { + Context(fmt.Sprintf("CSI plugin test using CSI driver: %s [Serial]", curDriverName), func() { var ( driver csiTestDriver ) @@ -205,13 +212,13 @@ var _ = utils.SIGDescribe("CSI Volumes", func() { }) }) -func createCSIDriver(csics csiclient.Interface, attachable bool) *csi.CSIDriver { +func createCSIDriver(csics csiclient.Interface, attachable bool) *csiv1alpha1.CSIDriver { By("Creating CSIDriver instance") - driver := &csi.CSIDriver{ + driver := &csiv1alpha1.CSIDriver{ ObjectMeta: metav1.ObjectMeta{ Name: 
"csi-hostpath", }, - Spec: csi.CSIDriverSpec{ + Spec: csiv1alpha1.CSIDriverSpec{ AttachRequired: &attachable, }, } @@ -353,9 +360,10 @@ type gcePDCSIDriver struct { func initCSIgcePD(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver { cs := f.ClientSet framework.SkipUnlessProviderIs("gce", "gke") - // Currently you will need to manually add the required GCP Credentials as a secret "cloud-sa" - // kubectl create generic cloud-sa --from-file=PATH/TO/cloud-sa.json --namespace={{config.Namespace}} - // TODO(#62561): Inject the necessary credentials automatically to the driver containers in e2e test + + // TODO(#62561): Use credentials through external pod identity when that goes GA instead of downloading keys. + createGCESecrets(cs, config) + framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute) return &gcePDCSIDriver{ @@ -394,6 +402,8 @@ func (g *gcePDCSIDriver) createCSIDriver() { g.nodeServiceAccount = csiServiceAccount(cs, config, "gce-node", false /* teardown */) csiClusterRoleBindings(cs, config, false /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles) csiClusterRoleBindings(cs, config, false /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles) + utils.PrivilegedTestPSPClusterRoleBinding(cs, config.Namespace, + false /* teardown */, []string{g.controllerServiceAccount.Name, g.nodeServiceAccount.Name}) deployGCEPDCSIDriver(cs, config, false /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount) } @@ -405,6 +415,8 @@ func (g *gcePDCSIDriver) cleanupCSIDriver() { deployGCEPDCSIDriver(cs, config, true /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount) csiClusterRoleBindings(cs, config, true /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles) csiClusterRoleBindings(cs, config, true /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles) + utils.PrivilegedTestPSPClusterRoleBinding(cs, config.Namespace, + true /* teardown */, 
[]string{g.controllerServiceAccount.Name, g.nodeServiceAccount.Name}) csiServiceAccount(cs, config, "gce-controller", true /* teardown */) csiServiceAccount(cs, config, "gce-node", true /* teardown */) } diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go index e977d74f7d5..845ce1c7cde 100644 --- a/test/e2e/storage/empty_dir_wrapper.go +++ b/test/e2e/storage/empty_dir_wrapper.go @@ -17,21 +17,20 @@ limitations under the License. package storage import ( + "fmt" + "strconv" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" - "fmt" - "strconv" - . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - - "k8s.io/kubernetes/test/e2e/storage/utils" ) const ( @@ -56,7 +55,12 @@ const ( var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { f := framework.NewDefaultFramework("emptydir-wrapper") - It("should not conflict", func() { + /* + Release : v1.13 + Testname: EmptyDir Wrapper Volume, Secret and ConfigMap volumes, no conflict + Description: Secret volume and ConfigMap volume is created with data. Pod MUST be able to start with Secret and ConfigMap volumes mounted into the container. + */ + framework.ConformanceIt("should not conflict", func() { name := "emptydir-wrapper-test-" + string(uuid.NewUUID()) volumeName := "secret-volume" volumeMountPath := "/etc/secret-volume" @@ -172,7 +176,13 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { // but these cases are harder because tmpfs-based emptyDir // appears to be less prone to the race problem. 
- It("should not cause race condition when used for configmaps [Serial] [Slow]", func() { + /* + Release : v1.13 + Testname: EmptyDir Wrapper Volume, ConfigMap volumes, no race + Description: Slow by design [~180 Seconds]. + Create 50 ConfigMaps Volumes and 5 replicas of pod with these ConfigMapvolumes mounted. Pod MUST NOT fail waiting for Volumes. + */ + framework.ConformanceIt("should not cause race condition when used for configmaps [Serial] [Slow]", func() { configMapNames := createConfigmapsForRace(f) defer deleteConfigMaps(f, configMapNames) volumes, volumeMounts := makeConfigMapVolumes(configMapNames) @@ -181,6 +191,10 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { } }) + // Slow by design [~150 Seconds]. + // This test uses deprecated GitRepo VolumeSource so it MUST not be promoted to Conformance. + // To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod’s container. + // This projected volume maps approach can also be tested with secrets and downwardapi VolumeSource but are less prone to the race problem. It("should not cause race condition when used for git_repo [Serial] [Slow]", func() { gitURL, gitRepo, cleanup := createGitServer(f) defer cleanup() diff --git a/test/e2e/storage/flexvolume.go b/test/e2e/storage/flexvolume.go index afda29bf105..08a92cb81ed 100644 --- a/test/e2e/storage/flexvolume.go +++ b/test/e2e/storage/flexvolume.go @@ -27,10 +27,10 @@ import ( . 
"github.com/onsi/ginkgo" "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" + versionutil "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/version" clientset "k8s.io/client-go/kubernetes" - versionutil "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/generated" "k8s.io/kubernetes/test/e2e/storage/utils" diff --git a/test/e2e/storage/generic_persistent_volume-disruptive.go b/test/e2e/storage/generic_persistent_volume-disruptive.go index 92b04b2af96..fdba7e78433 100644 --- a/test/e2e/storage/generic_persistent_volume-disruptive.go +++ b/test/e2e/storage/generic_persistent_volume-disruptive.go @@ -61,10 +61,11 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() { var ( clientPod *v1.Pod pvc *v1.PersistentVolumeClaim + pv *v1.PersistentVolume ) BeforeEach(func() { framework.Logf("Initializing pod and pvcs for test") - clientPod, pvc = createPodPVCFromSC(f, c, ns) + clientPod, pvc, pv = createPodPVCFromSC(f, c, ns) }) for _, test := range disruptiveTestTable { func(t disruptiveTest) { @@ -76,13 +77,13 @@ var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() { } AfterEach(func() { framework.Logf("Tearing down test spec") - tearDownTestCase(c, f, ns, clientPod, pvc, nil) + tearDownTestCase(c, f, ns, clientPod, pvc, pv, false) pvc, clientPod = nil, nil }) }) }) -func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string) (*v1.Pod, *v1.PersistentVolumeClaim) { +func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string) (*v1.Pod, *v1.PersistentVolumeClaim, *v1.PersistentVolume) { var err error test := storageClassTest{ name: "default", @@ -99,5 +100,5 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string By("Creating a pod with dynamically provisioned volume") pod, err := framework.CreateNginxPod(c, ns, nil, pvcClaims) 
Expect(err).NotTo(HaveOccurred(), "While creating pods for kubelet restart test") - return pod, pvc + return pod, pvc, pvs[0] } diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go index 1ea9fe192bf..f081809dab8 100644 --- a/test/e2e/storage/nfs_persistent_volume-disruptive.go +++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go @@ -208,7 +208,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { AfterEach(func() { framework.Logf("Tearing down test spec") - tearDownTestCase(c, f, ns, clientPod, pvc, pv) + tearDownTestCase(c, f, ns, clientPod, pvc, pv, true /* force PV delete */) pv, pvc, clientPod = nil, nil, nil }) @@ -277,11 +277,14 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framew } // tearDownTestCase destroy resources created by initTestCase. -func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, client *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { +func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string, client *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, forceDeletePV bool) { // Ignore deletion errors. Failing on them will interrupt test cleanup. 
framework.DeletePodWithWait(f, c, client) framework.DeletePersistentVolumeClaim(c, pvc.Name, ns) - if pv != nil { + if forceDeletePV && pv != nil { framework.DeletePersistentVolume(c, pv.Name) + return } + err := framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, 5*time.Minute) + framework.ExpectNoError(err, "Persistent Volume %v not deleted by dynamic provisioner", pv.Name) } diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 4e0c0c367e5..f4d066319ae 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -535,7 +535,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { }) }) - Context("StatefulSet with pod affinity", func() { + Context("StatefulSet with pod affinity [Slow]", func() { var testVols map[string][]*localTestVolume const ( ssReplicas = 3 @@ -1464,6 +1464,7 @@ func setupLocalVolumeProvisioner(config *localTestConfig) { By("Bootstrapping local volume provisioner") createServiceAccount(config) createProvisionerClusterRoleBinding(config) + utils.PrivilegedTestPSPClusterRoleBinding(config.client, config.ns, false /* teardown */, []string{testServiceAccount}) createVolumeConfigMap(config) for _, node := range config.nodes { @@ -1477,6 +1478,7 @@ func setupLocalVolumeProvisioner(config *localTestConfig) { func cleanupLocalVolumeProvisioner(config *localTestConfig) { By("Cleaning up cluster role binding") deleteClusterRoleBinding(config) + utils.PrivilegedTestPSPClusterRoleBinding(config.client, config.ns, true /* teardown */, []string{testServiceAccount}) for _, node := range config.nodes { By(fmt.Sprintf("Removing the test discovery directory on node %v", node.Name)) diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index 35342d4390f..b47de3433c5 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -312,7 +312,7 @@ var _ = 
utils.SIGDescribe("PersistentVolumes", func() { framework.DeleteAllStatefulSets(c, ns) }) - It("should be reschedulable", func() { + It("should be reschedulable [Slow]", func() { // Only run on providers with default storageclass framework.SkipUnlessProviderIs("openstack", "gce", "gke", "vsphere", "azure") diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go index 5df99d308aa..dd3e482daf3 100644 --- a/test/e2e/storage/testsuites/base.go +++ b/test/e2e/storage/testsuites/base.go @@ -18,6 +18,7 @@ package testsuites import ( "fmt" + "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -192,9 +193,25 @@ func (r *genericVolumeTestResource) cleanupResource(driver drivers.TestDriver, p volType := pattern.VolType if r.pvc != nil || r.pv != nil { - By("Deleting pv and pvc") - if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 { - framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs)) + switch volType { + case testpatterns.PreprovisionedPV: + By("Deleting pv and pvc") + if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 { + framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs)) + } + case testpatterns.DynamicPV: + By("Deleting pvc") + // We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner + if r.pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete { + framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v", + r.pv.Name, v1.PersistentVolumeReclaimDelete) + } + err := framework.DeletePersistentVolumeClaim(f.ClientSet, r.pvc.Name, f.Namespace.Name) + framework.ExpectNoError(err, "Failed to delete PVC %v", r.pvc.Name) + err = framework.WaitForPersistentVolumeDeleted(f.ClientSet, r.pv.Name, 5*time.Second, 5*time.Minute) + framework.ExpectNoError(err, 
"Persistent Volume %v not deleted by dynamic provisioner", r.pv.Name) + default: + framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.pvc, r.pv) } } diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 8c04a908798..e7bc6dbde73 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -360,7 +360,7 @@ func testSubPath(input *subPathTestInput) { testReadFile(input.f, input.filePathInSubpath, input.pod, 0) }) - It("should fail for new directories when readOnly specified in the volumeSource", func() { + It("should fail for new directories when readOnly specified in the volumeSource [Slow]", func() { if input.roVol == nil { framework.Skipf("Volume type %v doesn't support readOnly source", input.volType) } diff --git a/test/e2e/storage/utils/BUILD b/test/e2e/storage/utils/BUILD index 5f640d83a60..ffcca6254eb 100644 --- a/test/e2e/storage/utils/BUILD +++ b/test/e2e/storage/utils/BUILD @@ -14,7 +14,10 @@ go_library( importpath = "k8s.io/kubernetes/test/e2e/storage/utils", deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/rbac/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", "//test/utils/image:go_default_library", diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go index 2329806a0de..fac42ba9159 100644 --- a/test/e2e/storage/utils/utils.go +++ b/test/e2e/storage/utils/utils.go @@ -24,7 +24,10 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" @@ -40,6 +43,11 @@ const ( KRestart KubeletOpt = "restart" ) +const ( + // ClusterRole name for e2e test Priveledged Pod Security Policy User + podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp" +) + // PodExec wraps RunKubectl to execute a bash cmd in target pod func PodExec(pod *v1.Pod, bashExec string) (string, error) { return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--", "/bin/sh", "-c", bashExec) @@ -386,3 +394,50 @@ func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginNa return pod } + +func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface, + namespace string, + teardown bool, + saNames []string) { + bindingString := "Binding" + if teardown { + bindingString = "Unbinding" + } + roleBindingClient := client.RbacV1().RoleBindings(namespace) + for _, saName := range saNames { + By(fmt.Sprintf("%v priviledged Pod Security Policy to the service account %s", bindingString, saName)) + binding := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "psp-" + saName, + Namespace: namespace, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: saName, + Namespace: namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: podSecurityPolicyPrivilegedClusterRoleName, + APIGroup: "rbac.authorization.k8s.io", + }, + } + + roleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{}) + err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) { + _, err := roleBindingClient.Get(binding.GetName(), metav1.GetOptions{}) + return apierrs.IsNotFound(err), nil + }) + 
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err) + + if teardown { + continue + } + + _, err = roleBindingClient.Create(binding) + framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err) + + } +} diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 5d930b8c346..c75988a6378 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -360,7 +360,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ns = f.Namespace.Name }) - Describe("DynamicProvisioner", func() { + Describe("DynamicProvisioner [Slow]", func() { It("should provision storage with different parameters", func() { cloudZone := getRandomCloudZone(c) diff --git a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go index 7b6d5888c41..9075e9eefc8 100644 --- a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go +++ b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go @@ -59,6 +59,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() { */ BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") + Bootstrap(f) c = f.ClientSet ns = f.Namespace.Name clientPod = nil diff --git a/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml b/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml index 76f1ca9b723..36a43ce4b80 100644 --- a/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml +++ b/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml @@ -13,7 +13,6 @@ spec: labels: app: csi-gce-pd-driver spec: - serviceAccount: csi-gce-pd containers: - name: csi-external-provisioner imagePullPolicy: Always @@ -42,7 +41,7 @@ spec: mountPath: /csi - name: gce-driver imagePullPolicy: Always - image: gcr.io/google-containers/volume-csi/compute-persistent-disk-csi-driver:v0.2.0.alpha + image: 
gcr.io/google-containers/volume-csi/gcp-compute-persistent-disk-csi-driver:v0.1.0.alpha args: - "--v=5" - "--endpoint=$(CSI_ENDPOINT)" diff --git a/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml b/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml index 8bfa28bbd0b..85be59f7a11 100644 --- a/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml +++ b/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml @@ -12,7 +12,6 @@ spec: labels: app: csi-gce-driver spec: - serviceAccount: csi-gce-pd containers: - name: csi-driver-registrar imagePullPolicy: Always @@ -39,7 +38,7 @@ spec: securityContext: privileged: true imagePullPolicy: Always - image: gcr.io/google-containers/volume-csi/compute-persistent-disk-csi-driver:v0.2.0.alpha + image: gcr.io/google-containers/volume-csi/gcp-compute-persistent-disk-csi-driver:v0.1.0.alpha args: - "--v=5" - "--endpoint=$(CSI_ENDPOINT)" diff --git a/test/e2e/upgrades/BUILD b/test/e2e/upgrades/BUILD index 43d27d85588..75cabe9c616 100644 --- a/test/e2e/upgrades/BUILD +++ b/test/e2e/upgrades/BUILD @@ -25,7 +25,6 @@ go_library( importpath = "k8s.io/kubernetes/test/e2e/upgrades", deps = [ "//pkg/kubelet/sysctl:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/autoscaling/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", @@ -34,6 +33,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/common:go_default_library", diff --git a/test/e2e/upgrades/apps/BUILD b/test/e2e/upgrades/apps/BUILD index 
8d58673407e..aa85076116d 100644 --- a/test/e2e/upgrades/apps/BUILD +++ b/test/e2e/upgrades/apps/BUILD @@ -18,7 +18,6 @@ go_library( deps = [ "//pkg/controller:go_default_library", "//pkg/controller/deployment/util:go_default_library", - "//pkg/util/version:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/batch/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -26,6 +25,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/upgrades:go_default_library", diff --git a/test/e2e/upgrades/apps/daemonsets.go b/test/e2e/upgrades/apps/daemonsets.go index 3550f651bdf..35037003891 100644 --- a/test/e2e/upgrades/apps/daemonsets.go +++ b/test/e2e/upgrades/apps/daemonsets.go @@ -126,11 +126,11 @@ func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector m nodeNames := make([]string, 0) for _, node := range nodeList.Items { - if len(node.Spec.Taints) == 0 { - nodeNames = append(nodeNames, node.Name) - } else { - framework.Logf("Node %v not expected to have DaemonSet pod, has taints %v", node.Name, node.Spec.Taints) + if len(node.Spec.Taints) != 0 { + framework.Logf("Ignore taints %v on Node %v for DaemonSet Pod.", node.Spec.Taints, node.Name) } + // DaemonSet Pods are expected to run on all the nodes in e2e. 
+ nodeNames = append(nodeNames, node.Name) } return checkDaemonPodOnNodes(f, namespace, selector, nodeNames) diff --git a/test/e2e/upgrades/apps/statefulset.go b/test/e2e/upgrades/apps/statefulset.go index 83884e35661..ce0abe0c408 100644 --- a/test/e2e/upgrades/apps/statefulset.go +++ b/test/e2e/upgrades/apps/statefulset.go @@ -22,7 +22,7 @@ import ( apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/util/version" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/upgrades" diff --git a/test/e2e/upgrades/cassandra.go b/test/e2e/upgrades/cassandra.go index b96a6d3eb76..101d97d829b 100644 --- a/test/e2e/upgrades/cassandra.go +++ b/test/e2e/upgrades/cassandra.go @@ -29,8 +29,8 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" ) diff --git a/test/e2e/upgrades/configmaps.go b/test/e2e/upgrades/configmaps.go index 3f06c4ed929..6296da28805 100644 --- a/test/e2e/upgrades/configmaps.go +++ b/test/e2e/upgrades/configmaps.go @@ -35,7 +35,7 @@ type ConfigMapUpgradeTest struct { } func (ConfigMapUpgradeTest) Name() string { - return "configmap-upgrade [sig-storage] [sig-api-machinery]" + return "[sig-storage] [sig-api-machinery] configmap-upgrade" } // Setup creates a ConfigMap and then verifies that a pod can consume it. diff --git a/test/e2e/upgrades/etcd.go b/test/e2e/upgrades/etcd.go index aea9e08ec64..32ad9cbba20 100644 --- a/test/e2e/upgrades/etcd.go +++ b/test/e2e/upgrades/etcd.go @@ -29,8 +29,8 @@ import ( . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" ) diff --git a/test/e2e/upgrades/ingress.go b/test/e2e/upgrades/ingress.go index 855e8dcabf6..5070099ad2e 100644 --- a/test/e2e/upgrades/ingress.go +++ b/test/e2e/upgrades/ingress.go @@ -28,8 +28,8 @@ import ( compute "google.golang.org/api/compute/v1" extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" ) diff --git a/test/e2e/upgrades/mysql.go b/test/e2e/upgrades/mysql.go index 606032aa644..83c7f31fe1c 100644 --- a/test/e2e/upgrades/mysql.go +++ b/test/e2e/upgrades/mysql.go @@ -29,8 +29,8 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" ) diff --git a/test/e2e/upgrades/secrets.go b/test/e2e/upgrades/secrets.go index 29c02f31e56..21c532105b8 100644 --- a/test/e2e/upgrades/secrets.go +++ b/test/e2e/upgrades/secrets.go @@ -34,7 +34,7 @@ type SecretUpgradeTest struct { secret *v1.Secret } -func (SecretUpgradeTest) Name() string { return "secret-upgrade [sig-storage] [sig-api-machinery]" } +func (SecretUpgradeTest) Name() string { return "[sig-storage] [sig-api-machinery] secret-upgrade" } // Setup creates a secret and then verifies that a pod can consume it. 
func (t *SecretUpgradeTest) Setup(f *framework.Framework) { diff --git a/test/e2e/upgrades/storage/persistent_volumes.go b/test/e2e/upgrades/storage/persistent_volumes.go index 8bfc24de0a1..15e20a9b6db 100644 --- a/test/e2e/upgrades/storage/persistent_volumes.go +++ b/test/e2e/upgrades/storage/persistent_volumes.go @@ -33,7 +33,7 @@ type PersistentVolumeUpgradeTest struct { pvc *v1.PersistentVolumeClaim } -func (PersistentVolumeUpgradeTest) Name() string { return "persistent-volume-upgrade [sig-storage]" } +func (PersistentVolumeUpgradeTest) Name() string { return "[sig-storage] persistent-volume-upgrade" } const ( pvTestFile string = "/mnt/volume1/pv_upgrade_test" diff --git a/test/e2e/upgrades/upgrade.go b/test/e2e/upgrades/upgrade.go index 11eeadb0fa1..420feb8ce7b 100644 --- a/test/e2e/upgrades/upgrade.go +++ b/test/e2e/upgrades/upgrade.go @@ -19,7 +19,7 @@ limitations under the License. package upgrades import ( - "k8s.io/kubernetes/pkg/util/version" + "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" ) diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index 93ded55e116..2670233e2b3 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -89,7 +89,6 @@ go_test( "gke_environment_test.go", "hugepages_test.go", "image_id_test.go", - "kubelet_test.go", "log_path_test.go", "mirror_pod_test.go", "node_container_manager_test.go", diff --git a/test/e2e_node/device_plugin.go b/test/e2e_node/device_plugin.go index 75da903ea7d..62c8fc07497 100644 --- a/test/e2e_node/device_plugin.go +++ b/test/e2e_node/device_plugin.go @@ -62,7 +62,6 @@ func testDevicePlugin(f *framework.Framework, enablePluginWatcher bool, pluginSo tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) { initialConfig.FeatureGates[string(features.KubeletPluginsWatcher)] = enablePluginWatcher }) - //devicePluginSockPaths := []string{pluginapi.DevicePluginPath} It("Verifies the Kubelet device plugin functionality.", func() { By("Start 
stub device plugin") // fake devices for e2e test @@ -80,7 +79,7 @@ func testDevicePlugin(f *framework.Framework, enablePluginWatcher bool, pluginSo framework.ExpectNoError(err) By("Register resources") - err = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginapi.DevicePluginPath) + err = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir) framework.ExpectNoError(err) By("Waiting for the resource exported by the stub device plugin to become available on the local node") @@ -108,16 +107,23 @@ func testDevicePlugin(f *framework.Framework, enablePluginWatcher bool, pluginSo devIdAfterRestart := parseLog(f, pod1.Name, pod1.Name, deviceIDRE) Expect(devIdAfterRestart).To(Equal(devId1)) + restartTime := time.Now() By("Restarting Kubelet") restartKubelet() - ensurePodContainerRestart(f, pod1.Name, pod1.Name) - By("Confirming that after a kubelet restart, fake-device assignement is kept") - devIdRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE) - Expect(devIdRestart1).To(Equal(devId1)) - + // We need to wait for node to be ready before re-registering stub device plugin. + // Otherwise, Kubelet DeviceManager may remove the re-registered sockets after it starts. 
By("Wait for node is ready") - framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout) + Eventually(func() bool { + node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) + framework.ExpectNoError(err) + for _, cond := range node.Status.Conditions { + if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue && cond.LastHeartbeatTime.After(restartTime) { + return true + } + } + return false + }, 5*time.Minute, framework.Poll).Should(BeTrue()) By("Re-Register resources") dp1 = dm.NewDevicePluginStub(devs, socketPath, resourceName, false) @@ -128,6 +134,11 @@ func testDevicePlugin(f *framework.Framework, enablePluginWatcher bool, pluginSo err = dp1.Register(pluginapi.KubeletSocket, resourceName, pluginSockDir) framework.ExpectNoError(err) + ensurePodContainerRestart(f, pod1.Name, pod1.Name) + By("Confirming that after a kubelet restart, fake-device assignement is kept") + devIdRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE) + Expect(devIdRestart1).To(Equal(devId1)) + By("Waiting for resource to become available on the local node after re-registration") Eventually(func() bool { node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{}) @@ -191,18 +202,6 @@ func testDevicePlugin(f *framework.Framework, enablePluginWatcher bool, pluginSo return numberOfDevicesCapacity(node, resourceName) <= 0 }, 10*time.Minute, framework.Poll).Should(BeTrue()) - By("Restarting Kubelet second time.") - restartKubelet() - - By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet Eventually.") - ensurePodContainerRestart(f, pod1.Name, pod1.Name) - devIdRestart1 = parseLog(f, pod1.Name, pod1.Name, deviceIDRE) - Expect(devIdRestart1).To(Equal(devId1)) - - ensurePodContainerRestart(f, pod2.Name, pod2.Name) - devIdRestart2 = parseLog(f, pod2.Name, pod2.Name, deviceIDRE) - 
Expect(devIdRestart2).To(Equal(devId2)) - // Cleanup f.PodClient().DeleteSync(pod1.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) f.PodClient().DeleteSync(pod2.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout) diff --git a/test/e2e_node/runtime_conformance_test.go b/test/e2e_node/runtime_conformance_test.go index 3e46edf5faa..d72be3a3a72 100644 --- a/test/e2e_node/runtime_conformance_test.go +++ b/test/e2e_node/runtime_conformance_test.go @@ -60,7 +60,7 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() { }{ { description: "should be able to pull from private registry with credential provider", - image: "gcr.io/authenticated-image-pulling/alpine:3.1", + image: "gcr.io/authenticated-image-pulling/alpine:3.7", phase: v1.PodRunning, waiting: false, }, diff --git a/test/images/Makefile b/test/images/Makefile index 47c9d46ad20..ddb8c80f27a 100644 --- a/test/images/Makefile +++ b/test/images/Makefile @@ -17,7 +17,7 @@ include ../../hack/make-rules/Makefile.manifest REGISTRY ?= gcr.io/kubernetes-e2e-test-images GOARM=7 QEMUVERSION=v2.9.1 -GOLANG_VERSION=1.10.3 +GOLANG_VERSION=1.10.4 export ifndef WHAT diff --git a/test/images/sample-apiserver/Dockerfile b/test/images/sample-apiserver/Dockerfile index 37e9394d3ef..333dfae10d3 100644 --- a/test/images/sample-apiserver/Dockerfile +++ b/test/images/sample-apiserver/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM k8s.gcr.io/kube-cross:v1.10.3-1 as build_k8s_1_7_sample_apiserver +FROM k8s.gcr.io/kube-cross:v1.10.4-1 as build_k8s_1_7_sample_apiserver ENV GOPATH /go RUN mkdir -p ${GOPATH}/src ${GOPATH}/bin diff --git a/test/images/volume/nfs/run_nfs.sh b/test/images/volume/nfs/run_nfs.sh index 6577e6da6ad..07100645e2e 100755 --- a/test/images/volume/nfs/run_nfs.sh +++ b/test/images/volume/nfs/run_nfs.sh @@ -47,7 +47,7 @@ function start() /usr/sbin/rpcbind -w fi - mount -t nfsd nfds /proc/fs/nfsd + mount -t nfsd nfsd /proc/fs/nfsd # -V 3: enable NFSv3 /usr/sbin/rpc.mountd -N 2 -V 3 diff --git a/test/images/webhook/BUILD b/test/images/webhook/BUILD index ac9bc7669f6..5d75f0e94dc 100644 --- a/test/images/webhook/BUILD +++ b/test/images/webhook/BUILD @@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ + "addlabel.go", "alwaysdeny.go", "config.go", "configmap.go", diff --git a/test/images/webhook/VERSION b/test/images/webhook/VERSION index 1b67ceb7853..fe9e4faa6b2 100644 --- a/test/images/webhook/VERSION +++ b/test/images/webhook/VERSION @@ -1 +1 @@ -1.12v2 +1.13v1 diff --git a/test/images/webhook/addlabel.go b/test/images/webhook/addlabel.go new file mode 100644 index 00000000000..48ff86351cb --- /dev/null +++ b/test/images/webhook/addlabel.go @@ -0,0 +1,60 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "encoding/json" + + "github.com/golang/glog" + "k8s.io/api/admission/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + addFirstLabelPatch string = `[ + { "op": "add", "path": "/metadata/labels", "value": {"added-label": "yes"}} + ]` + addAdditionalLabelPatch string = `[ + { "op": "add", "path": "/metadata/labels/added-label", "value": "yes" } + ]` +) + +// Add a label {"added-label": "yes"} to the object +func addLabel(ar v1beta1.AdmissionReview) *v1beta1.AdmissionResponse { + glog.V(2).Info("calling add-label") + obj := struct { + metav1.ObjectMeta + Data map[string]string + }{} + raw := ar.Request.Object.Raw + err := json.Unmarshal(raw, &obj) + if err != nil { + glog.Error(err) + return toAdmissionResponse(err) + } + + reviewResponse := v1beta1.AdmissionResponse{} + reviewResponse.Allowed = true + if len(obj.ObjectMeta.Labels) == 0 { + reviewResponse.Patch = []byte(addFirstLabelPatch) + } else { + reviewResponse.Patch = []byte(addAdditionalLabelPatch) + } + pt := v1beta1.PatchTypeJSONPatch + reviewResponse.PatchType = &pt + return &reviewResponse +} diff --git a/test/images/webhook/main.go b/test/images/webhook/main.go index aca37e46abf..37c0a1de8a9 100644 --- a/test/images/webhook/main.go +++ b/test/images/webhook/main.go @@ -95,6 +95,10 @@ func serveAlwaysDeny(w http.ResponseWriter, r *http.Request) { serve(w, r, alwaysDeny) } +func serveAddLabel(w http.ResponseWriter, r *http.Request) { + serve(w, r, addLabel) +} + func servePods(w http.ResponseWriter, r *http.Request) { serve(w, r, admitPods) } @@ -133,6 +137,7 @@ func main() { flag.Parse() http.HandleFunc("/always-deny", serveAlwaysDeny) + http.HandleFunc("/add-label", serveAddLabel) http.HandleFunc("/pods", servePods) http.HandleFunc("/pods/attach", serveAttachingPods) http.HandleFunc("/mutating-pods", serveMutatePods) diff --git a/test/integration/BUILD b/test/integration/BUILD index 66262dff766..1fe6fa9d2ce 100644 --- a/test/integration/BUILD +++ 
b/test/integration/BUILD @@ -42,6 +42,7 @@ filegroup( "//test/integration/client:all-srcs", "//test/integration/configmap:all-srcs", "//test/integration/controllermanager:all-srcs", + "//test/integration/cronjob:all-srcs", "//test/integration/daemonset:all-srcs", "//test/integration/defaulttolerationseconds:all-srcs", "//test/integration/deployment:all-srcs", diff --git a/test/integration/cronjob/BUILD b/test/integration/cronjob/BUILD new file mode 100644 index 00000000000..57808cd8a6e --- /dev/null +++ b/test/integration/cronjob/BUILD @@ -0,0 +1,43 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_test", +) + +go_test( + name = "go_default_test", + size = "large", + srcs = [ + "cronjob_test.go", + "main_test.go", + ], + tags = ["integration"], + deps = [ + "//pkg/controller/cronjob:go_default_library", + "//pkg/controller/job:go_default_library", + "//staging/src/k8s.io/api/batch/v1:go_default_library", + "//staging/src/k8s.io/api/batch/v1beta1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/client-go/informers:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes/typed/batch/v1beta1:go_default_library", + "//staging/src/k8s.io/client-go/rest:go_default_library", + "//test/integration/framework:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/test/integration/cronjob/cronjob_test.go b/test/integration/cronjob/cronjob_test.go new file mode 100644 index 00000000000..1a719a6b934 --- /dev/null +++ 
b/test/integration/cronjob/cronjob_test.go @@ -0,0 +1,174 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cronjob + +import ( + "fmt" + "net/http/httptest" + "testing" + "time" + + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + clientset "k8s.io/client-go/kubernetes" + clientbatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" + "k8s.io/client-go/rest" + restclient "k8s.io/client-go/rest" + "k8s.io/kubernetes/pkg/controller/cronjob" + "k8s.io/kubernetes/pkg/controller/job" + "k8s.io/kubernetes/test/integration/framework" +) + +func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *cronjob.CronJobController, *job.JobController, informers.SharedInformerFactory, clientset.Interface, rest.Config) { + masterConfig := framework.NewIntegrationTestMasterConfig() + _, server, closeFn := framework.RunAMaster(masterConfig) + + config := restclient.Config{Host: server.URL} + clientSet, err := clientset.NewForConfig(&config) + if err != nil { + t.Fatalf("Error creating clientset: %v", err) + } + resyncPeriod := 12 * time.Hour + informerSet := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "cronjob-informers")), resyncPeriod) + cjc, err := 
cronjob.NewCronJobController(clientSet) + if err != nil { + t.Fatalf("Error creating CronJob controller: %v", err) + } + jc := job.NewJobController(informerSet.Core().V1().Pods(), informerSet.Batch().V1().Jobs(), clientSet) + + return server, closeFn, cjc, jc, informerSet, clientSet, config +} + +func newCronJob(name, namespace, schedule string) *batchv1beta1.CronJob { + zero64 := int64(0) + zero32 := int32(0) + return &batchv1beta1.CronJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "CronJob", + APIVersion: "batch/v1beta1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Spec: batchv1beta1.CronJobSpec{ + Schedule: schedule, + SuccessfulJobsHistoryLimit: &zero32, + JobTemplate: batchv1beta1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "foo", Image: "bar"}}, + TerminationGracePeriodSeconds: &zero64, + RestartPolicy: "Never", + }, + }, + }, + }, + }, + } +} + +func cleanupCronJobs(t *testing.T, cjClient clientbatchv1beta1.CronJobInterface, name string) { + deletePropagation := metav1.DeletePropagationForeground + err := cjClient.Delete(name, &metav1.DeleteOptions{PropagationPolicy: &deletePropagation}) + if err != nil { + t.Errorf("Failed to delete CronJob: %v", err) + } +} + +func validateJobAndPod(t *testing.T, clientSet kubernetes.Interface, namespace string) { + if err := wait.PollImmediate(1*time.Second, 120*time.Second, func() (bool, error) { + jobs, err := clientSet.BatchV1().Jobs(namespace).List(metav1.ListOptions{}) + if err != nil { + t.Fatalf("Failed to list jobs: %v", err) + } + + if len(jobs.Items) == 0 { + return false, nil + } + + for _, j := range jobs.Items { + ownerReferences := j.ObjectMeta.OwnerReferences + if refCount := len(ownerReferences); refCount != 1 { + return false, fmt.Errorf("job %s has %d OwnerReferences, expected only 1", j.Name, refCount) + } + + reference := ownerReferences[0] + if reference.Kind != "CronJob" 
{ + return false, fmt.Errorf("job %s has OwnerReference with Kind %s, expected CronJob", j.Name, reference.Kind) + } + } + + pods, err := clientSet.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + if err != nil { + t.Fatalf("Failed to list pods: %v", err) + } + + if len(pods.Items) != 1 { + return false, nil + } + + for _, pod := range pods.Items { + ownerReferences := pod.ObjectMeta.OwnerReferences + if refCount := len(ownerReferences); refCount != 1 { + return false, fmt.Errorf("pod %s has %d OwnerReferences, expected only 1", pod.Name, refCount) + } + + reference := ownerReferences[0] + if reference.Kind != "Job" { + return false, fmt.Errorf("pod %s has OwnerReference with Kind %s, expected Job", pod.Name, reference.Kind) + } + } + return true, nil + }); err != nil { + t.Fatal(err) + } +} + +func TestCronJobLaunchesPodAndCleansUp(t *testing.T) { + server, closeFn, cjc, jc, informerSet, clientSet, _ := setup(t) + defer closeFn() + + cronJobName := "foo" + namespaceName := "simple-cronjob-test" + + ns := framework.CreateTestingNamespace(namespaceName, server, t) + defer framework.DeleteTestingNamespace(ns, server, t) + + cjClient := clientSet.BatchV1beta1().CronJobs(ns.Name) + + stopCh := make(chan struct{}) + defer close(stopCh) + + informerSet.Start(stopCh) + go cjc.Run(stopCh) + go jc.Run(1, stopCh) + + _, err := cjClient.Create(newCronJob(cronJobName, ns.Name, "* * * * ?")) + if err != nil { + t.Fatalf("Failed to create CronJob: %v", err) + } + defer cleanupCronJobs(t, cjClient, cronJobName) + + validateJobAndPod(t, clientSet, namespaceName) +} diff --git a/test/integration/cronjob/main_test.go b/test/integration/cronjob/main_test.go new file mode 100644 index 00000000000..bcbf8f26151 --- /dev/null +++ b/test/integration/cronjob/main_test.go @@ -0,0 +1,26 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cronjob + +import ( + "k8s.io/kubernetes/test/integration/framework" + "testing" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/evictions/evictions_test.go b/test/integration/evictions/evictions_test.go index 5a7ba8451d5..4a88a07181e 100644 --- a/test/integration/evictions/evictions_test.go +++ b/test/integration/evictions/evictions_test.go @@ -37,6 +37,7 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/controller/disruption" "k8s.io/kubernetes/test/integration/framework" + "reflect" ) const ( @@ -165,6 +166,82 @@ func TestConcurrentEvictionRequests(t *testing.T) { } } +// TestTerminalPodEviction ensures that PDB is not checked for terminal pods. 
+func TestTerminalPodEviction(t *testing.T) { + s, closeFn, rm, informers, clientSet := rmSetup(t) + defer closeFn() + + ns := framework.CreateTestingNamespace("terminalpod-eviction", s, t) + defer framework.DeleteTestingNamespace(ns, s, t) + + stopCh := make(chan struct{}) + informers.Start(stopCh) + go rm.Run(stopCh) + defer close(stopCh) + + config := restclient.Config{Host: s.URL} + clientSet, err := clientset.NewForConfig(&config) + if err != nil { + t.Fatalf("Failed to create clientset: %v", err) + } + + var gracePeriodSeconds int64 = 30 + deleteOption := &metav1.DeleteOptions{ + GracePeriodSeconds: &gracePeriodSeconds, + } + pod := newPod("test-terminal-pod1") + if _, err := clientSet.CoreV1().Pods(ns.Name).Create(pod); err != nil { + t.Errorf("Failed to create pod: %v", err) + } + addPodConditionSucceeded(pod) + if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(pod); err != nil { + t.Fatal(err) + } + + waitToObservePods(t, informers.Core().V1().Pods().Informer(), 1) + + pdb := newPDB() + if _, err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Create(pdb); err != nil { + t.Errorf("Failed to create PodDisruptionBudget: %v", err) + } + + pdbList, err := clientSet.Policy().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{}) + if err != nil { + t.Fatalf("Error while listing pod disruption budget") + } + oldPdb := pdbList.Items[0] + eviction := newEviction(ns.Name, pod.Name, deleteOption) + err = wait.PollImmediate(5*time.Second, 60*time.Second, func() (bool, error) { + e := clientSet.Policy().Evictions(ns.Name).Evict(eviction) + switch { + case errors.IsTooManyRequests(e): + return false, nil + case errors.IsConflict(e): + return false, fmt.Errorf("Unexpected Conflict (409) error caused by failing to handle concurrent PDB updates: %v", e) + case e == nil: + return true, nil + default: + return false, e + } + }) + if err != nil { + t.Fatalf("Eviction of pod failed %v", err) + } + pdbList, err = 
clientSet.Policy().PodDisruptionBudgets(ns.Name).List(metav1.ListOptions{}) + if err != nil { + t.Fatalf("Error while listing pod disruption budget") + } + newPdb := pdbList.Items[0] + // We shouldn't see an update in pod disruption budget status' generation number as we are evicting terminal pods without checking for pod disruption. + if !reflect.DeepEqual(newPdb.Status.ObservedGeneration, oldPdb.Status.ObservedGeneration) { + t.Fatalf("Expected the pdb generation to be of same value %v but got %v", newPdb.Status.ObservedGeneration, oldPdb.Status.ObservedGeneration) + } + + if err := clientSet.Policy().PodDisruptionBudgets(ns.Name).Delete(pdb.Name, deleteOption); err != nil { + t.Fatalf("Failed to delete pod disruption budget") + } +} + func newPod(podName string) *v1.Pod { return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -182,6 +259,18 @@ func newPod(podName string) *v1.Pod { } } +func addPodConditionSucceeded(pod *v1.Pod) { + pod.Status = v1.PodStatus{ + Phase: v1.PodSucceeded, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionTrue, + }, + }, + } +} + func addPodConditionReady(pod *v1.Pod) { pod.Status = v1.PodStatus{ Phase: v1.PodRunning, diff --git a/test/integration/framework/etcd.go b/test/integration/framework/etcd.go index 790c13ff8ad..d7f43ec2a38 100644 --- a/test/integration/framework/etcd.go +++ b/test/integration/framework/etcd.go @@ -17,93 +17,126 @@ limitations under the License. 
package framework import ( + "context" "fmt" - "hash/adler32" - "io" "io/ioutil" - "math/rand" + "net" "os" "os/exec" "path/filepath" - "sync" + "strings" "github.com/golang/glog" "k8s.io/kubernetes/pkg/util/env" ) -var ( - etcdSetup sync.Once - etcdURL = "" -) +var etcdURL = "" -func setupETCD() { - etcdSetup.Do(func() { - if os.Getenv("RUNFILES_DIR") == "" { - etcdURL = env.GetEnvAsStringOrFallback("KUBE_INTEGRATION_ETCD_URL", "http://127.0.0.1:2379") - return - } - etcdPath := filepath.Join(os.Getenv("RUNFILES_DIR"), "com_coreos_etcd/etcd") - // give every test the same random port each run - etcdPort := 20000 + rand.New(rand.NewSource(int64(adler32.Checksum([]byte(os.Args[0]))))).Intn(5000) - etcdURL = fmt.Sprintf("http://127.0.0.1:%d", etcdPort) +const installEtcd = ` +Cannot find etcd, cannot run integration tests +Please see https://github.com/kubernetes/community/blob/master/contributors/devel/testing.md#install-etcd-dependency for instructions. - info, err := os.Stat(etcdPath) - if err != nil { - glog.Fatalf("Unable to stat etcd: %v", err) - } - if info.IsDir() { - glog.Fatalf("Did not expect %q to be a directory", etcdPath) - } +You can use 'hack/install-etcd.sh' to install a copy in third_party/. 
- etcdDataDir, err := ioutil.TempDir(os.TempDir(), "integration_test_etcd_data") - if err != nil { - glog.Fatalf("Unable to make temp etcd data dir: %v", err) - } - glog.Infof("storing etcd data in: %v", etcdDataDir) +` - etcdCmd := exec.Command( - etcdPath, - "--data-dir", - etcdDataDir, - "--listen-client-urls", - GetEtcdURL(), - "--advertise-client-urls", - GetEtcdURL(), - "--listen-peer-urls", - "http://127.0.0.1:0", - ) - - stdout, err := etcdCmd.StdoutPipe() - if err != nil { - glog.Fatalf("Failed to run etcd: %v", err) - } - stderr, err := etcdCmd.StderrPipe() - if err != nil { - glog.Fatalf("Failed to run etcd: %v", err) - } - if err := etcdCmd.Start(); err != nil { - glog.Fatalf("Failed to run etcd: %v", err) - } - - go io.Copy(os.Stdout, stdout) - go io.Copy(os.Stderr, stderr) - - go func() { - if err := etcdCmd.Wait(); err != nil { - glog.Fatalf("Failed to run etcd: %v", err) - } - glog.Fatalf("etcd should not have succeeded") - }() - }) +// getEtcdPath returns a path to an etcd executable. +func getEtcdPath() (string, error) { + bazelPath := filepath.Join(os.Getenv("RUNFILES_DIR"), "com_coreos_etcd/etcd") + p, err := exec.LookPath(bazelPath) + if err == nil { + return p, nil + } + return exec.LookPath("etcd") } +// getAvailablePort returns a TCP port that is available for binding. +func getAvailablePort() (int, error) { + l, err := net.Listen("tcp", ":0") + if err != nil { + return 0, fmt.Errorf("could not bind to a port: %v", err) + } + // It is possible but unlikely that someone else will bind this port before we + // get a chance to use it. + defer l.Close() + return l.Addr().(*net.TCPAddr).Port, nil +} + +// startEtcd executes an etcd instance. The returned function will signal the +// etcd process and wait for it to exit. 
+func startEtcd() (func(), error) { + etcdURL = env.GetEnvAsStringOrFallback("KUBE_INTEGRATION_ETCD_URL", "http://127.0.0.1:2379") + conn, err := net.Dial("tcp", strings.TrimPrefix(etcdURL, "http://")) + if err == nil { + glog.Infof("etcd already running at %s", etcdURL) + conn.Close() + return func() {}, nil + } + glog.V(1).Infof("could not connect to etcd: %v", err) + + // TODO: Check for valid etcd version. + etcdPath, err := getEtcdPath() + if err != nil { + fmt.Fprintf(os.Stderr, installEtcd) + return nil, fmt.Errorf("could not find etcd in PATH: %v", err) + } + etcdPort, err := getAvailablePort() + if err != nil { + return nil, fmt.Errorf("could not get a port: %v", err) + } + etcdURL = fmt.Sprintf("http://127.0.0.1:%d", etcdPort) + glog.Infof("starting etcd on %s", etcdURL) + + etcdDataDir, err := ioutil.TempDir(os.TempDir(), "integration_test_etcd_data") + if err != nil { + return nil, fmt.Errorf("unable to make temp etcd data dir: %v", err) + } + glog.Infof("storing etcd data in: %v", etcdDataDir) + + ctx, cancel := context.WithCancel(context.Background()) + cmd := exec.CommandContext( + ctx, + etcdPath, + "--data-dir", + etcdDataDir, + "--listen-client-urls", + GetEtcdURL(), + "--advertise-client-urls", + GetEtcdURL(), + "--listen-peer-urls", + "http://127.0.0.1:0", + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + stop := func() { + cancel() + err := cmd.Wait() + glog.Infof("etcd exit status: %v", err) + err = os.RemoveAll(etcdDataDir) + if err != nil { + glog.Warningf("error during etcd cleanup: %v", err) + } + } + + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("failed to run etcd: %v", err) + } + return stop, nil +} + +// EtcdMain starts an etcd instance before running tests. func EtcdMain(tests func() int) { - setupETCD() - os.Exit(tests()) + stop, err := startEtcd() + if err != nil { + glog.Fatalf("cannot run integration tests: unable to start etcd: %v", err) + } + result := tests() + stop() // Don't defer this. 
See os.Exit documentation. + os.Exit(result) } -// return the EtcdURL +// GetEtcdURL returns the URL of the etcd instance started by EtcdMain. func GetEtcdURL() string { return etcdURL } diff --git a/test/integration/master/kube_apiserver_test.go b/test/integration/master/kube_apiserver_test.go index 187b1da1b00..050b2f75844 100644 --- a/test/integration/master/kube_apiserver_test.go +++ b/test/integration/master/kube_apiserver_test.go @@ -19,6 +19,7 @@ package master import ( "encoding/json" "fmt" + "net/http" "reflect" "strings" "testing" @@ -98,8 +99,8 @@ func TestOpenAPIDelegationChainPlumbing(t *testing.T) { result := kubeclient.RESTClient().Get().AbsPath("/swagger.json").Do() status := 0 result.StatusCode(&status) - if status != 200 { - t.Fatalf("GET /swagger.json failed: expected status=%d, got=%d", 200, status) + if status != http.StatusOK { + t.Fatalf("GET /swagger.json failed: expected status=%d, got=%d", http.StatusOK, status) } raw, err := result.Raw() diff --git a/test/integration/metrics/metrics_test.go b/test/integration/metrics/metrics_test.go index 9b4391f5a62..9269f7ba4e1 100644 --- a/test/integration/metrics/metrics_test.go +++ b/test/integration/metrics/metrics_test.go @@ -54,7 +54,7 @@ func scrapeMetrics(s *httptest.Server) ([]*prometheuspb.MetricFamily, error) { return nil, fmt.Errorf("Unable to contact metrics endpoint of master: %v", err) } defer resp.Body.Close() - if resp.StatusCode != 200 { + if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("Non-200 response trying to scrape metrics from master: %v", resp) } diff --git a/test/integration/quota/BUILD b/test/integration/quota/BUILD index 107a4bd9030..3bf9cd52aab 100644 --- a/test/integration/quota/BUILD +++ b/test/integration/quota/BUILD @@ -14,13 +14,11 @@ go_test( ], tags = ["integration"], deps = [ - "//pkg/client/clientset_generated/internalclientset:go_default_library", - "//pkg/client/informers/informers_generated/internalversion:go_default_library", 
"//pkg/controller:go_default_library", "//pkg/controller/replication:go_default_library", "//pkg/controller/resourcequota:go_default_library", - "//pkg/quota/generic:go_default_library", - "//pkg/quota/install:go_default_library", + "//pkg/quota/v1/generic:go_default_library", + "//pkg/quota/v1/install:go_default_library", "//plugin/pkg/admission/resourcequota:go_default_library", "//plugin/pkg/admission/resourcequota/apis/resourcequota:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/test/integration/quota/quota_test.go b/test/integration/quota/quota_test.go index 9bc6f384bfb..9f6a9a418bf 100644 --- a/test/integration/quota/quota_test.go +++ b/test/integration/quota/quota_test.go @@ -37,13 +37,11 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" watchtools "k8s.io/client-go/tools/watch" - "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - internalinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion" "k8s.io/kubernetes/pkg/controller" replicationcontroller "k8s.io/kubernetes/pkg/controller/replication" resourcequotacontroller "k8s.io/kubernetes/pkg/controller/resourcequota" - "k8s.io/kubernetes/pkg/quota/generic" - quotainstall "k8s.io/kubernetes/pkg/quota/install" + "k8s.io/kubernetes/pkg/quota/v1/generic" + quotainstall "k8s.io/kubernetes/pkg/quota/v1/install" "k8s.io/kubernetes/plugin/pkg/admission/resourcequota" resourcequotaapi "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota" "k8s.io/kubernetes/test/integration/framework" @@ -65,15 +63,14 @@ func TestQuota(t *testing.T) { admissionCh := make(chan struct{}) clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: 
restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) config := &resourcequotaapi.Configuration{} admission, err := resourcequota.NewResourceQuota(config, 5, admissionCh) if err != nil { t.Fatalf("unexpected error: %v", err) } - admission.SetInternalKubeClientSet(internalClientset) - internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, controller.NoResyncPeriodFunc()) - admission.SetInternalKubeInformerFactory(internalInformers) + admission.SetExternalKubeClientSet(clientset) + internalInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) + admission.SetExternalKubeInformerFactory(internalInformers) qca := quotainstall.NewQuotaConfigurationForAdmission() admission.SetQuotaConfiguration(qca) defer close(admissionCh) @@ -257,7 +254,6 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { admissionCh := make(chan struct{}) clientset := clientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) - internalClientset := internalclientset.NewForConfigOrDie(&restclient.Config{QPS: -1, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}) // stop creation of a pod resource unless there is a quota config := &resourcequotaapi.Configuration{ @@ -273,9 +269,9 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - admission.SetInternalKubeClientSet(internalClientset) - internalInformers := internalinformers.NewSharedInformerFactory(internalClientset, controller.NoResyncPeriodFunc()) - admission.SetInternalKubeInformerFactory(internalInformers) + admission.SetExternalKubeClientSet(clientset) + externalInformers := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) + 
admission.SetExternalKubeInformerFactory(externalInformers) admission.SetQuotaConfiguration(qca) defer close(admissionCh) @@ -324,7 +320,7 @@ func TestQuotaLimitedResourceDenial(t *testing.T) { // Periodically the quota controller to detect new resource types go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, controllerCh) - internalInformers.Start(controllerCh) + externalInformers.Start(controllerCh) informers.Start(controllerCh) close(informersStarted) diff --git a/test/integration/scheduler/BUILD b/test/integration/scheduler/BUILD index 4943e0eb83f..c78e0218e41 100644 --- a/test/integration/scheduler/BUILD +++ b/test/integration/scheduler/BUILD @@ -50,10 +50,8 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", diff --git a/test/integration/scheduler/preemption_test.go b/test/integration/scheduler/preemption_test.go index d5832a0bd86..27dd50cdf42 100644 --- a/test/integration/scheduler/preemption_test.go +++ b/test/integration/scheduler/preemption_test.go @@ -52,7 +52,7 @@ func waitForNominatedNodeNameWithTimeout(cs clientset.Interface, pod *v1.Pod, ti } return false, err }); err != nil { - return fmt.Errorf("Pod %v annotation did not get set: %v", pod.Name, err) + return fmt.Errorf("Pod %v/%v annotation did not get set: %v", pod.Namespace, pod.Name, err) } return nil } @@ -268,7 +268,7 @@ 
func TestPreemption(t *testing.T) { for i, p := range pods { if _, found := test.preemptedPodIndexes[i]; found { if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil { - t.Errorf("Test [%v]: Pod %v is not getting evicted.", test.description, p.Name) + t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.description, p.Namespace, p.Name) } } else { if p.DeletionTimestamp != nil { @@ -450,7 +450,7 @@ func TestPreemptionStarvation(t *testing.T) { // make sure that runningPods are all scheduled. for _, p := range runningPods { if err := waitForPodToSchedule(cs, p); err != nil { - t.Fatalf("Pod %v didn't get scheduled: %v", p.Name, err) + t.Fatalf("Pod %v/%v didn't get scheduled: %v", p.Namespace, p.Name, err) } } // Create pending pods. @@ -464,7 +464,7 @@ func TestPreemptionStarvation(t *testing.T) { for _, p := range pendingPods { if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, podUnschedulable(cs, p.Namespace, p.Name)); err != nil { - t.Errorf("Pod %v didn't get marked unschedulable: %v", p.Name, err) + t.Errorf("Pod %v/%v didn't get marked unschedulable: %v", p.Namespace, p.Name, err) } } // Create the preemptor. @@ -474,7 +474,7 @@ func TestPreemptionStarvation(t *testing.T) { } // Check that the preemptor pod gets the annotation for nominated node name. if err := waitForNominatedNodeName(cs, preemptor); err != nil { - t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err) + t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err) } // Make sure that preemptor is scheduled after preemptions. if err := waitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil { @@ -532,7 +532,7 @@ func TestNominatedNodeCleanUp(t *testing.T) { // make sure that the pods are all scheduled. 
for _, p := range lowPriPods { if err := waitForPodToSchedule(cs, p); err != nil { - t.Fatalf("Pod %v didn't get scheduled: %v", p.Name, err) + t.Fatalf("Pod %v/%v didn't get scheduled: %v", p.Namespace, p.Name, err) } } // Step 2. Create a medium priority pod. @@ -551,7 +551,7 @@ func TestNominatedNodeCleanUp(t *testing.T) { } // Step 3. Check that nominated node name of the medium priority pod is set. if err := waitForNominatedNodeName(cs, medPriPod); err != nil { - t.Errorf("NominatedNodeName annotation was not set for pod %v: %v", medPriPod.Name, err) + t.Errorf("NominatedNodeName annotation was not set for pod %v/%v: %v", medPriPod.Namespace, medPriPod.Name, err) } // Step 4. Create a high priority pod. podConf = initPausePod(cs, &pausePodConfig{ @@ -569,7 +569,7 @@ func TestNominatedNodeCleanUp(t *testing.T) { } // Step 5. Check that nominated node name of the high priority pod is set. if err := waitForNominatedNodeName(cs, highPriPod); err != nil { - t.Errorf("NominatedNodeName annotation was not set for pod %v: %v", medPriPod.Name, err) + t.Errorf("NominatedNodeName annotation was not set for pod %v/%v: %v", medPriPod.Namespace, medPriPod.Name, err) } // And the nominated node name of the medium priority pod is cleared. if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { @@ -842,8 +842,8 @@ func TestPDBInPreemption(t *testing.T) { t.Fatalf("Failed to create PDB: %v", err) } } - // Wait for PDBs to show up in the scheduler's cache and become stable. - if err := waitCachedPDBsStable(context, test.pdbs, test.pdbPodNum); err != nil { + // Wait for PDBs to become stable. 
+ if err := waitForPDBsStable(context, test.pdbs, test.pdbPodNum); err != nil { t.Fatalf("Not all pdbs are stable in the cache: %v", err) } @@ -856,18 +856,18 @@ func TestPDBInPreemption(t *testing.T) { for i, p := range pods { if _, found := test.preemptedPodIndexes[i]; found { if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil { - t.Errorf("Test [%v]: Pod %v is not getting evicted.", test.description, p.Name) + t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.description, p.Namespace, p.Name) } } else { if p.DeletionTimestamp != nil { - t.Errorf("Test [%v]: Didn't expect pod %v to get preempted.", test.description, p.Name) + t.Errorf("Test [%v]: Didn't expect pod %v/%v to get preempted.", test.description, p.Namespace, p.Name) } } } // Also check that the preemptor pod gets the annotation for nominated node name. if len(test.preemptedPodIndexes) > 0 { if err := waitForNominatedNodeName(cs, preemptor); err != nil { - t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v: %v", test.description, preemptor.Name, err) + t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err) } } diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index 608f679b9fb..3151768454a 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -20,19 +20,13 @@ package scheduler import ( "fmt" - "reflect" "testing" "time" "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/diff" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" 
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" @@ -671,95 +665,6 @@ func TestAllocatable(t *testing.T) { } } -// TestPDBCache verifies that scheduler cache works as expected when handling -// PodDisruptionBudget. -func TestPDBCache(t *testing.T) { - context := initTest(t, "pdbcache") - defer cleanupTest(t, context) - - intstrMin := intstr.FromInt(4) - pdb := &policy.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: context.ns.Name, - Name: "test-pdb", - UID: types.UID("test-pdb-uid"), - Labels: map[string]string{"tkey1": "tval1", "tkey2": "tval2"}, - }, - Spec: policy.PodDisruptionBudgetSpec{ - MinAvailable: &intstrMin, - Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"tkey": "tvalue"}}, - }, - } - - createdPDB, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Create(pdb) - if err != nil { - t.Errorf("Failed to create PDB: %v", err) - } - // Wait for PDB to show up in the scheduler's cache. - if err = wait.Poll(time.Second, 15*time.Second, func() (bool, error) { - cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything()) - if err != nil { - t.Errorf("Error while polling for PDB: %v", err) - return false, err - } - return len(cachedPDBs) > 0, err - }); err != nil { - t.Fatalf("No PDB was added to the cache: %v", err) - } - // Read PDB from the cache and compare it. - cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything()) - if len(cachedPDBs) != 1 { - t.Fatalf("Expected to have 1 pdb in cache, but found %d.", len(cachedPDBs)) - } - if !reflect.DeepEqual(createdPDB, cachedPDBs[0]) { - t.Errorf("Got different PDB than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(createdPDB, cachedPDBs[0])) - } - - // Update PDB and change its labels. 
- pdbCopy := *cachedPDBs[0] - pdbCopy.Labels = map[string]string{} - updatedPDB, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Update(&pdbCopy) - if err != nil { - t.Errorf("Failed to update PDB: %v", err) - } - // Wait for PDB to be updated in the scheduler's cache. - if err = wait.Poll(time.Second, 15*time.Second, func() (bool, error) { - cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything()) - if err != nil { - t.Errorf("Error while polling for PDB: %v", err) - return false, err - } - return len(cachedPDBs[0].Labels) == 0, err - }); err != nil { - t.Fatalf("No PDB was updated in the cache: %v", err) - } - // Read PDB from the cache and compare it. - cachedPDBs, err = context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything()) - if len(cachedPDBs) != 1 { - t.Errorf("Expected to have 1 pdb in cache, but found %d.", len(cachedPDBs)) - } - if !reflect.DeepEqual(updatedPDB, cachedPDBs[0]) { - t.Errorf("Got different PDB than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(updatedPDB, cachedPDBs[0])) - } - - // Delete PDB. - err = context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Delete(pdb.Name, &metav1.DeleteOptions{}) - if err != nil { - t.Errorf("Failed to delete PDB: %v", err) - } - // Wait for PDB to be deleted from the scheduler's cache. - if err = wait.Poll(time.Second, 15*time.Second, func() (bool, error) { - cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything()) - if err != nil { - t.Errorf("Error while polling for PDB: %v", err) - return false, err - } - return len(cachedPDBs) == 0, err - }); err != nil { - t.Errorf("No PDB was deleted from the cache: %v", err) - } -} - // TestSchedulerInformers tests that scheduler receives informer events and updates its cache when // pods are scheduled by other schedulers. 
func TestSchedulerInformers(t *testing.T) { diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go index c2e73dd5d68..2ed1d5c45c1 100644 --- a/test/integration/scheduler/util.go +++ b/test/integration/scheduler/util.go @@ -498,10 +498,10 @@ func runPausePod(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) { return nil, fmt.Errorf("Error creating pause pod: %v", err) } if err = waitForPodToSchedule(cs, pod); err != nil { - return pod, fmt.Errorf("Pod %v didn't schedule successfully. Error: %v", pod.Name, err) + return pod, fmt.Errorf("Pod %v/%v didn't schedule successfully. Error: %v", pod.Namespace, pod.Name, err) } if pod, err = cs.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil { - return pod, fmt.Errorf("Error getting pod %v info: %v", pod.Name, err) + return pod, fmt.Errorf("Error getting pod %v/%v info: %v", pod.Namespace, pod.Name, err) } return pod, nil } @@ -631,20 +631,20 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error { return waitForPodUnschedulableWithTimeout(cs, pod, 30*time.Second) } -// waitCachedPDBsStable waits for PDBs in scheduler cache to have "CurrentHealthy" status equal to +// waitForPDBsStable waits for PDBs to have "CurrentHealthy" status equal to // the expected values. 
-func waitCachedPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error { +func waitForPDBsStable(context *TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error { return wait.Poll(time.Second, 60*time.Second, func() (bool, error) { - cachedPDBs, err := context.scheduler.Config().SchedulerCache.ListPDBs(labels.Everything()) + pdbList, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).List(metav1.ListOptions{}) if err != nil { return false, err } - if len(cachedPDBs) != len(pdbs) { + if len(pdbList.Items) != len(pdbs) { return false, nil } for i, pdb := range pdbs { found := false - for _, cpdb := range cachedPDBs { + for _, cpdb := range pdbList.Items { if pdb.Name == cpdb.Name && pdb.Namespace == cpdb.Namespace { found = true if cpdb.Status.CurrentHealthy != pdbPodNum[i] { diff --git a/test/integration/volume/BUILD b/test/integration/volume/BUILD index 156b58ab2d4..0e6231fde6c 100644 --- a/test/integration/volume/BUILD +++ b/test/integration/volume/BUILD @@ -26,7 +26,6 @@ go_test( "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", - "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", diff --git a/test/integration/volume/attach_detach_test.go b/test/integration/volume/attach_detach_test.go index 956b90c587e..aa72838c058 100644 --- a/test/integration/volume/attach_detach_test.go +++ b/test/integration/volume/attach_detach_test.go @@ -23,7 +23,6 @@ import ( "time" "k8s.io/api/core/v1" - fakeapiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" "k8s.io/apimachinery/pkg/api/resource" 
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -394,7 +393,6 @@ func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, sy } resyncPeriod := 12 * time.Hour testClient := clientset.NewForConfigOrDie(&config) - fakeApiExtensionsClient := fakeapiextensionsclient.NewSimpleClientset() host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil) plugin := &volumetest.FakeVolumePlugin{ @@ -415,7 +413,6 @@ func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, sy ctrl, err := attachdetach.NewAttachDetachController( testClient, nil, /* csiClient */ - fakeApiExtensionsClient, /* crdClient */ informers.Core().V1().Pods(), informers.Core().V1().Nodes(), informers.Core().V1().PersistentVolumeClaims(), diff --git a/test/kubemark/start-kubemark.sh b/test/kubemark/start-kubemark.sh index 39784517dda..b3e17bfb746 100755 --- a/test/kubemark/start-kubemark.sh +++ b/test/kubemark/start-kubemark.sh @@ -64,7 +64,7 @@ SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-}" EVENT_PD="${EVENT_PD:-}" # Etcd related variables. -ETCD_IMAGE="${ETCD_IMAGE:-3.2.24-0}" +ETCD_IMAGE="${ETCD_IMAGE:-3.2.24-1}" ETCD_VERSION="${ETCD_VERSION:-}" # Controller-manager related variables. 
diff --git a/test/test_owners.csv b/test/test_owners.csv index 909fe148a88..fbdaa5700b3 100644 --- a/test/test_owners.csv +++ b/test/test_owners.csv @@ -809,9 +809,9 @@ k8s.io/kubernetes/pkg/util/tail,zmerlynn,1, k8s.io/kubernetes/pkg/util/taints,rrati,0, k8s.io/kubernetes/pkg/util/term,davidopp,1, k8s.io/kubernetes/pkg/util/threading,roberthbailey,1, -k8s.io/kubernetes/pkg/util/version,danwinship,0, +k8s.io/apimachinery/pkg/util/version,danwinship,0, k8s.io/kubernetes/pkg/volume,saad-ali,0, -k8s.io/kubernetes/pkg/volume/aws_ebs,caesarxuchao,1, +k8s.io/kubernetes/pkg/volume/awsebs,caesarxuchao,1, k8s.io/kubernetes/pkg/volume/azure_dd,bgrant0607,1, k8s.io/kubernetes/pkg/volume/azure_file,maisem,1, k8s.io/kubernetes/pkg/volume/cephfs,eparis,1, diff --git a/test/utils/image/BUILD b/test/utils/image/BUILD index 36f069e425c..4693827e8eb 100644 --- a/test/utils/image/BUILD +++ b/test/utils/image/BUILD @@ -9,6 +9,9 @@ go_library( name = "go_default_library", srcs = ["manifest.go"], importpath = "k8s.io/kubernetes/test/utils/image", + deps = [ + "//vendor/gopkg.in/yaml.v2:go_default_library", + ], ) filegroup( diff --git a/test/utils/image/manifest.go b/test/utils/image/manifest.go index 1373d0ca1c7..cbc174b2265 100644 --- a/test/utils/image/manifest.go +++ b/test/utils/image/manifest.go @@ -18,16 +18,19 @@ package image import ( "fmt" + "io/ioutil" + "os" + + yaml "gopkg.in/yaml.v2" ) -const ( - dockerLibraryRegistry = "docker.io/library" - e2eRegistry = "gcr.io/kubernetes-e2e-test-images" - gcRegistry = "k8s.gcr.io" - PrivateRegistry = "gcr.io/k8s-authenticated-test" - sampleRegistry = "gcr.io/google-samples" -) - +type RegistryList struct { + DockerLibraryRegistry string `yaml:"dockerLibraryRegistry"` + E2eRegistry string `yaml:"e2eRegistry"` + GcRegistry string `yaml:"gcRegistry"` + PrivateRegistry string `yaml:"privateRegistry"` + SampleRegistry string `yaml:"sampleRegistry"` +} type ImageConfig struct { registry string name string @@ -46,8 +49,40 @@ func (i 
*ImageConfig) SetVersion(version string) { i.version = version } +func initReg() RegistryList { + registry := RegistryList{ + DockerLibraryRegistry: "docker.io/library", + E2eRegistry: "gcr.io/kubernetes-e2e-test-images", + GcRegistry: "k8s.gcr.io", + PrivateRegistry: "gcr.io/k8s-authenticated-test", + SampleRegistry: "gcr.io/google-samples", + } + repoList := os.Getenv("KUBE_TEST_REPO_LIST") + if repoList == "" { + return registry + } + + fileContent, err := ioutil.ReadFile(repoList) + if err != nil { + panic(fmt.Errorf("Error reading '%v' file contents: %v", repoList, err)) + } + + err = yaml.Unmarshal(fileContent, ®istry) + if err != nil { + panic(fmt.Errorf("Error unmarshalling '%v' YAML file: %v", repoList, err)) + } + return registry +} + var ( - AdmissionWebhook = ImageConfig{e2eRegistry, "webhook", "1.12v2"} + registry = initReg() + dockerLibraryRegistry = registry.DockerLibraryRegistry + e2eRegistry = registry.E2eRegistry + gcRegistry = registry.GcRegistry + PrivateRegistry = registry.PrivateRegistry + sampleRegistry = registry.SampleRegistry + + AdmissionWebhook = ImageConfig{e2eRegistry, "webhook", "1.13v1"} APIServer = ImageConfig{e2eRegistry, "sample-apiserver", "1.0"} AppArmorLoader = ImageConfig{e2eRegistry, "apparmor-loader", "1.0"} BusyBox = ImageConfig{dockerLibraryRegistry, "busybox", "1.29"} diff --git a/vendor/github.com/docker/libnetwork/ipvs/ipvs.go b/vendor/github.com/docker/libnetwork/ipvs/ipvs.go index ebcdd808c34..ab10717089e 100644 --- a/vendor/github.com/docker/libnetwork/ipvs/ipvs.go +++ b/vendor/github.com/docker/libnetwork/ipvs/ipvs.go @@ -5,12 +5,19 @@ package ipvs import ( "net" "syscall" + "time" "fmt" + "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" ) +const ( + netlinkRecvSocketsTimeout = 3 * time.Second + netlinkSendSocketTimeout = 30 * time.Second +) + // Service defines an IPVS service in its entirety. type Service struct { // Virtual service address. 
@@ -46,13 +53,15 @@ type SvcStats struct { // Destination defines an IPVS destination (real server) in its // entirety. type Destination struct { - Address net.IP - Port uint16 - Weight int - ConnectionFlags uint32 - AddressFamily uint16 - UpperThreshold uint32 - LowerThreshold uint32 + Address net.IP + Port uint16 + Weight int + ConnectionFlags uint32 + AddressFamily uint16 + UpperThreshold uint32 + LowerThreshold uint32 + ActiveConnections int + InactiveConnections int } // Handle provides a namespace specific ipvs handle to program ipvs @@ -82,6 +91,15 @@ func New(path string) (*Handle, error) { if err != nil { return nil, err } + // Add operation timeout to avoid deadlocks + tv := syscall.NsecToTimeval(netlinkSendSocketTimeout.Nanoseconds()) + if err := sock.SetSendTimeout(&tv); err != nil { + return nil, err + } + tv = syscall.NsecToTimeval(netlinkRecvSocketsTimeout.Nanoseconds()) + if err := sock.SetReceiveTimeout(&tv); err != nil { + return nil, err + } return &Handle{sock: sock}, nil } diff --git a/vendor/github.com/docker/libnetwork/ipvs/netlink.go b/vendor/github.com/docker/libnetwork/ipvs/netlink.go index 2089283d14e..3c7b1a562ae 100644 --- a/vendor/github.com/docker/libnetwork/ipvs/netlink.go +++ b/vendor/github.com/docker/libnetwork/ipvs/netlink.go @@ -100,7 +100,7 @@ func fillService(s *Service) nl.NetlinkRequestData { return cmdAttr } -func fillDestinaton(d *Destination) nl.NetlinkRequestData { +func fillDestination(d *Destination) nl.NetlinkRequestData { cmdAttr := nl.NewRtAttr(ipvsCmdAttrDest, nil) nl.NewRtAttrChild(cmdAttr, ipvsDestAttrAddress, rawIPData(d.Address)) @@ -134,7 +134,7 @@ func (i *Handle) doCmdwithResponse(s *Service, d *Destination, cmd uint8) ([][]b } } else { - req.AddData(fillDestinaton(d)) + req.AddData(fillDestination(d)) } res, err := execute(i.sock, req, 0) @@ -203,10 +203,6 @@ func newGenlRequest(familyID int, cmd uint8) *nl.NetlinkRequest { } func execute(s *nl.NetlinkSocket, req *nl.NetlinkRequest, resType uint16) 
([][]byte, error) { - var ( - err error - ) - if err := s.Send(req); err != nil { return nil, err } @@ -222,6 +218,13 @@ done: for { msgs, err := s.Receive() if err != nil { + if s.GetFd() == -1 { + return nil, fmt.Errorf("Socket got closed on receive") + } + if err == syscall.EAGAIN { + // timeout fired + continue + } return nil, err } for _, m := range msgs { @@ -436,6 +439,10 @@ func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) d.LowerThreshold = native.Uint32(attr.Value) case ipvsDestAttrAddressFamily: d.AddressFamily = native.Uint16(attr.Value) + case ipvsDestAttrActiveConnections: + d.ActiveConnections = int(native.Uint16(attr.Value)) + case ipvsDestAttrInactiveConnections: + d.InactiveConnections = int(native.Uint16(attr.Value)) } } return &d, nil diff --git a/vendor/github.com/modern-go/reflect2/.travis.yml b/vendor/github.com/modern-go/reflect2/.travis.yml index 449e67cd01a..fbb43744d94 100644 --- a/vendor/github.com/modern-go/reflect2/.travis.yml +++ b/vendor/github.com/modern-go/reflect2/.travis.yml @@ -6,6 +6,7 @@ go: before_install: - go get -t -v ./... + - go get -t -v github.com/modern-go/reflect2-tests/... 
script: - ./test.sh diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml index 3593fd0417f..2f4f4dbdcc5 100644 --- a/vendor/github.com/modern-go/reflect2/Gopkg.toml +++ b/vendor/github.com/modern-go/reflect2/Gopkg.toml @@ -24,7 +24,7 @@ # go-tests = true # unused-packages = true -ignored = ["github.com/modern-go/test","github.com/modern-go/test/must","github.com/modern-go/test/should"] +ignored = [] [[constraint]] name = "github.com/modern-go/concurrent" diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go index 0632b71fb03..63b49c79919 100644 --- a/vendor/github.com/modern-go/reflect2/reflect2.go +++ b/vendor/github.com/modern-go/reflect2/reflect2.go @@ -150,6 +150,9 @@ func (cfg *frozenConfig) TypeOf(obj interface{}) Type { } func (cfg *frozenConfig) Type2(type1 reflect.Type) Type { + if type1 == nil { + return nil + } cacheKey := uintptr(unpackEFace(type1).data) typeObj, found := cfg.cache.Load(cacheKey) if found { diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh index fbcef73098b..3d2b9768ce6 100755 --- a/vendor/github.com/modern-go/reflect2/test.sh +++ b/vendor/github.com/modern-go/reflect2/test.sh @@ -3,7 +3,7 @@ set -e echo "" > coverage.txt -for d in $(go list ./... | grep -v vendor); do +for d in $(go list github.com/modern-go/reflect2-tests/... 
| grep -v vendor); do go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d if [ -f profile.out ]; then cat profile.out >> coverage.txt diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go index 6d489112ffd..3acfb55803a 100644 --- a/vendor/github.com/modern-go/reflect2/type_map.go +++ b/vendor/github.com/modern-go/reflect2/type_map.go @@ -4,6 +4,7 @@ import ( "reflect" "runtime" "strings" + "sync" "unsafe" ) @@ -15,10 +16,17 @@ func typelinks1() [][]unsafe.Pointer //go:linkname typelinks2 reflect.typelinks func typelinks2() (sections []unsafe.Pointer, offset [][]int32) -var types = map[string]reflect.Type{} -var packages = map[string]map[string]reflect.Type{} +// initOnce guards initialization of types and packages +var initOnce sync.Once + +var types map[string]reflect.Type +var packages map[string]map[string]reflect.Type + +// discoverTypes initializes types and packages +func discoverTypes() { + types = make(map[string]reflect.Type) + packages = make(map[string]map[string]reflect.Type) -func init() { ver := runtime.Version() if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") { loadGo15Types() @@ -90,11 +98,13 @@ type emptyInterface struct { // TypeByName return the type by its name, just like Class.forName in java func TypeByName(typeName string) Type { + initOnce.Do(discoverTypes) return Type2(types[typeName]) } // TypeByPackageName return the type by its package and name func TypeByPackageName(pkgPath string, name string) Type { + initOnce.Do(discoverTypes) pkgTypes := packages[pkgPath] if pkgTypes == nil { return nil diff --git a/vendor/github.com/vishvananda/netlink/BUILD b/vendor/github.com/vishvananda/netlink/BUILD index 241a5366f95..fd84bb5129d 100644 --- a/vendor/github.com/vishvananda/netlink/BUILD +++ b/vendor/github.com/vishvananda/netlink/BUILD @@ -13,9 +13,6 @@ go_library( "conntrack_unspecified.go", "filter.go", "filter_linux.go", - "fou.go", - "fou_linux.go", - 
"fou_unspecified.go", "genetlink_linux.go", "genetlink_unspecified.go", "gtp_linux.go", @@ -54,7 +51,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//vendor/github.com/vishvananda/netlink/nl:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", ] + select({ "@io_bazel_rules_go//go/platform:android": [ "//vendor/github.com/vishvananda/netns:go_default_library", diff --git a/vendor/github.com/vishvananda/netlink/Makefile b/vendor/github.com/vishvananda/netlink/Makefile index a0e68e7a9aa..6c8413b13a5 100644 --- a/vendor/github.com/vishvananda/netlink/Makefile +++ b/vendor/github.com/vishvananda/netlink/Makefile @@ -3,8 +3,7 @@ DIRS := \ nl DEPS = \ - github.com/vishvananda/netns \ - golang.org/x/sys/unix + github.com/vishvananda/netns uniq = $(if $1,$(firstword $1) $(call uniq,$(filter-out $(firstword $1),$1))) testdirs = $(call uniq,$(foreach d,$(1),$(dir $(wildcard $(d)/*_test.go)))) @@ -19,7 +18,7 @@ $(call goroot,$(DEPS)): .PHONY: $(call testdirs,$(DIRS)) $(call testdirs,$(DIRS)): - go test -test.exec sudo -test.parallel 4 -timeout 60s -test.v github.com/vishvananda/netlink/$@ + sudo -E go test -test.parallel 4 -timeout 60s -v github.com/vishvananda/netlink/$@ $(call fmt,$(call testdirs,$(DIRS))): ! gofmt -l $(subst fmt-,,$@)/*.go | grep -q . diff --git a/vendor/github.com/vishvananda/netlink/README.md b/vendor/github.com/vishvananda/netlink/README.md index a88e2f41840..0b61be217e0 100644 --- a/vendor/github.com/vishvananda/netlink/README.md +++ b/vendor/github.com/vishvananda/netlink/README.md @@ -89,4 +89,3 @@ There are also a few pieces of low level netlink functionality that still need to be implemented. Routing rules are not in place and some of the more advanced link types. Hopefully there is decent structure and testing in place to make these fairly straightforward to add. 
- diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go index 8597ab7fcbc..8808b42d9b6 100644 --- a/vendor/github.com/vishvananda/netlink/addr_linux.go +++ b/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -4,10 +4,10 @@ import ( "fmt" "net" "strings" + "syscall" "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" - "golang.org/x/sys/unix" ) // IFA_FLAGS is a u32 attribute. @@ -22,7 +22,7 @@ func AddrAdd(link Link, addr *Addr) error { // AddrAdd will add an IP address to a link device. // Equivalent to: `ip addr add $addr dev $link` func (h *Handle) AddrAdd(link Link, addr *Addr) error { - req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) return h.addrHandle(link, addr, req) } @@ -35,7 +35,7 @@ func AddrReplace(link Link, addr *Addr) error { // AddrReplace will replace (or, if not present, add) an IP address on a link device. // Equivalent to: `ip addr replace $addr dev $link` func (h *Handle) AddrReplace(link Link, addr *Addr) error { - req := h.newNetlinkRequest(unix.RTM_NEWADDR, unix.NLM_F_CREATE|unix.NLM_F_REPLACE|unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE|syscall.NLM_F_ACK) return h.addrHandle(link, addr, req) } @@ -48,7 +48,7 @@ func AddrDel(link Link, addr *Addr) error { // AddrDel will delete an IP address from a link device. 
// Equivalent to: `ip addr del $addr dev $link` func (h *Handle) AddrDel(link Link, addr *Addr) error { - req := h.newNetlinkRequest(unix.RTM_DELADDR, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_DELADDR, syscall.NLM_F_ACK) return h.addrHandle(link, addr, req) } @@ -75,7 +75,7 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error localAddrData = addr.IP.To16() } - localData := nl.NewRtAttr(unix.IFA_LOCAL, localAddrData) + localData := nl.NewRtAttr(syscall.IFA_LOCAL, localAddrData) req.AddData(localData) var peerAddrData []byte if addr.Peer != nil { @@ -88,7 +88,7 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error peerAddrData = localAddrData } - addressData := nl.NewRtAttr(unix.IFA_ADDRESS, peerAddrData) + addressData := nl.NewRtAttr(syscall.IFA_ADDRESS, peerAddrData) req.AddData(addressData) if addr.Flags != 0 { @@ -109,14 +109,14 @@ func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error } addr.Broadcast = calcBroadcast } - req.AddData(nl.NewRtAttr(unix.IFA_BROADCAST, addr.Broadcast)) + req.AddData(nl.NewRtAttr(syscall.IFA_BROADCAST, addr.Broadcast)) if addr.Label != "" { - labelData := nl.NewRtAttr(unix.IFA_LABEL, nl.ZeroTerminated(addr.Label)) + labelData := nl.NewRtAttr(syscall.IFA_LABEL, nl.ZeroTerminated(addr.Label)) req.AddData(labelData) } - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -131,11 +131,11 @@ func AddrList(link Link, family int) ([]Addr, error) { // Equivalent to: `ip addr show`. // The list can be filtered by link and ip family. 
func (h *Handle) AddrList(link Link, family int) ([]Addr, error) { - req := h.newNetlinkRequest(unix.RTM_GETADDR, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(syscall.RTM_GETADDR, syscall.NLM_F_DUMP) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWADDR) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWADDR) if err != nil { return nil, err } @@ -187,21 +187,21 @@ func parseAddr(m []byte) (addr Addr, family, index int, err error) { var local, dst *net.IPNet for _, attr := range attrs { switch attr.Attr.Type { - case unix.IFA_ADDRESS: + case syscall.IFA_ADDRESS: dst = &net.IPNet{ IP: attr.Value, Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)), } addr.Peer = dst - case unix.IFA_LOCAL: + case syscall.IFA_LOCAL: local = &net.IPNet{ IP: attr.Value, Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)), } addr.IPNet = local - case unix.IFA_BROADCAST: + case syscall.IFA_BROADCAST: addr.Broadcast = attr.Value - case unix.IFA_LABEL: + case syscall.IFA_LABEL: addr.Label = string(attr.Value[:len(attr.Value)-1]) case IFA_FLAGS: addr.Flags = int(native.Uint32(attr.Value[0:4])) @@ -264,7 +264,7 @@ func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, option } func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error)) error { - s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_IFADDR, unix.RTNLGRP_IPV6_IFADDR) + s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_IFADDR, syscall.RTNLGRP_IPV6_IFADDR) if err != nil { return err } @@ -286,7 +286,7 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c } for _, m := range msgs { msgType := m.Header.Type - if msgType != unix.RTM_NEWADDR && msgType != unix.RTM_DELADDR { + if msgType != syscall.RTM_NEWADDR && msgType != syscall.RTM_DELADDR { if cberr != nil { cberr(fmt.Errorf("bad message 
type: %d", msgType)) } @@ -303,7 +303,7 @@ func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-c ch <- AddrUpdate{LinkAddress: *addr.IPNet, LinkIndex: ifindex, - NewAddr: msgType == unix.RTM_NEWADDR, + NewAddr: msgType == syscall.RTM_NEWADDR, Flags: addr.Flags, Scope: addr.Scope, PreferedLft: addr.PreferedLft, diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go index 6eb331ef154..a65d6a1319a 100644 --- a/vendor/github.com/vishvananda/netlink/bridge_linux.go +++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go @@ -2,9 +2,9 @@ package netlink import ( "fmt" + "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) // BridgeVlanList gets a map of device id to bridge vlan infos. @@ -16,12 +16,12 @@ func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { // BridgeVlanList gets a map of device id to bridge vlan infos. // Equivalent to: `bridge vlan show` func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { - req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) - msg := nl.NewIfInfomsg(unix.AF_BRIDGE) + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) + msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.IFLA_EXT_MASK, nl.Uint32Attr(uint32(nl.RTEXT_FILTER_BRVLAN)))) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK) if err != nil { return nil, err } @@ -63,7 +63,7 @@ func BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) err // BridgeVlanAdd adds a new vlan filter entry // Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(unix.RTM_SETLINK, link, vid, pvid, untagged, self, master) + return 
h.bridgeVlanModify(syscall.RTM_SETLINK, link, vid, pvid, untagged, self, master) } // BridgeVlanDel adds a new vlan filter entry @@ -75,15 +75,15 @@ func BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) err // BridgeVlanDel adds a new vlan filter entry // Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]` func (h *Handle) BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error { - return h.bridgeVlanModify(unix.RTM_DELLINK, link, vid, pvid, untagged, self, master) + return h.bridgeVlanModify(syscall.RTM_DELLINK, link, vid, pvid, untagged, self, master) } func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged, self, master bool) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(cmd, unix.NLM_F_ACK) + req := h.newNetlinkRequest(cmd, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_BRIDGE) + msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) msg.Index = int32(base.Index) req.AddData(msg) @@ -107,7 +107,7 @@ func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged } nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize()) req.AddData(br) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) if err != nil { return err } diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go index a4997740e29..91cd3883de9 100644 --- a/vendor/github.com/vishvananda/netlink/class_linux.go +++ b/vendor/github.com/vishvananda/netlink/class_linux.go @@ -5,7 +5,6 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) // NOTE: function is in here because it uses other linux functions @@ -51,7 +50,7 @@ func ClassDel(class Class) error { // ClassDel will delete a class from the system. 
// Equivalent to: `tc class del $class` func (h *Handle) ClassDel(class Class) error { - return h.classModify(unix.RTM_DELTCLASS, 0, class) + return h.classModify(syscall.RTM_DELTCLASS, 0, class) } // ClassChange will change a class in place @@ -65,7 +64,7 @@ func ClassChange(class Class) error { // Equivalent to: `tc class change $class` // The parent and handle MUST NOT be changed. func (h *Handle) ClassChange(class Class) error { - return h.classModify(unix.RTM_NEWTCLASS, 0, class) + return h.classModify(syscall.RTM_NEWTCLASS, 0, class) } // ClassReplace will replace a class to the system. @@ -83,7 +82,7 @@ func ClassReplace(class Class) error { // If a class already exist with this parent/handle pair, the class is changed. // If a class does not already exist with this parent/handle, a new class is created. func (h *Handle) ClassReplace(class Class) error { - return h.classModify(unix.RTM_NEWTCLASS, unix.NLM_F_CREATE, class) + return h.classModify(syscall.RTM_NEWTCLASS, syscall.NLM_F_CREATE, class) } // ClassAdd will add a class to the system. 
@@ -96,14 +95,14 @@ func ClassAdd(class Class) error { // Equivalent to: `tc class add $class` func (h *Handle) ClassAdd(class Class) error { return h.classModify( - unix.RTM_NEWTCLASS, - unix.NLM_F_CREATE|unix.NLM_F_EXCL, + syscall.RTM_NEWTCLASS, + syscall.NLM_F_CREATE|syscall.NLM_F_EXCL, class, ) } func (h *Handle) classModify(cmd, flags int, class Class) error { - req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK) + req := h.newNetlinkRequest(cmd, flags|syscall.NLM_F_ACK) base := class.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -113,12 +112,12 @@ func (h *Handle) classModify(cmd, flags int, class Class) error { } req.AddData(msg) - if cmd != unix.RTM_DELTCLASS { + if cmd != syscall.RTM_DELTCLASS { if err := classPayload(req, class); err != nil { return err } } - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -142,12 +141,12 @@ func classPayload(req *nl.NetlinkRequest, class Class) error { var rtab [256]uint32 var ctab [256]uint32 tcrate := nl.TcRateSpec{Rate: uint32(htb.Rate)} - if CalcRtable(&tcrate, rtab[:], cellLog, uint32(mtu), linklayer) < 0 { + if CalcRtable(&tcrate, rtab, cellLog, uint32(mtu), linklayer) < 0 { return errors.New("HTB: failed to calculate rate table") } opt.Rate = tcrate tcceil := nl.TcRateSpec{Rate: uint32(htb.Ceil)} - if CalcRtable(&tcceil, ctab[:], ccellLog, uint32(mtu), linklayer) < 0 { + if CalcRtable(&tcceil, ctab, ccellLog, uint32(mtu), linklayer) < 0 { return errors.New("HTB: failed to calculate ceil rate table") } opt.Ceil = tcceil @@ -170,7 +169,7 @@ func ClassList(link Link, parent uint32) ([]Class, error) { // Equivalent to: `tc class show`. // Generally returns nothing if link and parent are not specified. 
func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { - req := h.newNetlinkRequest(unix.RTM_GETTCLASS, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(syscall.RTM_GETTCLASS, syscall.NLM_F_DUMP) msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, Parent: parent, @@ -182,7 +181,7 @@ func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { } req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTCLASS) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTCLASS) if err != nil { return nil, err } diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go index a0fc74a3722..ecf04456590 100644 --- a/vendor/github.com/vishvananda/netlink/conntrack_linux.go +++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go @@ -6,9 +6,9 @@ import ( "errors" "fmt" "net" + "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) // ConntrackTableType Conntrack table for the netlink operation @@ -85,8 +85,8 @@ func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) // conntrack -F [table] Flush table // The flush operation applies to all the family types func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error { - req := h.newConntrackRequest(table, unix.AF_INET, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) - _, err := req.Execute(unix.NETLINK_NETFILTER, 0) + req := h.newConntrackRequest(table, syscall.AF_INET, nl.IPCTNL_MSG_CT_DELETE, syscall.NLM_F_ACK) + _, err := req.Execute(syscall.NETLINK_NETFILTER, 0) return err } @@ -102,10 +102,10 @@ func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFami for _, dataRaw := range res { flow := parseRawData(dataRaw) if match := filter.MatchConntrackFlow(flow); match { - req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, unix.NLM_F_ACK) + req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, syscall.NLM_F_ACK) // 
skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already req2.AddRawData(dataRaw[4:]) - req2.Execute(unix.NETLINK_NETFILTER, 0) + req2.Execute(syscall.NETLINK_NETFILTER, 0) matched++ } } @@ -127,8 +127,8 @@ func (h *Handle) newConntrackRequest(table ConntrackTableType, family InetFamily } func (h *Handle) dumpConntrackTable(table ConntrackTableType, family InetFamily) ([][]byte, error) { - req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_GET, unix.NLM_F_DUMP) - return req.Execute(unix.NETLINK_NETFILTER, 0) + req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_GET, syscall.NLM_F_DUMP) + return req.Execute(syscall.NETLINK_NETFILTER, 0) } // The full conntrack flow structure is very complicated and can be found in the file: diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go index 30b541494e2..1120c79d6a9 100644 --- a/vendor/github.com/vishvananda/netlink/filter.go +++ b/vendor/github.com/vishvananda/netlink/filter.go @@ -17,7 +17,7 @@ type FilterAttrs struct { Handle uint32 Parent uint32 Priority uint16 // lower is higher priority - Protocol uint16 // unix.ETH_P_* + Protocol uint16 // syscall.ETH_P_* } func (q FilterAttrs) String() string { diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go index 7cb7a4fd940..5025bd56c1b 100644 --- a/vendor/github.com/vishvananda/netlink/filter_linux.go +++ b/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -9,7 +9,6 @@ import ( "unsafe" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) // Constants used in TcU32Sel.Flags. 
@@ -56,7 +55,7 @@ func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) { if police.Rate.Rate != 0 { police.Rate.Mpu = fattrs.Mpu police.Rate.Overhead = fattrs.Overhead - if CalcRtable(&police.Rate, rtab[:], rcellLog, fattrs.Mtu, linklayer) < 0 { + if CalcRtable(&police.Rate, rtab, rcellLog, fattrs.Mtu, linklayer) < 0 { return nil, errors.New("TBF: failed to calculate rate table") } police.Burst = uint32(Xmittime(uint64(police.Rate.Rate), uint32(buffer))) @@ -65,7 +64,7 @@ func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) { if police.PeakRate.Rate != 0 { police.PeakRate.Mpu = fattrs.Mpu police.PeakRate.Overhead = fattrs.Overhead - if CalcRtable(&police.PeakRate, ptab[:], pcellLog, fattrs.Mtu, linklayer) < 0 { + if CalcRtable(&police.PeakRate, ptab, pcellLog, fattrs.Mtu, linklayer) < 0 { return nil, errors.New("POLICE: failed to calculate peak rate table") } } @@ -99,7 +98,7 @@ func FilterDel(filter Filter) error { // FilterDel will delete a filter from the system. 
// Equivalent to: `tc filter del $filter` func (h *Handle) FilterDel(filter Filter) error { - req := h.newNetlinkRequest(unix.RTM_DELTFILTER, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_DELTFILTER, syscall.NLM_F_ACK) base := filter.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -110,7 +109,7 @@ func (h *Handle) FilterDel(filter Filter) error { } req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -124,7 +123,7 @@ func FilterAdd(filter Filter) error { // Equivalent to: `tc filter add $filter` func (h *Handle) FilterAdd(filter Filter) error { native = nl.NativeEndian() - req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_NEWTFILTER, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) base := filter.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -225,7 +224,7 @@ func (h *Handle) FilterAdd(filter Filter) error { } req.AddData(options) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -240,7 +239,7 @@ func FilterList(link Link, parent uint32) ([]Filter, error) { // Equivalent to: `tc filter show`. // Generally returns nothing if link and parent are not specified. 
func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { - req := h.newNetlinkRequest(unix.RTM_GETTFILTER, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(syscall.RTM_GETTFILTER, syscall.NLM_F_DUMP) msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, Parent: parent, @@ -252,7 +251,7 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { } req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWTFILTER) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTFILTER) if err != nil { return nil, err } @@ -563,7 +562,7 @@ func AdjustSize(sz uint, mpu uint, linklayer int) uint { } } -func CalcRtable(rate *nl.TcRateSpec, rtab []uint32, cellLog int, mtu uint32, linklayer int) int { +func CalcRtable(rate *nl.TcRateSpec, rtab [256]uint32, cellLog int, mtu uint32, linklayer int) int { bps := rate.Rate mpu := rate.Mpu var sz uint diff --git a/vendor/github.com/vishvananda/netlink/fou.go b/vendor/github.com/vishvananda/netlink/fou.go deleted file mode 100644 index 71e73c37a0a..00000000000 --- a/vendor/github.com/vishvananda/netlink/fou.go +++ /dev/null @@ -1,21 +0,0 @@ -package netlink - -import ( - "errors" -) - -var ( - // ErrAttrHeaderTruncated is returned when a netlink attribute's header is - // truncated. - ErrAttrHeaderTruncated = errors.New("attribute header truncated") - // ErrAttrBodyTruncated is returned when a netlink attribute's body is - // truncated. 
- ErrAttrBodyTruncated = errors.New("attribute body truncated") -) - -type Fou struct { - Family int - Port int - Protocol int - EncapType int -} diff --git a/vendor/github.com/vishvananda/netlink/fou_linux.go b/vendor/github.com/vishvananda/netlink/fou_linux.go deleted file mode 100644 index 62d59bd2d09..00000000000 --- a/vendor/github.com/vishvananda/netlink/fou_linux.go +++ /dev/null @@ -1,215 +0,0 @@ -// +build linux - -package netlink - -import ( - "encoding/binary" - "errors" - - "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" -) - -const ( - FOU_GENL_NAME = "fou" -) - -const ( - FOU_CMD_UNSPEC uint8 = iota - FOU_CMD_ADD - FOU_CMD_DEL - FOU_CMD_GET - FOU_CMD_MAX = FOU_CMD_GET -) - -const ( - FOU_ATTR_UNSPEC = iota - FOU_ATTR_PORT - FOU_ATTR_AF - FOU_ATTR_IPPROTO - FOU_ATTR_TYPE - FOU_ATTR_REMCSUM_NOPARTIAL - FOU_ATTR_MAX = FOU_ATTR_REMCSUM_NOPARTIAL -) - -const ( - FOU_ENCAP_UNSPEC = iota - FOU_ENCAP_DIRECT - FOU_ENCAP_GUE - FOU_ENCAP_MAX = FOU_ENCAP_GUE -) - -var fouFamilyId int - -func FouFamilyId() (int, error) { - if fouFamilyId != 0 { - return fouFamilyId, nil - } - - fam, err := GenlFamilyGet(FOU_GENL_NAME) - if err != nil { - return -1, err - } - - fouFamilyId = int(fam.ID) - return fouFamilyId, nil -} - -func FouAdd(f Fou) error { - return pkgHandle.FouAdd(f) -} - -func (h *Handle) FouAdd(f Fou) error { - fam_id, err := FouFamilyId() - if err != nil { - return err - } - - // setting ip protocol conflicts with encapsulation type GUE - if f.EncapType == FOU_ENCAP_GUE && f.Protocol != 0 { - return errors.New("GUE encapsulation doesn't specify an IP protocol") - } - - req := h.newNetlinkRequest(fam_id, unix.NLM_F_ACK) - - // int to byte for port - bp := make([]byte, 2) - binary.BigEndian.PutUint16(bp[0:2], uint16(f.Port)) - - attrs := []*nl.RtAttr{ - nl.NewRtAttr(FOU_ATTR_PORT, bp), - nl.NewRtAttr(FOU_ATTR_TYPE, []byte{uint8(f.EncapType)}), - nl.NewRtAttr(FOU_ATTR_AF, []byte{uint8(f.Family)}), - nl.NewRtAttr(FOU_ATTR_IPPROTO, 
[]byte{uint8(f.Protocol)}), - } - raw := []byte{FOU_CMD_ADD, 1, 0, 0} - for _, a := range attrs { - raw = append(raw, a.Serialize()...) - } - - req.AddRawData(raw) - - _, err = req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return err - } - - return nil -} - -func FouDel(f Fou) error { - return pkgHandle.FouDel(f) -} - -func (h *Handle) FouDel(f Fou) error { - fam_id, err := FouFamilyId() - if err != nil { - return err - } - - req := h.newNetlinkRequest(fam_id, unix.NLM_F_ACK) - - // int to byte for port - bp := make([]byte, 2) - binary.BigEndian.PutUint16(bp[0:2], uint16(f.Port)) - - attrs := []*nl.RtAttr{ - nl.NewRtAttr(FOU_ATTR_PORT, bp), - nl.NewRtAttr(FOU_ATTR_AF, []byte{uint8(f.Family)}), - } - raw := []byte{FOU_CMD_DEL, 1, 0, 0} - for _, a := range attrs { - raw = append(raw, a.Serialize()...) - } - - req.AddRawData(raw) - - _, err = req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return err - } - - return nil -} - -func FouList(fam int) ([]Fou, error) { - return pkgHandle.FouList(fam) -} - -func (h *Handle) FouList(fam int) ([]Fou, error) { - fam_id, err := FouFamilyId() - if err != nil { - return nil, err - } - - req := h.newNetlinkRequest(fam_id, unix.NLM_F_DUMP) - - attrs := []*nl.RtAttr{ - nl.NewRtAttr(FOU_ATTR_AF, []byte{uint8(fam)}), - } - raw := []byte{FOU_CMD_GET, 1, 0, 0} - for _, a := range attrs { - raw = append(raw, a.Serialize()...) 
- } - - req.AddRawData(raw) - - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) - if err != nil { - return nil, err - } - - fous := make([]Fou, 0, len(msgs)) - for _, m := range msgs { - f, err := deserializeFouMsg(m) - if err != nil { - return fous, err - } - - fous = append(fous, f) - } - - return fous, nil -} - -func deserializeFouMsg(msg []byte) (Fou, error) { - // we'll skip to byte 4 to first attribute - msg = msg[3:] - var shift int - fou := Fou{} - - for { - // attribute header is at least 16 bits - if len(msg) < 4 { - return fou, ErrAttrHeaderTruncated - } - - lgt := int(binary.BigEndian.Uint16(msg[0:2])) - if len(msg) < lgt+4 { - return fou, ErrAttrBodyTruncated - } - attr := binary.BigEndian.Uint16(msg[2:4]) - - shift = lgt + 3 - switch attr { - case FOU_ATTR_AF: - fou.Family = int(msg[5]) - case FOU_ATTR_PORT: - fou.Port = int(binary.BigEndian.Uint16(msg[5:7])) - // port is 2 bytes - shift = lgt + 2 - case FOU_ATTR_IPPROTO: - fou.Protocol = int(msg[5]) - case FOU_ATTR_TYPE: - fou.EncapType = int(msg[5]) - } - - msg = msg[shift:] - - if len(msg) < 4 { - break - } - } - - return fou, nil -} diff --git a/vendor/github.com/vishvananda/netlink/fou_unspecified.go b/vendor/github.com/vishvananda/netlink/fou_unspecified.go deleted file mode 100644 index 3a8365bfe62..00000000000 --- a/vendor/github.com/vishvananda/netlink/fou_unspecified.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !linux - -package netlink - -func FouAdd(f Fou) error { - return ErrNotImplemented -} - -func FouDel(f Fou) error { - return ErrNotImplemented -} - -func FouList(fam int) ([]Fou, error) { - return nil, ErrNotImplemented -} diff --git a/vendor/github.com/vishvananda/netlink/genetlink_linux.go b/vendor/github.com/vishvananda/netlink/genetlink_linux.go index ce7969907d4..a388a87001c 100644 --- a/vendor/github.com/vishvananda/netlink/genetlink_linux.go +++ b/vendor/github.com/vishvananda/netlink/genetlink_linux.go @@ -5,7 +5,6 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" - 
"golang.org/x/sys/unix" ) type GenlOp struct { @@ -131,9 +130,9 @@ func (h *Handle) GenlFamilyList() ([]*GenlFamily, error) { Command: nl.GENL_CTRL_CMD_GETFAMILY, Version: nl.GENL_CTRL_VERSION, } - req := h.newNetlinkRequest(nl.GENL_ID_CTRL, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(nl.GENL_ID_CTRL, syscall.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) if err != nil { return nil, err } @@ -152,7 +151,7 @@ func (h *Handle) GenlFamilyGet(name string) (*GenlFamily, error) { req := h.newNetlinkRequest(nl.GENL_ID_CTRL, 0) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.GENL_CTRL_ATTR_FAMILY_NAME, nl.ZeroTerminated(name))) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) if err != nil { return nil, err } diff --git a/vendor/github.com/vishvananda/netlink/gtp_linux.go b/vendor/github.com/vishvananda/netlink/gtp_linux.go index f5e160ba5c0..7331303ecbe 100644 --- a/vendor/github.com/vishvananda/netlink/gtp_linux.go +++ b/vendor/github.com/vishvananda/netlink/gtp_linux.go @@ -7,7 +7,6 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) type PDP struct { @@ -83,9 +82,9 @@ func (h *Handle) GTPPDPList() ([]*PDP, error) { Command: nl.GENL_GTP_CMD_GETPDP, Version: nl.GENL_GTP_VERSION, } - req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_DUMP) + req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_DUMP) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) if err != nil { return nil, err } @@ -97,7 +96,7 @@ func GTPPDPList() ([]*PDP, error) { } func gtpPDPGet(req *nl.NetlinkRequest) (*PDP, error) { - msgs, err := req.Execute(unix.NETLINK_GENERIC, 0) + msgs, err := req.Execute(syscall.NETLINK_GENERIC, 0) if err != nil { return nil, err } @@ -183,7 +182,7 @@ func (h *Handle) GTPPDPAdd(link Link, pdp *PDP) error { Command: 
nl.GENL_GTP_CMD_NEWPDP, Version: nl.GENL_GTP_VERSION, } - req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_EXCL|syscall.NLM_F_ACK) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_VERSION, nl.Uint32Attr(pdp.Version))) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_LINK, nl.Uint32Attr(uint32(link.Attrs().Index)))) @@ -200,7 +199,7 @@ func (h *Handle) GTPPDPAdd(link Link, pdp *PDP) error { default: return fmt.Errorf("unsupported GTP version: %d", pdp.Version) } - _, err = req.Execute(unix.NETLINK_GENERIC, 0) + _, err = req.Execute(syscall.NETLINK_GENERIC, 0) return err } @@ -217,7 +216,7 @@ func (h *Handle) GTPPDPDel(link Link, pdp *PDP) error { Command: nl.GENL_GTP_CMD_DELPDP, Version: nl.GENL_GTP_VERSION, } - req := h.newNetlinkRequest(int(f.ID), unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(int(f.ID), syscall.NLM_F_EXCL|syscall.NLM_F_ACK) req.AddData(msg) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_VERSION, nl.Uint32Attr(pdp.Version))) req.AddData(nl.NewRtAttr(nl.GENL_GTP_ATTR_LINK, nl.Uint32Attr(uint32(link.Attrs().Index)))) @@ -230,7 +229,7 @@ func (h *Handle) GTPPDPDel(link Link, pdp *PDP) error { default: return fmt.Errorf("unsupported GTP version: %d", pdp.Version) } - _, err = req.Execute(unix.NETLINK_GENERIC, 0) + _, err = req.Execute(syscall.NETLINK_GENERIC, 0) return err } diff --git a/vendor/github.com/vishvananda/netlink/handle_linux.go b/vendor/github.com/vishvananda/netlink/handle_linux.go index 9f6d7fe0fbd..d37b087c33e 100644 --- a/vendor/github.com/vishvananda/netlink/handle_linux.go +++ b/vendor/github.com/vishvananda/netlink/handle_linux.go @@ -2,11 +2,11 @@ package netlink import ( "fmt" + "syscall" "time" "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" - "golang.org/x/sys/unix" ) // Empty handle used by the netlink package methods @@ -43,7 +43,7 @@ func (h *Handle) SetSocketTimeout(to time.Duration) error { if to < time.Microsecond { 
return fmt.Errorf("invalid timeout, minimul value is %s", time.Microsecond) } - tv := unix.NsecToTimeval(to.Nanoseconds()) + tv := syscall.NsecToTimeval(to.Nanoseconds()) for _, sh := range h.sockets { if err := sh.Socket.SetSendTimeout(&tv); err != nil { return err @@ -59,13 +59,13 @@ func (h *Handle) SetSocketTimeout(to time.Duration) error { // socket in the netlink handle. The maximum value is capped by // /proc/sys/net/core/rmem_max. func (h *Handle) SetSocketReceiveBufferSize(size int, force bool) error { - opt := unix.SO_RCVBUF + opt := syscall.SO_RCVBUF if force { - opt = unix.SO_RCVBUFFORCE + opt = syscall.SO_RCVBUFFORCE } for _, sh := range h.sockets { fd := sh.Socket.GetFd() - err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, opt, size) + err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, opt, size) if err != nil { return err } @@ -81,7 +81,7 @@ func (h *Handle) GetSocketReceiveBufferSize() ([]int, error) { i := 0 for _, sh := range h.sockets { fd := sh.Socket.GetFd() - size, err := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF) + size, err := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUF) if err != nil { return nil, err } @@ -134,10 +134,10 @@ func (h *Handle) newNetlinkRequest(proto, flags int) *nl.NetlinkRequest { return nl.NewNetlinkRequest(proto, flags) } return &nl.NetlinkRequest{ - NlMsghdr: unix.NlMsghdr{ - Len: uint32(unix.SizeofNlMsghdr), + NlMsghdr: syscall.NlMsghdr{ + Len: uint32(syscall.SizeofNlMsghdr), Type: uint16(proto), - Flags: unix.NLM_F_REQUEST | uint16(flags), + Flags: syscall.NLM_F_REQUEST | uint16(flags), }, Sockets: h.sockets, } diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go index d8ba16a948f..5aa3a1790ab 100644 --- a/vendor/github.com/vishvananda/netlink/link.go +++ b/vendor/github.com/vishvananda/netlink/link.go @@ -3,7 +3,6 @@ package netlink import ( "fmt" "net" - "os" ) // Link represents a link device from netlink. 
Shared link attributes @@ -39,8 +38,6 @@ type LinkAttrs struct { Protinfo *Protinfo OperState LinkOperState NetNsID int - NumTxQueues int - NumRxQueues int } // LinkOperState represents the values of the IFLA_OPERSTATE link @@ -262,9 +259,6 @@ const ( type Macvlan struct { LinkAttrs Mode MacvlanMode - - // MACAddrs is only populated for Macvlan SOURCE links - MACAddrs []net.HardwareAddr } func (macvlan *Macvlan) Attrs() *LinkAttrs { @@ -290,10 +284,8 @@ type TuntapFlag uint16 // Tuntap links created via /dev/tun/tap, but can be destroyed via netlink type Tuntap struct { LinkAttrs - Mode TuntapMode - Flags TuntapFlag - Queues int - Fds []*os.File + Mode TuntapMode + Flags TuntapFlag } func (tuntap *Tuntap) Attrs() *LinkAttrs { @@ -335,28 +327,26 @@ func (generic *GenericLink) Type() string { type Vxlan struct { LinkAttrs - VxlanId int - VtepDevIndex int - SrcAddr net.IP - Group net.IP - TTL int - TOS int - Learning bool - Proxy bool - RSC bool - L2miss bool - L3miss bool - UDPCSum bool - UDP6ZeroCSumTx bool - UDP6ZeroCSumRx bool - NoAge bool - GBP bool - FlowBased bool - Age int - Limit int - Port int - PortLow int - PortHigh int + VxlanId int + VtepDevIndex int + SrcAddr net.IP + Group net.IP + TTL int + TOS int + Learning bool + Proxy bool + RSC bool + L2miss bool + L3miss bool + UDPCSum bool + NoAge bool + GBP bool + FlowBased bool + Age int + Limit int + Port int + PortLow int + PortHigh int } func (vxlan *Vxlan) Attrs() *LinkAttrs { @@ -710,17 +700,12 @@ func (gretap *Gretap) Type() string { type Iptun struct { LinkAttrs - Ttl uint8 - Tos uint8 - PMtuDisc uint8 - Link uint32 - Local net.IP - Remote net.IP - EncapSport uint16 - EncapDport uint16 - EncapType uint16 - EncapFlags uint16 - FlowBased bool + Ttl uint8 + Tos uint8 + PMtuDisc uint8 + Link uint32 + Local net.IP + Remote net.IP } func (iptun *Iptun) Attrs() *LinkAttrs { @@ -731,28 +716,6 @@ func (iptun *Iptun) Type() string { return "ipip" } -type Sittun struct { - LinkAttrs - Link uint32 - Local net.IP - 
Remote net.IP - Ttl uint8 - Tos uint8 - PMtuDisc uint8 - EncapType uint16 - EncapFlags uint16 - EncapSport uint16 - EncapDport uint16 -} - -func (sittun *Sittun) Attrs() *LinkAttrs { - return &sittun.LinkAttrs -} - -func (sittun *Sittun) Type() string { - return "sit" -} - type Vti struct { LinkAttrs IKey uint32 @@ -772,20 +735,16 @@ func (iptun *Vti) Type() string { type Gretun struct { LinkAttrs - Link uint32 - IFlags uint16 - OFlags uint16 - IKey uint32 - OKey uint32 - Local net.IP - Remote net.IP - Ttl uint8 - Tos uint8 - PMtuDisc uint8 - EncapType uint16 - EncapFlags uint16 - EncapSport uint16 - EncapDport uint16 + Link uint32 + IFlags uint16 + OFlags uint16 + IKey uint32 + OKey uint32 + Local net.IP + Remote net.IP + Ttl uint8 + Tos uint8 + PMtuDisc uint8 } func (gretun *Gretun) Attrs() *LinkAttrs { diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go index a6ae1041877..e94fd9766cd 100644 --- a/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/link_linux.go @@ -11,7 +11,6 @@ import ( "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" - "golang.org/x/sys/unix" ) const ( @@ -21,15 +20,13 @@ const ( ) const ( - TUNTAP_MODE_TUN TuntapMode = unix.IFF_TUN - TUNTAP_MODE_TAP TuntapMode = unix.IFF_TAP - TUNTAP_DEFAULTS TuntapFlag = unix.IFF_TUN_EXCL | unix.IFF_ONE_QUEUE - TUNTAP_VNET_HDR TuntapFlag = unix.IFF_VNET_HDR - TUNTAP_TUN_EXCL TuntapFlag = unix.IFF_TUN_EXCL - TUNTAP_NO_PI TuntapFlag = unix.IFF_NO_PI - TUNTAP_ONE_QUEUE TuntapFlag = unix.IFF_ONE_QUEUE - TUNTAP_MULTI_QUEUE TuntapFlag = 0x0100 - TUNTAP_MULTI_QUEUE_DEFAULTS TuntapFlag = TUNTAP_MULTI_QUEUE | TUNTAP_NO_PI + TUNTAP_MODE_TUN TuntapMode = syscall.IFF_TUN + TUNTAP_MODE_TAP TuntapMode = syscall.IFF_TAP + TUNTAP_DEFAULTS TuntapFlag = syscall.IFF_TUN_EXCL | syscall.IFF_ONE_QUEUE + TUNTAP_VNET_HDR TuntapFlag = syscall.IFF_VNET_HDR + TUNTAP_TUN_EXCL TuntapFlag = syscall.IFF_TUN_EXCL 
+ TUNTAP_NO_PI TuntapFlag = syscall.IFF_NO_PI + TUNTAP_ONE_QUEUE TuntapFlag = syscall.IFF_ONE_QUEUE ) var lookupByDump = false @@ -64,15 +61,15 @@ func (h *Handle) ensureIndex(link *LinkAttrs) { func (h *Handle) LinkSetARPOff(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) - msg.Change |= unix.IFF_NOARP - msg.Flags |= unix.IFF_NOARP + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Change |= syscall.IFF_NOARP + msg.Flags |= syscall.IFF_NOARP msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -83,15 +80,15 @@ func LinkSetARPOff(link Link) error { func (h *Handle) LinkSetARPOn(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) - msg.Change |= unix.IFF_NOARP - msg.Flags &= ^uint32(unix.IFF_NOARP) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Change |= syscall.IFF_NOARP + msg.Flags &= ^uint32(syscall.IFF_NOARP) msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -102,84 +99,15 @@ func LinkSetARPOn(link Link) error { func (h *Handle) SetPromiscOn(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) - msg.Change = unix.IFF_PROMISC - msg.Flags = unix.IFF_PROMISC + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Change = syscall.IFF_PROMISC + msg.Flags = syscall.IFF_PROMISC msg.Index = int32(base.Index) req.AddData(msg) 
- _, err := req.Execute(unix.NETLINK_ROUTE, 0) - return err -} - -func MacvlanMACAddrAdd(link Link, addr net.HardwareAddr) error { - return pkgHandle.MacvlanMACAddrAdd(link, addr) -} - -func (h *Handle) MacvlanMACAddrAdd(link Link, addr net.HardwareAddr) error { - return h.macvlanMACAddrChange(link, []net.HardwareAddr{addr}, nl.MACVLAN_MACADDR_ADD) -} - -func MacvlanMACAddrDel(link Link, addr net.HardwareAddr) error { - return pkgHandle.MacvlanMACAddrDel(link, addr) -} - -func (h *Handle) MacvlanMACAddrDel(link Link, addr net.HardwareAddr) error { - return h.macvlanMACAddrChange(link, []net.HardwareAddr{addr}, nl.MACVLAN_MACADDR_DEL) -} - -func MacvlanMACAddrFlush(link Link) error { - return pkgHandle.MacvlanMACAddrFlush(link) -} - -func (h *Handle) MacvlanMACAddrFlush(link Link) error { - return h.macvlanMACAddrChange(link, nil, nl.MACVLAN_MACADDR_FLUSH) -} - -func MacvlanMACAddrSet(link Link, addrs []net.HardwareAddr) error { - return pkgHandle.MacvlanMACAddrSet(link, addrs) -} - -func (h *Handle) MacvlanMACAddrSet(link Link, addrs []net.HardwareAddr) error { - return h.macvlanMACAddrChange(link, addrs, nl.MACVLAN_MACADDR_SET) -} - -func (h *Handle) macvlanMACAddrChange(link Link, addrs []net.HardwareAddr, mode uint32) error { - base := link.Attrs() - h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) - - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) - msg.Index = int32(base.Index) - req.AddData(msg) - - linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) - nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) - inner := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) - - // IFLA_MACVLAN_MACADDR_MODE = mode - b := make([]byte, 4) - native.PutUint32(b, mode) - nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR_MODE, b) - - // populate message with MAC addrs, if necessary - switch mode { - case nl.MACVLAN_MACADDR_ADD, nl.MACVLAN_MACADDR_DEL: - if len(addrs) == 1 { - nl.NewRtAttrChild(inner, 
nl.IFLA_MACVLAN_MACADDR, []byte(addrs[0])) - } - case nl.MACVLAN_MACADDR_SET: - mad := nl.NewRtAttrChild(inner, nl.IFLA_MACVLAN_MACADDR_DATA, nil) - for _, addr := range addrs { - nl.NewRtAttrChild(mad, nl.IFLA_MACVLAN_MACADDR, []byte(addr)) - } - } - - req.AddData(linkInfo) - - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -190,7 +118,7 @@ func BridgeSetMcastSnoop(link Link, on bool) error { func (h *Handle) BridgeSetMcastSnoop(link Link, on bool) error { bridge := link.(*Bridge) bridge.MulticastSnooping = &on - return h.linkModify(bridge, unix.NLM_F_ACK) + return h.linkModify(bridge, syscall.NLM_F_ACK) } func SetPromiscOn(link Link) error { @@ -200,15 +128,15 @@ func SetPromiscOn(link Link) error { func (h *Handle) SetPromiscOff(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) - msg.Change = unix.IFF_PROMISC - msg.Flags = 0 & ^unix.IFF_PROMISC + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Change = syscall.IFF_PROMISC + msg.Flags = 0 & ^syscall.IFF_PROMISC msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -227,15 +155,15 @@ func LinkSetUp(link Link) error { func (h *Handle) LinkSetUp(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) - msg.Change = unix.IFF_UP - msg.Flags = unix.IFF_UP + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Change = syscall.IFF_UP + msg.Flags = syscall.IFF_UP msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) 
return err } @@ -250,15 +178,15 @@ func LinkSetDown(link Link) error { func (h *Handle) LinkSetDown(link Link) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_NEWLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_NEWLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) - msg.Change = unix.IFF_UP - msg.Flags = 0 & ^unix.IFF_UP + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Change = syscall.IFF_UP + msg.Flags = 0 & ^syscall.IFF_UP msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -273,19 +201,19 @@ func LinkSetMTU(link Link, mtu int) error { func (h *Handle) LinkSetMTU(link Link, mtu int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(mtu)) - data := nl.NewRtAttr(unix.IFLA_MTU, b) + data := nl.NewRtAttr(syscall.IFLA_MTU, b) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -300,16 +228,16 @@ func LinkSetName(link Link, name string) error { func (h *Handle) LinkSetName(link Link, name string) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(unix.IFLA_IFNAME, []byte(name)) + data := nl.NewRtAttr(syscall.IFLA_IFNAME, []byte(name)) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := 
req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -324,16 +252,16 @@ func LinkSetAlias(link Link, name string) error { func (h *Handle) LinkSetAlias(link Link, name string) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(unix.IFLA_IFALIAS, []byte(name)) + data := nl.NewRtAttr(syscall.IFLA_IFALIAS, []byte(name)) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -348,16 +276,16 @@ func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error { func (h *Handle) LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - data := nl.NewRtAttr(unix.IFLA_ADDRESS, []byte(hwaddr)) + data := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(hwaddr)) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -372,9 +300,9 @@ func LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error { func (h *Handle) LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -387,7 +315,7 @@ func (h *Handle) 
LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAdd nl.NewRtAttrChild(info, nl.IFLA_VF_MAC, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -402,9 +330,9 @@ func LinkSetVfVlan(link Link, vf, vlan int) error { func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -417,7 +345,7 @@ func (h *Handle) LinkSetVfVlan(link Link, vf, vlan int) error { nl.NewRtAttrChild(info, nl.IFLA_VF_VLAN, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -432,9 +360,9 @@ func LinkSetVfTxRate(link Link, vf, rate int) error { func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -447,7 +375,7 @@ func (h *Handle) LinkSetVfTxRate(link Link, vf, rate int) error { nl.NewRtAttrChild(info, nl.IFLA_VF_TX_RATE, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -463,9 +391,9 @@ func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error { var setting uint32 base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := 
nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -481,7 +409,7 @@ func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error { nl.NewRtAttrChild(info, nl.IFLA_VF_SPOOFCHK, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -497,9 +425,9 @@ func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error { var setting uint32 base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -515,7 +443,7 @@ func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error { nl.NewRtAttrChild(info, nl.IFLA_VF_TRUST, vfmsg.Serialize()) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -563,19 +491,19 @@ func LinkSetMasterByIndex(link Link, masterIndex int) error { func (h *Handle) LinkSetMasterByIndex(link Link, masterIndex int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(masterIndex)) - data := nl.NewRtAttr(unix.IFLA_MASTER, b) + data := nl.NewRtAttr(syscall.IFLA_MASTER, b) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -592,19 +520,19 @@ func LinkSetNsPid(link Link, nspid int) error { func (h *Handle) LinkSetNsPid(link Link, nspid int) error { base := link.Attrs() 
h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(nspid)) - data := nl.NewRtAttr(unix.IFLA_NET_NS_PID, b) + data := nl.NewRtAttr(syscall.IFLA_NET_NS_PID, b) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -621,9 +549,9 @@ func LinkSetNsFd(link Link, fd int) error { func (h *Handle) LinkSetNsFd(link Link, fd int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) @@ -633,7 +561,7 @@ func (h *Handle) LinkSetNsFd(link Link, fd int) error { data := nl.NewRtAttr(nl.IFLA_NET_NS_FD, b) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -648,15 +576,15 @@ func LinkSetXdpFd(link Link, fd int) error { func LinkSetXdpFdWithFlags(link Link, fd, flags int) error { base := link.Attrs() ensureIndex(base) - req := nl.NewNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) addXdpAttrs(&LinkXdp{Fd: fd, Flags: uint32(flags)}, req) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -714,8 +642,6 @@ func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) { nl.NewRtAttrChild(data, nl.IFLA_VXLAN_RSC, boolAttr(vxlan.RSC)) 
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L2MISS, boolAttr(vxlan.L2miss)) nl.NewRtAttrChild(data, nl.IFLA_VXLAN_L3MISS, boolAttr(vxlan.L3miss)) - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_ZERO_CSUM6_TX, boolAttr(vxlan.UDP6ZeroCSumTx)) - nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_ZERO_CSUM6_RX, boolAttr(vxlan.UDP6ZeroCSumRx)) if vxlan.UDPCSum { nl.NewRtAttrChild(data, nl.IFLA_VXLAN_UDP_CSUM, boolAttr(vxlan.UDPCSum)) @@ -840,12 +766,6 @@ func addBondAttrs(bond *Bond, linkInfo *nl.RtAttr) { } } -func cleanupFds(fds []*os.File) { - for _, f := range fds { - f.Close() - } -} - // LinkAdd adds a new link device. The type and features of the device // are taken from the parameters in the link object. // Equivalent to: `ip link add $link` @@ -857,7 +777,7 @@ func LinkAdd(link Link) error { // are taken fromt the parameters in the link object. // Equivalent to: `ip link add $link` func (h *Handle) LinkAdd(link Link) error { - return h.linkModify(link, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + return h.linkModify(link, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) } func (h *Handle) linkModify(link Link, flags int) error { @@ -871,152 +791,101 @@ func (h *Handle) linkModify(link Link, flags int) error { if tuntap, ok := link.(*Tuntap); ok { // TODO: support user // TODO: support group + // TODO: multi_queue // TODO: support non- persistent - if tuntap.Mode < unix.IFF_TUN || tuntap.Mode > unix.IFF_TAP { + if tuntap.Mode < syscall.IFF_TUN || tuntap.Mode > syscall.IFF_TAP { return fmt.Errorf("Tuntap.Mode %v unknown!", tuntap.Mode) } - - queues := tuntap.Queues - - var fds []*os.File + file, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() var req ifReq - copy(req.Name[:15], base.Name) - - req.Flags = uint16(tuntap.Flags) - - if queues == 0 { //Legacy compatibility - queues = 1 - if tuntap.Flags == 0 { - req.Flags = uint16(TUNTAP_DEFAULTS) - } + if tuntap.Flags == 0 { + req.Flags = uint16(TUNTAP_DEFAULTS) 
} else { - // For best peformance set Flags to TUNTAP_MULTI_QUEUE_DEFAULTS | TUNTAP_VNET_HDR - // when a) KVM has support for this ABI and - // b) the value of the flag is queryable using the TUNGETIFF ioctl - if tuntap.Flags == 0 { - req.Flags = uint16(TUNTAP_MULTI_QUEUE_DEFAULTS) - } + req.Flags = uint16(tuntap.Flags) } - req.Flags |= uint16(tuntap.Mode) - - for i := 0; i < queues; i++ { - localReq := req - file, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0) - if err != nil { - cleanupFds(fds) - return err - } - - fds = append(fds, file) - _, _, errno := unix.Syscall(unix.SYS_IOCTL, file.Fd(), uintptr(unix.TUNSETIFF), uintptr(unsafe.Pointer(&localReq))) - if errno != 0 { - cleanupFds(fds) - return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed [%d], errno %v", i, errno) - } - } - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 1) + copy(req.Name[:15], base.Name) + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), uintptr(syscall.TUNSETIFF), uintptr(unsafe.Pointer(&req))) + if errno != 0 { + return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed, errno %v", errno) + } + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), uintptr(syscall.TUNSETPERSIST), 1) if errno != 0 { - cleanupFds(fds) return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST failed, errno %v", errno) } - h.ensureIndex(base) // can't set master during create, so set it afterwards if base.MasterIndex != 0 { // TODO: verify MasterIndex is actually a bridge? 
- err := h.LinkSetMasterByIndex(link, base.MasterIndex) - if err != nil { - _, _, _ = unix.Syscall(unix.SYS_IOCTL, fds[0].Fd(), uintptr(unix.TUNSETPERSIST), 0) - cleanupFds(fds) - return err - } + return h.LinkSetMasterByIndex(link, base.MasterIndex) } - - if tuntap.Queues == 0 { - cleanupFds(fds) - } else { - tuntap.Fds = fds - } - return nil } - req := h.newNetlinkRequest(unix.RTM_NEWLINK, flags) + req := h.newNetlinkRequest(syscall.RTM_NEWLINK, flags) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) // TODO: make it shorter if base.Flags&net.FlagUp != 0 { - msg.Change = unix.IFF_UP - msg.Flags = unix.IFF_UP + msg.Change = syscall.IFF_UP + msg.Flags = syscall.IFF_UP } if base.Flags&net.FlagBroadcast != 0 { - msg.Change |= unix.IFF_BROADCAST - msg.Flags |= unix.IFF_BROADCAST + msg.Change |= syscall.IFF_BROADCAST + msg.Flags |= syscall.IFF_BROADCAST } if base.Flags&net.FlagLoopback != 0 { - msg.Change |= unix.IFF_LOOPBACK - msg.Flags |= unix.IFF_LOOPBACK + msg.Change |= syscall.IFF_LOOPBACK + msg.Flags |= syscall.IFF_LOOPBACK } if base.Flags&net.FlagPointToPoint != 0 { - msg.Change |= unix.IFF_POINTOPOINT - msg.Flags |= unix.IFF_POINTOPOINT + msg.Change |= syscall.IFF_POINTOPOINT + msg.Flags |= syscall.IFF_POINTOPOINT } if base.Flags&net.FlagMulticast != 0 { - msg.Change |= unix.IFF_MULTICAST - msg.Flags |= unix.IFF_MULTICAST + msg.Change |= syscall.IFF_MULTICAST + msg.Flags |= syscall.IFF_MULTICAST } - if base.Index != 0 { - msg.Index = int32(base.Index) - } - req.AddData(msg) if base.ParentIndex != 0 { b := make([]byte, 4) native.PutUint32(b, uint32(base.ParentIndex)) - data := nl.NewRtAttr(unix.IFLA_LINK, b) + data := nl.NewRtAttr(syscall.IFLA_LINK, b) req.AddData(data) } else if link.Type() == "ipvlan" { return fmt.Errorf("Can't create ipvlan link without ParentIndex") } - nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(base.Name)) + nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(base.Name)) 
req.AddData(nameData) if base.MTU > 0 { - mtu := nl.NewRtAttr(unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + mtu := nl.NewRtAttr(syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) req.AddData(mtu) } if base.TxQLen >= 0 { - qlen := nl.NewRtAttr(unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + qlen := nl.NewRtAttr(syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) req.AddData(qlen) } if base.HardwareAddr != nil { - hwaddr := nl.NewRtAttr(unix.IFLA_ADDRESS, []byte(base.HardwareAddr)) + hwaddr := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(base.HardwareAddr)) req.AddData(hwaddr) } - if base.NumTxQueues > 0 { - txqueues := nl.NewRtAttr(nl.IFLA_NUM_TX_QUEUES, nl.Uint32Attr(uint32(base.NumTxQueues))) - req.AddData(txqueues) - } - - if base.NumRxQueues > 0 { - rxqueues := nl.NewRtAttr(nl.IFLA_NUM_RX_QUEUES, nl.Uint32Attr(uint32(base.NumRxQueues))) - req.AddData(rxqueues) - } - if base.Namespace != nil { var attr *nl.RtAttr switch base.Namespace.(type) { case NsPid: val := nl.Uint32Attr(uint32(base.Namespace.(NsPid))) - attr = nl.NewRtAttr(unix.IFLA_NET_NS_PID, val) + attr = nl.NewRtAttr(syscall.IFLA_NET_NS_PID, val) case NsFd: val := nl.Uint32Attr(uint32(base.Namespace.(NsFd))) attr = nl.NewRtAttr(nl.IFLA_NET_NS_FD, val) @@ -1029,7 +898,7 @@ func (h *Handle) linkModify(link Link, flags int) error { addXdpAttrs(base.Xdp, req) } - linkInfo := nl.NewRtAttr(unix.IFLA_LINKINFO, nil) + linkInfo := nl.NewRtAttr(syscall.IFLA_LINKINFO, nil) nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) switch link := link.(type) { @@ -1041,13 +910,13 @@ func (h *Handle) linkModify(link Link, flags int) error { case *Veth: data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil) - nl.NewIfInfomsgChild(peer, unix.AF_UNSPEC) - nl.NewRtAttrChild(peer, unix.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName)) + nl.NewIfInfomsgChild(peer, syscall.AF_UNSPEC) + nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, 
nl.ZeroTerminated(link.PeerName)) if base.TxQLen >= 0 { - nl.NewRtAttrChild(peer, unix.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + nl.NewRtAttrChild(peer, syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) } if base.MTU > 0 { - nl.NewRtAttrChild(peer, unix.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + nl.NewRtAttrChild(peer, syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) } case *Vxlan: @@ -1071,8 +940,6 @@ func (h *Handle) linkModify(link Link, flags int) error { addGretapAttrs(link, linkInfo) case *Iptun: addIptunAttrs(link, linkInfo) - case *Sittun: - addSittunAttrs(link, linkInfo) case *Gretun: addGretunAttrs(link, linkInfo) case *Vti: @@ -1087,7 +954,7 @@ func (h *Handle) linkModify(link Link, flags int) error { req.AddData(linkInfo) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) if err != nil { return err } @@ -1117,13 +984,13 @@ func (h *Handle) LinkDel(link Link) error { h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_DELLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -1166,16 +1033,16 @@ func (h *Handle) LinkByName(name string) (Link, error) { return h.linkByNameDump(name) } - req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) req.AddData(msg) - nameData := nl.NewRtAttr(unix.IFLA_IFNAME, nl.ZeroTerminated(name)) + nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(name)) req.AddData(nameData) link, err := execGetLink(req) - if err == unix.EINVAL { + if err == syscall.EINVAL { // older kernels don't support looking up via 
IFLA_IFNAME // so fall back to dumping all links h.lookupByDump = true @@ -1198,16 +1065,16 @@ func (h *Handle) LinkByAlias(alias string) (Link, error) { return h.linkByAliasDump(alias) } - req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) req.AddData(msg) - nameData := nl.NewRtAttr(unix.IFLA_IFALIAS, nl.ZeroTerminated(alias)) + nameData := nl.NewRtAttr(syscall.IFLA_IFALIAS, nl.ZeroTerminated(alias)) req.AddData(nameData) link, err := execGetLink(req) - if err == unix.EINVAL { + if err == syscall.EINVAL { // older kernels don't support looking up via IFLA_IFALIAS // so fall back to dumping all links h.lookupByDump = true @@ -1224,9 +1091,9 @@ func LinkByIndex(index int) (Link, error) { // LinkByIndex finds a link by index and returns a pointer to the object. func (h *Handle) LinkByIndex(index int) (Link, error) { - req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(index) req.AddData(msg) @@ -1234,10 +1101,10 @@ func (h *Handle) LinkByIndex(index int) (Link, error) { } func execGetLink(req *nl.NetlinkRequest) (Link, error) { - msgs, err := req.Execute(unix.NETLINK_ROUTE, 0) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0) if err != nil { if errno, ok := err.(syscall.Errno); ok { - if errno == unix.ENODEV { + if errno == syscall.ENODEV { return nil, LinkNotFoundError{fmt.Errorf("Link not found")} } } @@ -1258,7 +1125,7 @@ func execGetLink(req *nl.NetlinkRequest) (Link, error) { // linkDeserialize deserializes a raw message received from netlink into // a link object. 
-func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { +func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) { msg := nl.DeserializeIfInfomsg(m) attrs, err := nl.ParseRouteAttr(m[msg.Len():]) @@ -1267,7 +1134,7 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { } base := LinkAttrs{Index: int(msg.Index), RawFlags: msg.Flags, Flags: linkFlags(msg.Flags), EncapType: msg.EncapType()} - if msg.Flags&unix.IFF_PROMISC != 0 { + if msg.Flags&syscall.IFF_PROMISC != 0 { base.Promisc = 1 } var ( @@ -1278,7 +1145,7 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { ) for _, attr := range attrs { switch attr.Attr.Type { - case unix.IFLA_LINKINFO: + case syscall.IFLA_LINKINFO: infos, err := nl.ParseRouteAttr(attr.Value) if err != nil { return nil, err @@ -1312,8 +1179,6 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { link = &Gretap{} case "ipip": link = &Iptun{} - case "sit": - link = &Sittun{} case "gre": link = &Gretun{} case "vti": @@ -1347,8 +1212,6 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { parseGretapData(link, data) case "ipip": parseIptunData(link, data) - case "sit": - parseSittunData(link, data) case "gre": parseGretunData(link, data) case "vti": @@ -1362,7 +1225,7 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { } } } - case unix.IFLA_ADDRESS: + case syscall.IFLA_ADDRESS: var nonzero bool for _, b := range attr.Value { if b != 0 { @@ -1372,19 +1235,19 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { if nonzero { base.HardwareAddr = attr.Value[:] } - case unix.IFLA_IFNAME: + case syscall.IFLA_IFNAME: base.Name = string(attr.Value[:len(attr.Value)-1]) - case unix.IFLA_MTU: + case syscall.IFLA_MTU: base.MTU = int(native.Uint32(attr.Value[0:4])) - case unix.IFLA_LINK: + case syscall.IFLA_LINK: base.ParentIndex = int(native.Uint32(attr.Value[0:4])) - case unix.IFLA_MASTER: + case syscall.IFLA_MASTER: base.MasterIndex = 
int(native.Uint32(attr.Value[0:4])) - case unix.IFLA_TXQLEN: + case syscall.IFLA_TXQLEN: base.TxQLen = int(native.Uint32(attr.Value[0:4])) - case unix.IFLA_IFALIAS: + case syscall.IFLA_IFALIAS: base.Alias = string(attr.Value[:len(attr.Value)-1]) - case unix.IFLA_STATS: + case syscall.IFLA_STATS: stats32 = attr.Value[:] case IFLA_STATS64: stats64 = attr.Value[:] @@ -1394,16 +1257,16 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { return nil, err } base.Xdp = xdp - case unix.IFLA_PROTINFO | unix.NLA_F_NESTED: - if hdr != nil && hdr.Type == unix.RTM_NEWLINK && - msg.Family == unix.AF_BRIDGE { + case syscall.IFLA_PROTINFO | syscall.NLA_F_NESTED: + if hdr != nil && hdr.Type == syscall.RTM_NEWLINK && + msg.Family == syscall.AF_BRIDGE { attrs, err := nl.ParseRouteAttr(attr.Value[:]) if err != nil { return nil, err } base.Protinfo = parseProtinfo(attrs) } - case unix.IFLA_OPERSTATE: + case syscall.IFLA_OPERSTATE: base.OperState = LinkOperState(uint8(attr.Value[0])) case nl.IFLA_LINK_NETNSID: base.NetNsID = int(native.Uint32(attr.Value[0:4])) @@ -1436,12 +1299,12 @@ func LinkList() ([]Link, error) { func (h *Handle) LinkList() ([]Link, error) { // NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need // to get the message ourselves to parse link type. 
- req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWLINK) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK) if err != nil { return nil, err } @@ -1461,7 +1324,7 @@ func (h *Handle) LinkList() ([]Link, error) { // LinkUpdate is used to pass information back from LinkSubscribe() type LinkUpdate struct { nl.IfInfomsg - Header unix.NlMsghdr + Header syscall.NlMsghdr Link } @@ -1496,7 +1359,7 @@ func LinkSubscribeWithOptions(ch chan<- LinkUpdate, done <-chan struct{}, option } func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error)) error { - s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_LINK) + s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_LINK) if err != nil { return err } @@ -1518,15 +1381,14 @@ func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-c } for _, m := range msgs { ifmsg := nl.DeserializeIfInfomsg(m.Data) - header := unix.NlMsghdr(m.Header) - link, err := LinkDeserialize(&header, m.Data) + link, err := LinkDeserialize(&m.Header, m.Data) if err != nil { if cberr != nil { cberr(err) } return } - ch <- LinkUpdate{IfInfomsg: *ifmsg, Header: header, Link: link} + ch <- LinkUpdate{IfInfomsg: *ifmsg, Header: m.Header, Link: link} } } }() @@ -1601,16 +1463,16 @@ func (h *Handle) LinkSetBrProxyArpWiFi(link Link, mode bool) error { func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_BRIDGE) + msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) msg.Index = 
int32(base.Index) req.AddData(msg) - br := nl.NewRtAttr(unix.IFLA_PROTINFO|unix.NLA_F_NESTED, nil) + br := nl.NewRtAttr(syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED, nil) nl.NewRtAttrChild(br, attr, boolToByte(mode)) req.AddData(br) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) if err != nil { return err } @@ -1628,19 +1490,19 @@ func LinkSetTxQLen(link Link, qlen int) error { func (h *Handle) LinkSetTxQLen(link Link, qlen int) error { base := link.Attrs() h.ensureIndex(base) - req := h.newNetlinkRequest(unix.RTM_SETLINK, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) - msg := nl.NewIfInfomsg(unix.AF_UNSPEC) + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) msg.Index = int32(base.Index) req.AddData(msg) b := make([]byte, 4) native.PutUint32(b, uint32(qlen)) - data := nl.NewRtAttr(unix.IFLA_TXQLEN, b) + data := nl.NewRtAttr(syscall.IFLA_TXQLEN, b) req.AddData(data) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -1686,10 +1548,6 @@ func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) { vxlan.L3miss = int8(datum.Value[0]) != 0 case nl.IFLA_VXLAN_UDP_CSUM: vxlan.UDPCSum = int8(datum.Value[0]) != 0 - case nl.IFLA_VXLAN_UDP_ZERO_CSUM6_TX: - vxlan.UDP6ZeroCSumTx = int8(datum.Value[0]) != 0 - case nl.IFLA_VXLAN_UDP_ZERO_CSUM6_RX: - vxlan.UDP6ZeroCSumRx = int8(datum.Value[0]) != 0 case nl.IFLA_VXLAN_GBP: vxlan.GBP = true case nl.IFLA_VXLAN_FLOWBASED: @@ -1792,8 +1650,7 @@ func parseMacvtapData(link Link, data []syscall.NetlinkRouteAttr) { func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { macv := link.(*Macvlan) for _, datum := range data { - switch datum.Attr.Type { - case nl.IFLA_MACVLAN_MODE: + if datum.Attr.Type == nl.IFLA_MACVLAN_MODE { switch native.Uint32(datum.Value[0:4]) { case nl.MACVLAN_MODE_PRIVATE: macv.Mode = MACVLAN_MODE_PRIVATE @@ -1806,16 +1663,7 @@ func parseMacvlanData(link Link, 
data []syscall.NetlinkRouteAttr) { case nl.MACVLAN_MODE_SOURCE: macv.Mode = MACVLAN_MODE_SOURCE } - case nl.IFLA_MACVLAN_MACADDR_COUNT: - macv.MACAddrs = make([]net.HardwareAddr, 0, int(native.Uint32(datum.Value[0:4]))) - case nl.IFLA_MACVLAN_MACADDR_DATA: - macs, err := nl.ParseRouteAttr(datum.Value[:]) - if err != nil { - panic(fmt.Sprintf("failed to ParseRouteAttr for IFLA_MACVLAN_MACADDR_DATA: %v", err)) - } - for _, macDatum := range macs { - macv.MACAddrs = append(macv.MACAddrs, net.HardwareAddr(macDatum.Value[0:6])) - } + return } } } @@ -1823,19 +1671,19 @@ func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { // copied from pkg/net_linux.go func linkFlags(rawFlags uint32) net.Flags { var f net.Flags - if rawFlags&unix.IFF_UP != 0 { + if rawFlags&syscall.IFF_UP != 0 { f |= net.FlagUp } - if rawFlags&unix.IFF_BROADCAST != 0 { + if rawFlags&syscall.IFF_BROADCAST != 0 { f |= net.FlagBroadcast } - if rawFlags&unix.IFF_LOOPBACK != 0 { + if rawFlags&syscall.IFF_LOOPBACK != 0 { f |= net.FlagLoopback } - if rawFlags&unix.IFF_POINTOPOINT != 0 { + if rawFlags&syscall.IFF_POINTOPOINT != 0 { f |= net.FlagPointToPoint } - if rawFlags&unix.IFF_MULTICAST != 0 { + if rawFlags&syscall.IFF_MULTICAST != 0 { f |= net.FlagMulticast } return f @@ -1917,9 +1765,7 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_GRE_ENCAP_FLAGS: gre.EncapFlags = native.Uint16(datum.Value[0:2]) case nl.IFLA_GRE_COLLECT_METADATA: - if len(datum.Value) > 0 { - gre.FlowBased = int8(datum.Value[0]) != 0 - } + gre.FlowBased = int8(datum.Value[0]) != 0 } } } @@ -1956,10 +1802,6 @@ func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) { nl.NewRtAttrChild(data, nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gre.PMtuDisc)) nl.NewRtAttrChild(data, nl.IFLA_GRE_TTL, nl.Uint8Attr(gre.Ttl)) nl.NewRtAttrChild(data, nl.IFLA_GRE_TOS, nl.Uint8Attr(gre.Tos)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gre.EncapType)) - nl.NewRtAttrChild(data, 
nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gre.EncapFlags)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_SPORT, htons(gre.EncapSport)) - nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_DPORT, htons(gre.EncapDport)) } func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { @@ -1985,14 +1827,6 @@ func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { gre.Tos = uint8(datum.Value[0]) case nl.IFLA_GRE_PMTUDISC: gre.PMtuDisc = uint8(datum.Value[0]) - case nl.IFLA_GRE_ENCAP_TYPE: - gre.EncapType = native.Uint16(datum.Value[0:2]) - case nl.IFLA_GRE_ENCAP_FLAGS: - gre.EncapFlags = native.Uint16(datum.Value[0:2]) - case nl.IFLA_GRE_ENCAP_SPORT: - gre.EncapSport = ntohs(datum.Value[0:2]) - case nl.IFLA_GRE_ENCAP_DPORT: - gre.EncapDport = ntohs(datum.Value[0:2]) } } } @@ -2006,7 +1840,7 @@ func parseLinkStats64(data []byte) *LinkStatistics { } func addXdpAttrs(xdp *LinkXdp, req *nl.NetlinkRequest) { - attrs := nl.NewRtAttr(nl.IFLA_XDP|unix.NLA_F_NESTED, nil) + attrs := nl.NewRtAttr(nl.IFLA_XDP|syscall.NLA_F_NESTED, nil) b := make([]byte, 4) native.PutUint32(b, uint32(xdp.Fd)) nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FD, b) @@ -2039,12 +1873,6 @@ func parseLinkXdp(data []byte) (*LinkXdp, error) { } func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { - if iptun.FlowBased { - // In flow based mode, no other attributes need to be configured - nl.NewRtAttrChild(linkInfo, nl.IFLA_IPTUN_COLLECT_METADATA, boolAttr(iptun.FlowBased)) - return - } - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) ip := iptun.Local.To4() @@ -2063,10 +1891,6 @@ func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) { nl.NewRtAttrChild(data, nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(iptun.PMtuDisc)) nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TTL, nl.Uint8Attr(iptun.Ttl)) nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TOS, nl.Uint8Attr(iptun.Tos)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(iptun.EncapType)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_FLAGS, 
nl.Uint16Attr(iptun.EncapFlags)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_SPORT, htons(iptun.EncapSport)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_DPORT, htons(iptun.EncapDport)) } func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { @@ -2083,72 +1907,6 @@ func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) { iptun.Tos = uint8(datum.Value[0]) case nl.IFLA_IPTUN_PMTUDISC: iptun.PMtuDisc = uint8(datum.Value[0]) - case nl.IFLA_IPTUN_ENCAP_SPORT: - iptun.EncapSport = ntohs(datum.Value[0:2]) - case nl.IFLA_IPTUN_ENCAP_DPORT: - iptun.EncapDport = ntohs(datum.Value[0:2]) - case nl.IFLA_IPTUN_ENCAP_TYPE: - iptun.EncapType = native.Uint16(datum.Value[0:2]) - case nl.IFLA_IPTUN_ENCAP_FLAGS: - iptun.EncapFlags = native.Uint16(datum.Value[0:2]) - case nl.IFLA_IPTUN_COLLECT_METADATA: - iptun.FlowBased = int8(datum.Value[0]) != 0 - } - } -} - -func addSittunAttrs(sittun *Sittun, linkInfo *nl.RtAttr) { - data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) - - if sittun.Link != 0 { - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LINK, nl.Uint32Attr(sittun.Link)) - } - - ip := sittun.Local.To4() - if ip != nil { - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LOCAL, []byte(ip)) - } - - ip = sittun.Remote.To4() - if ip != nil { - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_REMOTE, []byte(ip)) - } - - if sittun.Ttl > 0 { - // Would otherwise fail on 3.10 kernel - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TTL, nl.Uint8Attr(sittun.Ttl)) - } - - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TOS, nl.Uint8Attr(sittun.Tos)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(sittun.PMtuDisc)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_TYPE, nl.Uint16Attr(sittun.EncapType)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_FLAGS, nl.Uint16Attr(sittun.EncapFlags)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_SPORT, htons(sittun.EncapSport)) - nl.NewRtAttrChild(data, nl.IFLA_IPTUN_ENCAP_DPORT, htons(sittun.EncapDport)) -} - -func parseSittunData(link Link, data 
[]syscall.NetlinkRouteAttr) { - sittun := link.(*Sittun) - for _, datum := range data { - switch datum.Attr.Type { - case nl.IFLA_IPTUN_LOCAL: - sittun.Local = net.IP(datum.Value[0:4]) - case nl.IFLA_IPTUN_REMOTE: - sittun.Remote = net.IP(datum.Value[0:4]) - case nl.IFLA_IPTUN_TTL: - sittun.Ttl = uint8(datum.Value[0]) - case nl.IFLA_IPTUN_TOS: - sittun.Tos = uint8(datum.Value[0]) - case nl.IFLA_IPTUN_PMTUDISC: - sittun.PMtuDisc = uint8(datum.Value[0]) - case nl.IFLA_IPTUN_ENCAP_TYPE: - sittun.EncapType = native.Uint16(datum.Value[0:2]) - case nl.IFLA_IPTUN_ENCAP_FLAGS: - sittun.EncapFlags = native.Uint16(datum.Value[0:2]) - case nl.IFLA_IPTUN_ENCAP_SPORT: - sittun.EncapSport = ntohs(datum.Value[0:2]) - case nl.IFLA_IPTUN_ENCAP_DPORT: - sittun.EncapDport = ntohs(datum.Value[0:2]) } } } diff --git a/vendor/github.com/vishvananda/netlink/neigh.go b/vendor/github.com/vishvananda/netlink/neigh.go index 3f5cd497a73..6a6f71ce866 100644 --- a/vendor/github.com/vishvananda/netlink/neigh.go +++ b/vendor/github.com/vishvananda/netlink/neigh.go @@ -15,8 +15,6 @@ type Neigh struct { IP net.IP HardwareAddr net.HardwareAddr LLIPAddr net.IP //Used in the case of NHRP - Vlan int - VNI int } // String returns $ip/$hwaddr $label diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go index f75c22649f9..5edc8b41259 100644 --- a/vendor/github.com/vishvananda/netlink/neigh_linux.go +++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go @@ -2,10 +2,10 @@ package netlink import ( "net" + "syscall" "unsafe" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) const ( @@ -73,7 +73,7 @@ func NeighAdd(neigh *Neigh) error { // NeighAdd will add an IP to MAC mapping to the ARP table // Equivalent to: `ip neigh add ....` func (h *Handle) NeighAdd(neigh *Neigh) error { - return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_EXCL) + return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL) } // NeighSet will 
add or replace an IP to MAC mapping to the ARP table @@ -85,7 +85,7 @@ func NeighSet(neigh *Neigh) error { // NeighSet will add or replace an IP to MAC mapping to the ARP table // Equivalent to: `ip neigh replace....` func (h *Handle) NeighSet(neigh *Neigh) error { - return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_REPLACE) + return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE) } // NeighAppend will append an entry to FDB @@ -97,7 +97,7 @@ func NeighAppend(neigh *Neigh) error { // NeighAppend will append an entry to FDB // Equivalent to: `bridge fdb append...` func (h *Handle) NeighAppend(neigh *Neigh) error { - return h.neighAdd(neigh, unix.NLM_F_CREATE|unix.NLM_F_APPEND) + return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_APPEND) } // NeighAppend will append an entry to FDB @@ -109,7 +109,7 @@ func neighAdd(neigh *Neigh, mode int) error { // NeighAppend will append an entry to FDB // Equivalent to: `bridge fdb append...` func (h *Handle) neighAdd(neigh *Neigh, mode int) error { - req := h.newNetlinkRequest(unix.RTM_NEWNEIGH, mode|unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_NEWNEIGH, mode|syscall.NLM_F_ACK) return neighHandle(neigh, req) } @@ -122,7 +122,7 @@ func NeighDel(neigh *Neigh) error { // NeighDel will delete an IP address from a link device. 
// Equivalent to: `ip addr del $addr dev $link` func (h *Handle) NeighDel(neigh *Neigh) error { - req := h.newNetlinkRequest(unix.RTM_DELNEIGH, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_DELNEIGH, syscall.NLM_F_ACK) return neighHandle(neigh, req) } @@ -160,17 +160,7 @@ func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error { req.AddData(hwData) } - if neigh.Vlan != 0 { - vlanData := nl.NewRtAttr(NDA_VLAN, nl.Uint16Attr(uint16(neigh.Vlan))) - req.AddData(vlanData) - } - - if neigh.VNI != 0 { - vniData := nl.NewRtAttr(NDA_VNI, nl.Uint32Attr(uint32(neigh.VNI))) - req.AddData(vniData) - } - - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -203,7 +193,7 @@ func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) { } func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) { - req := h.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(syscall.RTM_GETNEIGH, syscall.NLM_F_DUMP) msg := Ndmsg{ Family: uint8(family), Index: uint32(linkIndex), @@ -211,7 +201,7 @@ func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) { } req.AddData(&msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWNEIGH) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWNEIGH) if err != nil { return nil, err } @@ -267,7 +257,7 @@ func NeighDeserialize(m []byte) (*Neigh, error) { // BUG: Is this a bug in the netlink library? 
// #define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len)) // #define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0)) - attrLen := attr.Attr.Len - unix.SizeofRtAttr + attrLen := attr.Attr.Len - syscall.SizeofRtAttr if attrLen == 4 && (encapType == "ipip" || encapType == "sit" || encapType == "gre") { @@ -278,10 +268,6 @@ func NeighDeserialize(m []byte) (*Neigh, error) { } else { neigh.HardwareAddr = net.HardwareAddr(attr.Value) } - case NDA_VLAN: - neigh.Vlan = int(native.Uint16(attr.Value[0:2])) - case NDA_VNI: - neigh.VNI = int(native.Uint32(attr.Value[0:4])) } } diff --git a/vendor/github.com/vishvananda/netlink/nl/BUILD b/vendor/github.com/vishvananda/netlink/nl/BUILD index af0db8f30ec..b5182d438ed 100644 --- a/vendor/github.com/vishvananda/netlink/nl/BUILD +++ b/vendor/github.com/vishvananda/netlink/nl/BUILD @@ -12,7 +12,6 @@ go_library( "nl_linux.go", "nl_unspecified.go", "route_linux.go", - "seg6_linux.go", "syscall.go", "tc_linux.go", "xfrm_linux.go", @@ -26,7 +25,6 @@ go_library( deps = select({ "@io_bazel_rules_go//go/platform:linux": [ "//vendor/github.com/vishvananda/netns:go_default_library", - "//vendor/golang.org/x/sys/unix:go_default_library", ], "//conditions:default": [], }), diff --git a/vendor/github.com/vishvananda/netlink/nl/addr_linux.go b/vendor/github.com/vishvananda/netlink/nl/addr_linux.go index 50db3b4cdd8..fe362e9fa7c 100644 --- a/vendor/github.com/vishvananda/netlink/nl/addr_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/addr_linux.go @@ -1,18 +1,17 @@ package nl import ( + "syscall" "unsafe" - - "golang.org/x/sys/unix" ) type IfAddrmsg struct { - unix.IfAddrmsg + syscall.IfAddrmsg } func NewIfAddrmsg(family int) *IfAddrmsg { return &IfAddrmsg{ - IfAddrmsg: unix.IfAddrmsg{ + IfAddrmsg: syscall.IfAddrmsg{ Family: uint8(family), }, } @@ -36,15 +35,15 @@ func NewIfAddrmsg(family int) *IfAddrmsg { // SizeofIfAddrmsg = 0x8 func DeserializeIfAddrmsg(b []byte) *IfAddrmsg { - return 
(*IfAddrmsg)(unsafe.Pointer(&b[0:unix.SizeofIfAddrmsg][0])) + return (*IfAddrmsg)(unsafe.Pointer(&b[0:syscall.SizeofIfAddrmsg][0])) } func (msg *IfAddrmsg) Serialize() []byte { - return (*(*[unix.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:] + return (*(*[syscall.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:] } func (msg *IfAddrmsg) Len() int { - return unix.SizeofIfAddrmsg + return syscall.SizeofIfAddrmsg } // struct ifa_cacheinfo { diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go index ba0b3e19c67..9ae65a12c2a 100644 --- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -1,15 +1,14 @@ package nl import ( + "syscall" "unsafe" - - "golang.org/x/sys/unix" ) const ( DEFAULT_CHANGE = 0xFFFFFFFF // doesn't exist in syscall - IFLA_VFINFO_LIST = unix.IFLA_IFALIAS + 1 + iota + IFLA_VFINFO_LIST = syscall.IFLA_IFALIAS + 1 + iota IFLA_STATS64 IFLA_VF_PORTS IFLA_PORT_SELF @@ -119,10 +118,6 @@ const ( IFLA_MACVLAN_UNSPEC = iota IFLA_MACVLAN_MODE IFLA_MACVLAN_FLAGS - IFLA_MACVLAN_MACADDR_MODE - IFLA_MACVLAN_MACADDR - IFLA_MACVLAN_MACADDR_DATA - IFLA_MACVLAN_MACADDR_COUNT IFLA_MACVLAN_MAX = IFLA_MACVLAN_FLAGS ) @@ -134,13 +129,6 @@ const ( MACVLAN_MODE_SOURCE = 16 ) -const ( - MACVLAN_MACADDR_ADD = iota - MACVLAN_MACADDR_DEL - MACVLAN_MACADDR_FLUSH - MACVLAN_MACADDR_SET -) - const ( IFLA_BOND_UNSPEC = iota IFLA_BOND_MODE @@ -487,12 +475,7 @@ const ( IFLA_IPTUN_6RD_RELAY_PREFIX IFLA_IPTUN_6RD_PREFIXLEN IFLA_IPTUN_6RD_RELAY_PREFIXLEN - IFLA_IPTUN_ENCAP_TYPE - IFLA_IPTUN_ENCAP_FLAGS - IFLA_IPTUN_ENCAP_SPORT - IFLA_IPTUN_ENCAP_DPORT - IFLA_IPTUN_COLLECT_METADATA - IFLA_IPTUN_MAX = IFLA_IPTUN_COLLECT_METADATA + IFLA_IPTUN_MAX = IFLA_IPTUN_6RD_RELAY_PREFIXLEN ) const ( diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go index bc8e82c2cc4..72f7f6af3c8 100644 --- 
a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -13,19 +13,18 @@ import ( "unsafe" "github.com/vishvananda/netns" - "golang.org/x/sys/unix" ) const ( // Family type definitions - FAMILY_ALL = unix.AF_UNSPEC - FAMILY_V4 = unix.AF_INET - FAMILY_V6 = unix.AF_INET6 + FAMILY_ALL = syscall.AF_UNSPEC + FAMILY_V4 = syscall.AF_INET + FAMILY_V6 = syscall.AF_INET6 FAMILY_MPLS = AF_MPLS ) // SupportedNlFamilies contains the list of netlink families this netlink package supports -var SupportedNlFamilies = []int{unix.NETLINK_ROUTE, unix.NETLINK_XFRM, unix.NETLINK_NETFILTER} +var SupportedNlFamilies = []int{syscall.NETLINK_ROUTE, syscall.NETLINK_XFRM, syscall.NETLINK_NETFILTER} var nextSeqNr uint32 @@ -78,161 +77,161 @@ type NetlinkRequestData interface { // IfInfomsg is related to links, but it is used for list requests as well type IfInfomsg struct { - unix.IfInfomsg + syscall.IfInfomsg } // Create an IfInfomsg with family specified func NewIfInfomsg(family int) *IfInfomsg { return &IfInfomsg{ - IfInfomsg: unix.IfInfomsg{ + IfInfomsg: syscall.IfInfomsg{ Family: uint8(family), }, } } func DeserializeIfInfomsg(b []byte) *IfInfomsg { - return (*IfInfomsg)(unsafe.Pointer(&b[0:unix.SizeofIfInfomsg][0])) + return (*IfInfomsg)(unsafe.Pointer(&b[0:syscall.SizeofIfInfomsg][0])) } func (msg *IfInfomsg) Serialize() []byte { - return (*(*[unix.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:] + return (*(*[syscall.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:] } func (msg *IfInfomsg) Len() int { - return unix.SizeofIfInfomsg + return syscall.SizeofIfInfomsg } func (msg *IfInfomsg) EncapType() string { switch msg.Type { case 0: return "generic" - case unix.ARPHRD_ETHER: + case syscall.ARPHRD_ETHER: return "ether" - case unix.ARPHRD_EETHER: + case syscall.ARPHRD_EETHER: return "eether" - case unix.ARPHRD_AX25: + case syscall.ARPHRD_AX25: return "ax25" - case unix.ARPHRD_PRONET: + case syscall.ARPHRD_PRONET: return "pronet" - 
case unix.ARPHRD_CHAOS: + case syscall.ARPHRD_CHAOS: return "chaos" - case unix.ARPHRD_IEEE802: + case syscall.ARPHRD_IEEE802: return "ieee802" - case unix.ARPHRD_ARCNET: + case syscall.ARPHRD_ARCNET: return "arcnet" - case unix.ARPHRD_APPLETLK: + case syscall.ARPHRD_APPLETLK: return "atalk" - case unix.ARPHRD_DLCI: + case syscall.ARPHRD_DLCI: return "dlci" - case unix.ARPHRD_ATM: + case syscall.ARPHRD_ATM: return "atm" - case unix.ARPHRD_METRICOM: + case syscall.ARPHRD_METRICOM: return "metricom" - case unix.ARPHRD_IEEE1394: + case syscall.ARPHRD_IEEE1394: return "ieee1394" - case unix.ARPHRD_INFINIBAND: + case syscall.ARPHRD_INFINIBAND: return "infiniband" - case unix.ARPHRD_SLIP: + case syscall.ARPHRD_SLIP: return "slip" - case unix.ARPHRD_CSLIP: + case syscall.ARPHRD_CSLIP: return "cslip" - case unix.ARPHRD_SLIP6: + case syscall.ARPHRD_SLIP6: return "slip6" - case unix.ARPHRD_CSLIP6: + case syscall.ARPHRD_CSLIP6: return "cslip6" - case unix.ARPHRD_RSRVD: + case syscall.ARPHRD_RSRVD: return "rsrvd" - case unix.ARPHRD_ADAPT: + case syscall.ARPHRD_ADAPT: return "adapt" - case unix.ARPHRD_ROSE: + case syscall.ARPHRD_ROSE: return "rose" - case unix.ARPHRD_X25: + case syscall.ARPHRD_X25: return "x25" - case unix.ARPHRD_HWX25: + case syscall.ARPHRD_HWX25: return "hwx25" - case unix.ARPHRD_PPP: + case syscall.ARPHRD_PPP: return "ppp" - case unix.ARPHRD_HDLC: + case syscall.ARPHRD_HDLC: return "hdlc" - case unix.ARPHRD_LAPB: + case syscall.ARPHRD_LAPB: return "lapb" - case unix.ARPHRD_DDCMP: + case syscall.ARPHRD_DDCMP: return "ddcmp" - case unix.ARPHRD_RAWHDLC: + case syscall.ARPHRD_RAWHDLC: return "rawhdlc" - case unix.ARPHRD_TUNNEL: + case syscall.ARPHRD_TUNNEL: return "ipip" - case unix.ARPHRD_TUNNEL6: + case syscall.ARPHRD_TUNNEL6: return "tunnel6" - case unix.ARPHRD_FRAD: + case syscall.ARPHRD_FRAD: return "frad" - case unix.ARPHRD_SKIP: + case syscall.ARPHRD_SKIP: return "skip" - case unix.ARPHRD_LOOPBACK: + case syscall.ARPHRD_LOOPBACK: return "loopback" - case 
unix.ARPHRD_LOCALTLK: + case syscall.ARPHRD_LOCALTLK: return "ltalk" - case unix.ARPHRD_FDDI: + case syscall.ARPHRD_FDDI: return "fddi" - case unix.ARPHRD_BIF: + case syscall.ARPHRD_BIF: return "bif" - case unix.ARPHRD_SIT: + case syscall.ARPHRD_SIT: return "sit" - case unix.ARPHRD_IPDDP: + case syscall.ARPHRD_IPDDP: return "ip/ddp" - case unix.ARPHRD_IPGRE: + case syscall.ARPHRD_IPGRE: return "gre" - case unix.ARPHRD_PIMREG: + case syscall.ARPHRD_PIMREG: return "pimreg" - case unix.ARPHRD_HIPPI: + case syscall.ARPHRD_HIPPI: return "hippi" - case unix.ARPHRD_ASH: + case syscall.ARPHRD_ASH: return "ash" - case unix.ARPHRD_ECONET: + case syscall.ARPHRD_ECONET: return "econet" - case unix.ARPHRD_IRDA: + case syscall.ARPHRD_IRDA: return "irda" - case unix.ARPHRD_FCPP: + case syscall.ARPHRD_FCPP: return "fcpp" - case unix.ARPHRD_FCAL: + case syscall.ARPHRD_FCAL: return "fcal" - case unix.ARPHRD_FCPL: + case syscall.ARPHRD_FCPL: return "fcpl" - case unix.ARPHRD_FCFABRIC: + case syscall.ARPHRD_FCFABRIC: return "fcfb0" - case unix.ARPHRD_FCFABRIC + 1: + case syscall.ARPHRD_FCFABRIC + 1: return "fcfb1" - case unix.ARPHRD_FCFABRIC + 2: + case syscall.ARPHRD_FCFABRIC + 2: return "fcfb2" - case unix.ARPHRD_FCFABRIC + 3: + case syscall.ARPHRD_FCFABRIC + 3: return "fcfb3" - case unix.ARPHRD_FCFABRIC + 4: + case syscall.ARPHRD_FCFABRIC + 4: return "fcfb4" - case unix.ARPHRD_FCFABRIC + 5: + case syscall.ARPHRD_FCFABRIC + 5: return "fcfb5" - case unix.ARPHRD_FCFABRIC + 6: + case syscall.ARPHRD_FCFABRIC + 6: return "fcfb6" - case unix.ARPHRD_FCFABRIC + 7: + case syscall.ARPHRD_FCFABRIC + 7: return "fcfb7" - case unix.ARPHRD_FCFABRIC + 8: + case syscall.ARPHRD_FCFABRIC + 8: return "fcfb8" - case unix.ARPHRD_FCFABRIC + 9: + case syscall.ARPHRD_FCFABRIC + 9: return "fcfb9" - case unix.ARPHRD_FCFABRIC + 10: + case syscall.ARPHRD_FCFABRIC + 10: return "fcfb10" - case unix.ARPHRD_FCFABRIC + 11: + case syscall.ARPHRD_FCFABRIC + 11: return "fcfb11" - case unix.ARPHRD_FCFABRIC + 12: + case 
syscall.ARPHRD_FCFABRIC + 12: return "fcfb12" - case unix.ARPHRD_IEEE802_TR: + case syscall.ARPHRD_IEEE802_TR: return "tr" - case unix.ARPHRD_IEEE80211: + case syscall.ARPHRD_IEEE80211: return "ieee802.11" - case unix.ARPHRD_IEEE80211_PRISM: + case syscall.ARPHRD_IEEE80211_PRISM: return "ieee802.11/prism" - case unix.ARPHRD_IEEE80211_RADIOTAP: + case syscall.ARPHRD_IEEE80211_RADIOTAP: return "ieee802.11/radiotap" - case unix.ARPHRD_IEEE802154: + case syscall.ARPHRD_IEEE802154: return "ieee802.15.4" case 65534: @@ -244,7 +243,7 @@ func (msg *IfInfomsg) EncapType() string { } func rtaAlignOf(attrlen int) int { - return (attrlen + unix.RTA_ALIGNTO - 1) & ^(unix.RTA_ALIGNTO - 1) + return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1) } func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { @@ -255,7 +254,7 @@ func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { // Extend RtAttr to handle data and children type RtAttr struct { - unix.RtAttr + syscall.RtAttr Data []byte children []NetlinkRequestData } @@ -263,7 +262,7 @@ type RtAttr struct { // Create a new Extended RtAttr object func NewRtAttr(attrType int, data []byte) *RtAttr { return &RtAttr{ - RtAttr: unix.RtAttr{ + RtAttr: syscall.RtAttr{ Type: uint16(attrType), }, children: []NetlinkRequestData{}, @@ -278,21 +277,16 @@ func NewRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr { return attr } -// AddChild adds an existing RtAttr as a child. 
-func (a *RtAttr) AddChild(attr *RtAttr) { - a.children = append(a.children, attr) -} - func (a *RtAttr) Len() int { if len(a.children) == 0 { - return (unix.SizeofRtAttr + len(a.Data)) + return (syscall.SizeofRtAttr + len(a.Data)) } l := 0 for _, child := range a.children { l += rtaAlignOf(child.Len()) } - l += unix.SizeofRtAttr + l += syscall.SizeofRtAttr return rtaAlignOf(l + len(a.Data)) } @@ -325,7 +319,7 @@ func (a *RtAttr) Serialize() []byte { } type NetlinkRequest struct { - unix.NlMsghdr + syscall.NlMsghdr Data []NetlinkRequestData RawData []byte Sockets map[int]*SocketHandle @@ -333,7 +327,7 @@ type NetlinkRequest struct { // Serialize the Netlink Request into a byte array func (req *NetlinkRequest) Serialize() []byte { - length := unix.SizeofNlMsghdr + length := syscall.SizeofNlMsghdr dataBytes := make([][]byte, len(req.Data)) for i, data := range req.Data { dataBytes[i] = data.Serialize() @@ -343,8 +337,8 @@ func (req *NetlinkRequest) Serialize() []byte { req.Len = uint32(length) b := make([]byte, length) - hdr := (*(*[unix.SizeofNlMsghdr]byte)(unsafe.Pointer(req)))[:] - next := unix.SizeofNlMsghdr + hdr := (*(*[syscall.SizeofNlMsghdr]byte)(unsafe.Pointer(req)))[:] + next := syscall.SizeofNlMsghdr copy(b[0:next], hdr) for _, data := range dataBytes { for _, dataByte := range data { @@ -427,10 +421,10 @@ done: if m.Header.Pid != pid { return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid) } - if m.Header.Type == unix.NLMSG_DONE { + if m.Header.Type == syscall.NLMSG_DONE { break done } - if m.Header.Type == unix.NLMSG_ERROR { + if m.Header.Type == syscall.NLMSG_ERROR { native := NativeEndian() error := int32(native.Uint32(m.Data[0:4])) if error == 0 { @@ -442,7 +436,7 @@ done: continue } res = append(res, m.Data) - if m.Header.Flags&unix.NLM_F_MULTI == 0 { + if m.Header.Flags&syscall.NLM_F_MULTI == 0 { break done } } @@ -455,10 +449,10 @@ done: // the message is serialized func NewNetlinkRequest(proto, flags int) *NetlinkRequest { return 
&NetlinkRequest{ - NlMsghdr: unix.NlMsghdr{ - Len: uint32(unix.SizeofNlMsghdr), + NlMsghdr: syscall.NlMsghdr{ + Len: uint32(syscall.SizeofNlMsghdr), Type: uint16(proto), - Flags: unix.NLM_F_REQUEST | uint16(flags), + Flags: syscall.NLM_F_REQUEST | uint16(flags), Seq: atomic.AddUint32(&nextSeqNr, 1), }, } @@ -466,21 +460,21 @@ func NewNetlinkRequest(proto, flags int) *NetlinkRequest { type NetlinkSocket struct { fd int32 - lsa unix.SockaddrNetlink + lsa syscall.SockaddrNetlink sync.Mutex } func getNetlinkSocket(protocol int) (*NetlinkSocket, error) { - fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW|unix.SOCK_CLOEXEC, protocol) + fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW|syscall.SOCK_CLOEXEC, protocol) if err != nil { return nil, err } s := &NetlinkSocket{ fd: int32(fd), } - s.lsa.Family = unix.AF_NETLINK - if err := unix.Bind(fd, &s.lsa); err != nil { - unix.Close(fd) + s.lsa.Family = syscall.AF_NETLINK + if err := syscall.Bind(fd, &s.lsa); err != nil { + syscall.Close(fd) return nil, err } @@ -557,21 +551,21 @@ func executeInNetns(newNs, curNs netns.NsHandle) (func(), error) { // Returns the netlink socket on which Receive() method can be called // to retrieve the messages from the kernel. 
func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) { - fd, err := unix.Socket(unix.AF_NETLINK, unix.SOCK_RAW, protocol) + fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, protocol) if err != nil { return nil, err } s := &NetlinkSocket{ fd: int32(fd), } - s.lsa.Family = unix.AF_NETLINK + s.lsa.Family = syscall.AF_NETLINK for _, g := range groups { s.lsa.Groups |= (1 << (g - 1)) } - if err := unix.Bind(fd, &s.lsa); err != nil { - unix.Close(fd) + if err := syscall.Bind(fd, &s.lsa); err != nil { + syscall.Close(fd) return nil, err } @@ -592,7 +586,7 @@ func SubscribeAt(newNs, curNs netns.NsHandle, protocol int, groups ...uint) (*Ne func (s *NetlinkSocket) Close() { fd := int(atomic.SwapInt32(&s.fd, -1)) - unix.Close(fd) + syscall.Close(fd) } func (s *NetlinkSocket) GetFd() int { @@ -604,7 +598,7 @@ func (s *NetlinkSocket) Send(request *NetlinkRequest) error { if fd < 0 { return fmt.Errorf("Send called on a closed socket") } - if err := unix.Sendto(fd, request.Serialize(), 0, &s.lsa); err != nil { + if err := syscall.Sendto(fd, request.Serialize(), 0, &s.lsa); err != nil { return err } return nil @@ -615,12 +609,12 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { if fd < 0 { return nil, fmt.Errorf("Receive called on a closed socket") } - rb := make([]byte, unix.Getpagesize()) - nr, _, err := unix.Recvfrom(fd, rb, 0) + rb := make([]byte, syscall.Getpagesize()) + nr, _, err := syscall.Recvfrom(fd, rb, 0) if err != nil { return nil, err } - if nr < unix.NLMSG_HDRLEN { + if nr < syscall.NLMSG_HDRLEN { return nil, fmt.Errorf("Got short response from netlink") } rb = rb[:nr] @@ -628,27 +622,27 @@ func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) { } // SetSendTimeout allows to set a send timeout on the socket -func (s *NetlinkSocket) SetSendTimeout(timeout *unix.Timeval) error { +func (s *NetlinkSocket) SetSendTimeout(timeout *syscall.Timeval) error { // Set a send timeout of SOCKET_SEND_TIMEOUT, this 
will allow the Send to periodically unblock and avoid that a routine // remains stuck on a send on a closed fd - return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_SNDTIMEO, timeout) + return syscall.SetsockoptTimeval(int(s.fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, timeout) } // SetReceiveTimeout allows to set a receive timeout on the socket -func (s *NetlinkSocket) SetReceiveTimeout(timeout *unix.Timeval) error { +func (s *NetlinkSocket) SetReceiveTimeout(timeout *syscall.Timeval) error { // Set a read timeout of SOCKET_READ_TIMEOUT, this will allow the Read to periodically unblock and avoid that a routine // remains stuck on a recvmsg on a closed fd - return unix.SetsockoptTimeval(int(s.fd), unix.SOL_SOCKET, unix.SO_RCVTIMEO, timeout) + return syscall.SetsockoptTimeval(int(s.fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, timeout) } func (s *NetlinkSocket) GetPid() (uint32, error) { fd := int(atomic.LoadInt32(&s.fd)) - lsa, err := unix.Getsockname(fd) + lsa, err := syscall.Getsockname(fd) if err != nil { return 0, err } switch v := lsa.(type) { - case *unix.SockaddrNetlink: + case *syscall.SockaddrNetlink: return v.Pid, nil } return 0, fmt.Errorf("Wrong socket type") @@ -703,24 +697,24 @@ func Uint64Attr(v uint64) []byte { func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) { var attrs []syscall.NetlinkRouteAttr - for len(b) >= unix.SizeofRtAttr { + for len(b) >= syscall.SizeofRtAttr { a, vbuf, alen, err := netlinkRouteAttrAndValue(b) if err != nil { return nil, err } - ra := syscall.NetlinkRouteAttr{Attr: syscall.RtAttr(*a), Value: vbuf[:int(a.Len)-unix.SizeofRtAttr]} + ra := syscall.NetlinkRouteAttr{Attr: *a, Value: vbuf[:int(a.Len)-syscall.SizeofRtAttr]} attrs = append(attrs, ra) b = b[alen:] } return attrs, nil } -func netlinkRouteAttrAndValue(b []byte) (*unix.RtAttr, []byte, int, error) { - a := (*unix.RtAttr)(unsafe.Pointer(&b[0])) - if int(a.Len) < unix.SizeofRtAttr || int(a.Len) > len(b) { - return nil, nil, 0, unix.EINVAL 
+func netlinkRouteAttrAndValue(b []byte) (*syscall.RtAttr, []byte, int, error) { + a := (*syscall.RtAttr)(unsafe.Pointer(&b[0])) + if int(a.Len) < syscall.SizeofRtAttr || int(a.Len) > len(b) { + return nil, nil, 0, syscall.EINVAL } - return a, b[unix.SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil + return a, b[syscall.SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil } // SocketHandle contains the netlink socket and the associated diff --git a/vendor/github.com/vishvananda/netlink/nl/route_linux.go b/vendor/github.com/vishvananda/netlink/nl/route_linux.go index f6906fcaf7e..1a064d65d2f 100644 --- a/vendor/github.com/vishvananda/netlink/nl/route_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/route_linux.go @@ -1,66 +1,65 @@ package nl import ( + "syscall" "unsafe" - - "golang.org/x/sys/unix" ) type RtMsg struct { - unix.RtMsg + syscall.RtMsg } func NewRtMsg() *RtMsg { return &RtMsg{ - RtMsg: unix.RtMsg{ - Table: unix.RT_TABLE_MAIN, - Scope: unix.RT_SCOPE_UNIVERSE, - Protocol: unix.RTPROT_BOOT, - Type: unix.RTN_UNICAST, + RtMsg: syscall.RtMsg{ + Table: syscall.RT_TABLE_MAIN, + Scope: syscall.RT_SCOPE_UNIVERSE, + Protocol: syscall.RTPROT_BOOT, + Type: syscall.RTN_UNICAST, }, } } func NewRtDelMsg() *RtMsg { return &RtMsg{ - RtMsg: unix.RtMsg{ - Table: unix.RT_TABLE_MAIN, - Scope: unix.RT_SCOPE_NOWHERE, + RtMsg: syscall.RtMsg{ + Table: syscall.RT_TABLE_MAIN, + Scope: syscall.RT_SCOPE_NOWHERE, }, } } func (msg *RtMsg) Len() int { - return unix.SizeofRtMsg + return syscall.SizeofRtMsg } func DeserializeRtMsg(b []byte) *RtMsg { - return (*RtMsg)(unsafe.Pointer(&b[0:unix.SizeofRtMsg][0])) + return (*RtMsg)(unsafe.Pointer(&b[0:syscall.SizeofRtMsg][0])) } func (msg *RtMsg) Serialize() []byte { - return (*(*[unix.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:] + return (*(*[syscall.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:] } type RtNexthop struct { - unix.RtNexthop + syscall.RtNexthop Children []NetlinkRequestData } func DeserializeRtNexthop(b []byte) *RtNexthop { - return 
(*RtNexthop)(unsafe.Pointer(&b[0:unix.SizeofRtNexthop][0])) + return (*RtNexthop)(unsafe.Pointer(&b[0:syscall.SizeofRtNexthop][0])) } func (msg *RtNexthop) Len() int { if len(msg.Children) == 0 { - return unix.SizeofRtNexthop + return syscall.SizeofRtNexthop } l := 0 for _, child := range msg.Children { l += rtaAlignOf(child.Len()) } - l += unix.SizeofRtNexthop + l += syscall.SizeofRtNexthop return rtaAlignOf(l) } @@ -68,8 +67,8 @@ func (msg *RtNexthop) Serialize() []byte { length := msg.Len() msg.RtNexthop.Len = uint16(length) buf := make([]byte, length) - copy(buf, (*(*[unix.SizeofRtNexthop]byte)(unsafe.Pointer(msg)))[:]) - next := rtaAlignOf(unix.SizeofRtNexthop) + copy(buf, (*(*[syscall.SizeofRtNexthop]byte)(unsafe.Pointer(msg)))[:]) + next := rtaAlignOf(syscall.SizeofRtNexthop) if len(msg.Children) > 0 { for _, child := range msg.Children { childBuf := child.Serialize() diff --git a/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go b/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go deleted file mode 100644 index b3425f6b0ec..00000000000 --- a/vendor/github.com/vishvananda/netlink/nl/seg6_linux.go +++ /dev/null @@ -1,111 +0,0 @@ -package nl - -import ( - "errors" - "fmt" - "net" -) - -type IPv6SrHdr struct { - nextHdr uint8 - hdrLen uint8 - routingType uint8 - segmentsLeft uint8 - firstSegment uint8 - flags uint8 - reserved uint16 - - Segments []net.IP -} - -func (s1 *IPv6SrHdr) Equal(s2 IPv6SrHdr) bool { - if len(s1.Segments) != len(s2.Segments) { - return false - } - for i := range s1.Segments { - if s1.Segments[i].Equal(s2.Segments[i]) != true { - return false - } - } - return s1.nextHdr == s2.nextHdr && - s1.hdrLen == s2.hdrLen && - s1.routingType == s2.routingType && - s1.segmentsLeft == s2.segmentsLeft && - s1.firstSegment == s2.firstSegment && - s1.flags == s2.flags - // reserved doesn't need to be identical. 
-} - -// seg6 encap mode -const ( - SEG6_IPTUN_MODE_INLINE = iota - SEG6_IPTUN_MODE_ENCAP -) - -// number of nested RTATTR -// from include/uapi/linux/seg6_iptunnel.h -const ( - SEG6_IPTUNNEL_UNSPEC = iota - SEG6_IPTUNNEL_SRH - __SEG6_IPTUNNEL_MAX -) -const ( - SEG6_IPTUNNEL_MAX = __SEG6_IPTUNNEL_MAX - 1 -) - -func EncodeSEG6Encap(mode int, segments []net.IP) ([]byte, error) { - nsegs := len(segments) // nsegs: number of segments - if nsegs == 0 { - return nil, errors.New("EncodeSEG6Encap: No Segment in srh") - } - b := make([]byte, 12, 12+len(segments)*16) - native := NativeEndian() - native.PutUint32(b, uint32(mode)) - b[4] = 0 // srh.nextHdr (0 when calling netlink) - b[5] = uint8(16 * nsegs >> 3) // srh.hdrLen (in 8-octets unit) - b[6] = IPV6_SRCRT_TYPE_4 // srh.routingType (assigned by IANA) - b[7] = uint8(nsegs - 1) // srh.segmentsLeft - b[8] = uint8(nsegs - 1) // srh.firstSegment - b[9] = 0 // srh.flags (SR6_FLAG1_HMAC for srh_hmac) - // srh.reserved: Defined as "Tag" in draft-ietf-6man-segment-routing-header-07 - native.PutUint16(b[10:], 0) // srh.reserved - for _, netIP := range segments { - b = append(b, netIP...) 
// srh.Segments - } - return b, nil -} - -func DecodeSEG6Encap(buf []byte) (int, []net.IP, error) { - native := NativeEndian() - mode := int(native.Uint32(buf)) - srh := IPv6SrHdr{ - nextHdr: buf[4], - hdrLen: buf[5], - routingType: buf[6], - segmentsLeft: buf[7], - firstSegment: buf[8], - flags: buf[9], - reserved: native.Uint16(buf[10:12]), - } - buf = buf[12:] - if len(buf)%16 != 0 { - err := fmt.Errorf("DecodeSEG6Encap: error parsing Segment List (buf len: %d)\n", len(buf)) - return mode, nil, err - } - for len(buf) > 0 { - srh.Segments = append(srh.Segments, net.IP(buf[:16])) - buf = buf[16:] - } - return mode, srh.Segments, nil -} - -// Helper functions -func SEG6EncapModeString(mode int) string { - switch mode { - case SEG6_IPTUN_MODE_INLINE: - return "inline" - case SEG6_IPTUN_MODE_ENCAP: - return "encap" - } - return "unknown" -} diff --git a/vendor/github.com/vishvananda/netlink/nl/syscall.go b/vendor/github.com/vishvananda/netlink/nl/syscall.go index fc631e0e505..3473e536384 100644 --- a/vendor/github.com/vishvananda/netlink/nl/syscall.go +++ b/vendor/github.com/vishvananda/netlink/nl/syscall.go @@ -65,14 +65,4 @@ const ( LWTUNNEL_ENCAP_IP LWTUNNEL_ENCAP_ILA LWTUNNEL_ENCAP_IP6 - LWTUNNEL_ENCAP_SEG6 - LWTUNNEL_ENCAP_BPF -) - -// routing header types -const ( - IPV6_SRCRT_STRICT = 0x01 // Deprecated; will be removed - IPV6_SRCRT_TYPE_0 = 0 // Deprecated; will be removed - IPV6_SRCRT_TYPE_2 = 2 // IPv6 type 2 Routing Header - IPV6_SRCRT_TYPE_4 = 4 // Segment Routing with IPv6 ) diff --git a/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/github.com/vishvananda/netlink/protinfo_linux.go index 43c465f0575..10dd0d53357 100644 --- a/vendor/github.com/vishvananda/netlink/protinfo_linux.go +++ b/vendor/github.com/vishvananda/netlink/protinfo_linux.go @@ -5,7 +5,6 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) func LinkGetProtinfo(link Link) (Protinfo, error) { @@ -16,10 +15,10 @@ func (h *Handle) 
LinkGetProtinfo(link Link) (Protinfo, error) { base := link.Attrs() h.ensureIndex(base) var pi Protinfo - req := h.newNetlinkRequest(unix.RTM_GETLINK, unix.NLM_F_DUMP) - msg := nl.NewIfInfomsg(unix.AF_BRIDGE) + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) + msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, 0) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0) if err != nil { return pi, err } @@ -34,7 +33,7 @@ func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { return pi, err } for _, attr := range attrs { - if attr.Attr.Type != unix.IFLA_PROTINFO|unix.NLA_F_NESTED { + if attr.Attr.Type != syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED { continue } infos, err := nl.ParseRouteAttr(attr.Value) diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go index 91193145ae7..1123396e47d 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go +++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -8,7 +8,6 @@ import ( "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) // NOTE function is here because it uses other linux functions @@ -85,7 +84,7 @@ func QdiscDel(qdisc Qdisc) error { // QdiscDel will delete a qdisc from the system. // Equivalent to: `tc qdisc del $qdisc` func (h *Handle) QdiscDel(qdisc Qdisc) error { - return h.qdiscModify(unix.RTM_DELQDISC, 0, qdisc) + return h.qdiscModify(syscall.RTM_DELQDISC, 0, qdisc) } // QdiscChange will change a qdisc in place @@ -99,7 +98,7 @@ func QdiscChange(qdisc Qdisc) error { // Equivalent to: `tc qdisc change $qdisc` // The parent and handle MUST NOT be changed. func (h *Handle) QdiscChange(qdisc Qdisc) error { - return h.qdiscModify(unix.RTM_NEWQDISC, 0, qdisc) + return h.qdiscModify(syscall.RTM_NEWQDISC, 0, qdisc) } // QdiscReplace will replace a qdisc to the system. 
@@ -114,8 +113,8 @@ func QdiscReplace(qdisc Qdisc) error { // The handle MUST change. func (h *Handle) QdiscReplace(qdisc Qdisc) error { return h.qdiscModify( - unix.RTM_NEWQDISC, - unix.NLM_F_CREATE|unix.NLM_F_REPLACE, + syscall.RTM_NEWQDISC, + syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE, qdisc) } @@ -129,13 +128,13 @@ func QdiscAdd(qdisc Qdisc) error { // Equivalent to: `tc qdisc add $qdisc` func (h *Handle) QdiscAdd(qdisc Qdisc) error { return h.qdiscModify( - unix.RTM_NEWQDISC, - unix.NLM_F_CREATE|unix.NLM_F_EXCL, + syscall.RTM_NEWQDISC, + syscall.NLM_F_CREATE|syscall.NLM_F_EXCL, qdisc) } func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error { - req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK) + req := h.newNetlinkRequest(cmd, flags|syscall.NLM_F_ACK) base := qdisc.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -146,13 +145,13 @@ func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error { req.AddData(msg) // When deleting don't bother building the rest of the netlink payload - if cmd != unix.RTM_DELQDISC { + if cmd != syscall.RTM_DELQDISC { if err := qdiscPayload(req, qdisc); err != nil { return err } } - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -249,7 +248,7 @@ func QdiscList(link Link) ([]Qdisc, error) { // Equivalent to: `tc qdisc show`. // The list can be filtered by link. 
func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { - req := h.newNetlinkRequest(unix.RTM_GETQDISC, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(syscall.RTM_GETQDISC, syscall.NLM_F_DUMP) index := int32(0) if link != nil { base := link.Attrs() @@ -262,7 +261,7 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { } req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWQDISC) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWQDISC) if err != nil { return nil, err } diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go index 2cd58ee3342..68c6a2230d2 100644 --- a/vendor/github.com/vishvananda/netlink/route.go +++ b/vendor/github.com/vishvananda/netlink/route.go @@ -45,8 +45,6 @@ type Route struct { MPLSDst *int NewDst Destination Encap Encap - MTU int - AdvMSS int } func (r Route) String() string { diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go index fd5ac898354..9234c6986da 100644 --- a/vendor/github.com/vishvananda/netlink/route_linux.go +++ b/vendor/github.com/vishvananda/netlink/route_linux.go @@ -8,17 +8,16 @@ import ( "github.com/vishvananda/netlink/nl" "github.com/vishvananda/netns" - "golang.org/x/sys/unix" ) // RtAttr is shared so it is in netlink_linux.go const ( - SCOPE_UNIVERSE Scope = unix.RT_SCOPE_UNIVERSE - SCOPE_SITE Scope = unix.RT_SCOPE_SITE - SCOPE_LINK Scope = unix.RT_SCOPE_LINK - SCOPE_HOST Scope = unix.RT_SCOPE_HOST - SCOPE_NOWHERE Scope = unix.RT_SCOPE_NOWHERE + SCOPE_UNIVERSE Scope = syscall.RT_SCOPE_UNIVERSE + SCOPE_SITE Scope = syscall.RT_SCOPE_SITE + SCOPE_LINK Scope = syscall.RT_SCOPE_LINK + SCOPE_HOST Scope = syscall.RT_SCOPE_HOST + SCOPE_NOWHERE Scope = syscall.RT_SCOPE_NOWHERE ) const ( @@ -35,8 +34,8 @@ const ( ) const ( - FLAG_ONLINK NextHopFlag = unix.RTNH_F_ONLINK - FLAG_PERVASIVE NextHopFlag = unix.RTNH_F_PERVASIVE + FLAG_ONLINK NextHopFlag = 
syscall.RTNH_F_ONLINK + FLAG_PERVASIVE NextHopFlag = syscall.RTNH_F_PERVASIVE ) var testFlags = []flagString{ @@ -125,17 +124,17 @@ func (e *MPLSEncap) Type() int { func (e *MPLSEncap) Decode(buf []byte) error { if len(buf) < 4 { - return fmt.Errorf("lack of bytes") + return fmt.Errorf("Lack of bytes") } native := nl.NativeEndian() l := native.Uint16(buf) if len(buf) < int(l) { - return fmt.Errorf("lack of bytes") + return fmt.Errorf("Lack of bytes") } buf = buf[:l] typ := native.Uint16(buf[2:]) if typ != nl.MPLS_IPTUNNEL_DST { - return fmt.Errorf("unknown MPLS Encap Type: %d", typ) + return fmt.Errorf("Unknown MPLS Encap Type: %d", typ) } e.Labels = nl.DecodeMPLSStack(buf[4:]) return nil @@ -186,79 +185,6 @@ func (e *MPLSEncap) Equal(x Encap) bool { return true } -// SEG6 definitions -type SEG6Encap struct { - Mode int - Segments []net.IP -} - -func (e *SEG6Encap) Type() int { - return nl.LWTUNNEL_ENCAP_SEG6 -} -func (e *SEG6Encap) Decode(buf []byte) error { - if len(buf) < 4 { - return fmt.Errorf("lack of bytes") - } - native := nl.NativeEndian() - // Get Length(l) & Type(typ) : 2 + 2 bytes - l := native.Uint16(buf) - if len(buf) < int(l) { - return fmt.Errorf("lack of bytes") - } - buf = buf[:l] // make sure buf size upper limit is Length - typ := native.Uint16(buf[2:]) - if typ != nl.SEG6_IPTUNNEL_SRH { - return fmt.Errorf("unknown SEG6 Type: %d", typ) - } - - var err error - e.Mode, e.Segments, err = nl.DecodeSEG6Encap(buf[4:]) - - return err -} -func (e *SEG6Encap) Encode() ([]byte, error) { - s, err := nl.EncodeSEG6Encap(e.Mode, e.Segments) - native := nl.NativeEndian() - hdr := make([]byte, 4) - native.PutUint16(hdr, uint16(len(s)+4)) - native.PutUint16(hdr[2:], nl.SEG6_IPTUNNEL_SRH) - return append(hdr, s...), err -} -func (e *SEG6Encap) String() string { - segs := make([]string, 0, len(e.Segments)) - // append segment backwards (from n to 0) since seg#0 is the last segment. 
- for i := len(e.Segments); i > 0; i-- { - segs = append(segs, fmt.Sprintf("%s", e.Segments[i-1])) - } - str := fmt.Sprintf("mode %s segs %d [ %s ]", nl.SEG6EncapModeString(e.Mode), - len(e.Segments), strings.Join(segs, " ")) - return str -} -func (e *SEG6Encap) Equal(x Encap) bool { - o, ok := x.(*SEG6Encap) - if !ok { - return false - } - if e == o { - return true - } - if e == nil || o == nil { - return false - } - if e.Mode != o.Mode { - return false - } - if len(e.Segments) != len(o.Segments) { - return false - } - for i := range e.Segments { - if !e.Segments[i].Equal(o.Segments[i]) { - return false - } - } - return true -} - // RouteAdd will add a route to the system. // Equivalent to: `ip route add $route` func RouteAdd(route *Route) error { @@ -268,8 +194,8 @@ func RouteAdd(route *Route) error { // RouteAdd will add a route to the system. // Equivalent to: `ip route add $route` func (h *Handle) RouteAdd(route *Route) error { - flags := unix.NLM_F_CREATE | unix.NLM_F_EXCL | unix.NLM_F_ACK - req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) + flags := syscall.NLM_F_CREATE | syscall.NLM_F_EXCL | syscall.NLM_F_ACK + req := h.newNetlinkRequest(syscall.RTM_NEWROUTE, flags) return h.routeHandle(route, req, nl.NewRtMsg()) } @@ -282,8 +208,8 @@ func RouteReplace(route *Route) error { // RouteReplace will add a route to the system. // Equivalent to: `ip route replace $route` func (h *Handle) RouteReplace(route *Route) error { - flags := unix.NLM_F_CREATE | unix.NLM_F_REPLACE | unix.NLM_F_ACK - req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) + flags := syscall.NLM_F_CREATE | syscall.NLM_F_REPLACE | syscall.NLM_F_ACK + req := h.newNetlinkRequest(syscall.RTM_NEWROUTE, flags) return h.routeHandle(route, req, nl.NewRtMsg()) } @@ -296,7 +222,7 @@ func RouteDel(route *Route) error { // RouteDel will delete a route from the system. 
// Equivalent to: `ip route del $route` func (h *Handle) RouteDel(route *Route) error { - req := h.newNetlinkRequest(unix.RTM_DELROUTE, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_DELROUTE, syscall.NLM_F_ACK) return h.routeHandle(route, req, nl.NewRtDelMsg()) } @@ -319,12 +245,12 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg } else { dstData = route.Dst.IP.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, dstData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData)) } else if route.MPLSDst != nil { family = nl.FAMILY_MPLS msg.Dst_len = uint8(20) - msg.Type = unix.RTN_UNICAST - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, nl.EncodeMPLSStack(*route.MPLSDst))) + msg.Type = syscall.RTN_UNICAST + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, nl.EncodeMPLSStack(*route.MPLSDst))) } if route.NewDst != nil { @@ -362,7 +288,7 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg srcData = route.Src.To16() } // The commonly used src ip for routes is actually PREFSRC - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_PREFSRC, srcData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PREFSRC, srcData)) } if route.Gw != nil { @@ -377,14 +303,14 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg } else { gwData = route.Gw.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_GATEWAY, gwData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_GATEWAY, gwData)) } if len(route.MultiPath) > 0 { buf := []byte{} for _, nh := range route.MultiPath { rtnh := &nl.RtNexthop{ - RtNexthop: unix.RtNexthop{ + RtNexthop: syscall.RtNexthop{ Hops: uint8(nh.Hops), Ifindex: int32(nh.LinkIndex), Flags: uint8(nh.Flags), @@ -397,9 +323,9 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg return fmt.Errorf("gateway, source, and destination ip are not the same IP family") } if gwFamily == FAMILY_V4 { 
- children = append(children, nl.NewRtAttr(unix.RTA_GATEWAY, []byte(nh.Gw.To4()))) + children = append(children, nl.NewRtAttr(syscall.RTA_GATEWAY, []byte(nh.Gw.To4()))) } else { - children = append(children, nl.NewRtAttr(unix.RTA_GATEWAY, []byte(nh.Gw.To16()))) + children = append(children, nl.NewRtAttr(syscall.RTA_GATEWAY, []byte(nh.Gw.To16()))) } } if nh.NewDst != nil { @@ -425,15 +351,15 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg rtnh.Children = children buf = append(buf, rtnh.Serialize()...) } - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_MULTIPATH, buf)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_MULTIPATH, buf)) } if route.Table > 0 { if route.Table >= 256 { - msg.Table = unix.RT_TABLE_UNSPEC + msg.Table = syscall.RT_TABLE_UNSPEC b := make([]byte, 4) native.PutUint32(b, uint32(route.Table)) - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_TABLE, b)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_TABLE, b)) } else { msg.Table = uint8(route.Table) } @@ -442,7 +368,7 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if route.Priority > 0 { b := make([]byte, 4) native.PutUint32(b, uint32(route.Priority)) - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_PRIORITY, b)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PRIORITY, b)) } if route.Tos > 0 { msg.Tos = uint8(route.Tos) @@ -454,25 +380,6 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg msg.Type = uint8(route.Type) } - var metrics []*nl.RtAttr - // TODO: support other rta_metric values - if route.MTU > 0 { - b := nl.Uint32Attr(uint32(route.MTU)) - metrics = append(metrics, nl.NewRtAttr(unix.RTAX_MTU, b)) - } - if route.AdvMSS > 0 { - b := nl.Uint32Attr(uint32(route.AdvMSS)) - metrics = append(metrics, nl.NewRtAttr(unix.RTAX_ADVMSS, b)) - } - - if metrics != nil { - attr := nl.NewRtAttr(unix.RTA_METRICS, nil) - for _, metric := range metrics { - attr.AddChild(metric) - } - 
rtAttrs = append(rtAttrs, attr) - } - msg.Flags = uint32(route.Flags) msg.Scope = uint8(route.Scope) msg.Family = uint8(family) @@ -487,9 +394,9 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg ) native.PutUint32(b, uint32(route.LinkIndex)) - req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) + req.AddData(nl.NewRtAttr(syscall.RTA_OIF, b)) - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -522,11 +429,11 @@ func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, e // RouteListFiltered gets a list of routes in the system filtered with specified rules. // All rules must be defined in RouteFilter struct func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { - req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP) infmsg := nl.NewIfInfomsg(family) req.AddData(infmsg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE) if err != nil { return nil, err } @@ -534,11 +441,11 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) var res []Route for _, m := range msgs { msg := nl.DeserializeRtMsg(m) - if msg.Flags&unix.RTM_F_CLONED != 0 { + if msg.Flags&syscall.RTM_F_CLONED != 0 { // Ignore cloned routes continue } - if msg.Table != unix.RT_TABLE_MAIN { + if msg.Table != syscall.RT_TABLE_MAIN { if filter == nil || filter != nil && filterMask&RT_FILTER_TABLE == 0 { // Ignore non-main tables continue @@ -550,7 +457,7 @@ func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) } if filter != nil { switch { - case filterMask&RT_FILTER_TABLE != 0 && filter.Table != unix.RT_TABLE_UNSPEC && route.Table != filter.Table: + case filterMask&RT_FILTER_TABLE != 0 && filter.Table != syscall.RT_TABLE_UNSPEC && 
route.Table != filter.Table: continue case filterMask&RT_FILTER_PROTOCOL != 0 && route.Protocol != filter.Protocol: continue @@ -601,11 +508,11 @@ func deserializeRoute(m []byte) (Route, error) { var encap, encapType syscall.NetlinkRouteAttr for _, attr := range attrs { switch attr.Attr.Type { - case unix.RTA_GATEWAY: + case syscall.RTA_GATEWAY: route.Gw = net.IP(attr.Value) - case unix.RTA_PREFSRC: + case syscall.RTA_PREFSRC: route.Src = net.IP(attr.Value) - case unix.RTA_DST: + case syscall.RTA_DST: if msg.Family == nl.FAMILY_MPLS { stack := nl.DecodeMPLSStack(attr.Value) if len(stack) == 0 || len(stack) > 1 { @@ -618,36 +525,36 @@ func deserializeRoute(m []byte) (Route, error) { Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attr.Value)), } } - case unix.RTA_OIF: + case syscall.RTA_OIF: route.LinkIndex = int(native.Uint32(attr.Value[0:4])) - case unix.RTA_IIF: + case syscall.RTA_IIF: route.ILinkIndex = int(native.Uint32(attr.Value[0:4])) - case unix.RTA_PRIORITY: + case syscall.RTA_PRIORITY: route.Priority = int(native.Uint32(attr.Value[0:4])) - case unix.RTA_TABLE: + case syscall.RTA_TABLE: route.Table = int(native.Uint32(attr.Value[0:4])) - case unix.RTA_MULTIPATH: + case syscall.RTA_MULTIPATH: parseRtNexthop := func(value []byte) (*NexthopInfo, []byte, error) { - if len(value) < unix.SizeofRtNexthop { - return nil, nil, fmt.Errorf("lack of bytes") + if len(value) < syscall.SizeofRtNexthop { + return nil, nil, fmt.Errorf("Lack of bytes") } nh := nl.DeserializeRtNexthop(value) if len(value) < int(nh.RtNexthop.Len) { - return nil, nil, fmt.Errorf("lack of bytes") + return nil, nil, fmt.Errorf("Lack of bytes") } info := &NexthopInfo{ LinkIndex: int(nh.RtNexthop.Ifindex), Hops: int(nh.RtNexthop.Hops), Flags: int(nh.RtNexthop.Flags), } - attrs, err := nl.ParseRouteAttr(value[unix.SizeofRtNexthop:int(nh.RtNexthop.Len)]) + attrs, err := nl.ParseRouteAttr(value[syscall.SizeofRtNexthop:int(nh.RtNexthop.Len)]) if err != nil { return nil, nil, err } var encap, encapType 
syscall.NetlinkRouteAttr for _, attr := range attrs { switch attr.Attr.Type { - case unix.RTA_GATEWAY: + case syscall.RTA_GATEWAY: info.Gw = net.IP(attr.Value) case nl.RTA_NEWDST: var d Destination @@ -704,19 +611,6 @@ func deserializeRoute(m []byte) (Route, error) { encapType = attr case nl.RTA_ENCAP: encap = attr - case unix.RTA_METRICS: - metrics, err := nl.ParseRouteAttr(attr.Value) - if err != nil { - return route, err - } - for _, metric := range metrics { - switch metric.Attr.Type { - case unix.RTAX_MTU: - route.MTU = int(native.Uint32(metric.Value[0:4])) - case unix.RTAX_ADVMSS: - route.AdvMSS = int(native.Uint32(metric.Value[0:4])) - } - } } } @@ -729,11 +623,6 @@ func deserializeRoute(m []byte) (Route, error) { if err := e.Decode(encap.Value); err != nil { return route, err } - case nl.LWTUNNEL_ENCAP_SEG6: - e = &SEG6Encap{} - if err := e.Decode(encap.Value); err != nil { - return route, err - } } route.Encap = e } @@ -750,7 +639,7 @@ func RouteGet(destination net.IP) ([]Route, error) { // RouteGet gets a route to a specific destination from the host system. // Equivalent to: 'ip route get'. 
func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { - req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_REQUEST) + req := h.newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_REQUEST) family := nl.GetIPFamily(destination) var destinationData []byte var bitlen uint8 @@ -766,10 +655,10 @@ func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { msg.Dst_len = bitlen req.AddData(msg) - rtaDst := nl.NewRtAttr(unix.RTA_DST, destinationData) + rtaDst := nl.NewRtAttr(syscall.RTA_DST, destinationData) req.AddData(rtaDst) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE) if err != nil { return nil, err } @@ -817,7 +706,7 @@ func RouteSubscribeWithOptions(ch chan<- RouteUpdate, done <-chan struct{}, opti } func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error)) error { - s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_IPV4_ROUTE, unix.RTNLGRP_IPV6_ROUTE) + s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_ROUTE, syscall.RTNLGRP_IPV6_ROUTE) if err != nil { return err } diff --git a/vendor/github.com/vishvananda/netlink/rule.go b/vendor/github.com/vishvananda/netlink/rule.go index 7fc8ae5df15..e4d9168d6c0 100644 --- a/vendor/github.com/vishvananda/netlink/rule.go +++ b/vendor/github.com/vishvananda/netlink/rule.go @@ -21,7 +21,6 @@ type Rule struct { OifName string SuppressIfgroup int SuppressPrefixlen int - Invert bool } func (r Rule) String() string { diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go index 6238ae45864..cbd91a56bb3 100644 --- a/vendor/github.com/vishvananda/netlink/rule_linux.go +++ b/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -3,13 +3,11 @@ package netlink import ( "fmt" "net" + "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" 
) -const FibRuleInvert = 0x2 - // RuleAdd adds a rule to the system. // Equivalent to: ip rule add func RuleAdd(rule *Rule) error { @@ -19,7 +17,7 @@ func RuleAdd(rule *Rule) error { // RuleAdd adds a rule to the system. // Equivalent to: ip rule add func (h *Handle) RuleAdd(rule *Rule) error { - req := h.newNetlinkRequest(unix.RTM_NEWRULE, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_NEWRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) return ruleHandle(rule, req) } @@ -32,31 +30,18 @@ func RuleDel(rule *Rule) error { // RuleDel deletes a rule from the system. // Equivalent to: ip rule del func (h *Handle) RuleDel(rule *Rule) error { - req := h.newNetlinkRequest(unix.RTM_DELRULE, unix.NLM_F_ACK) + req := h.newNetlinkRequest(syscall.RTM_DELRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) return ruleHandle(rule, req) } func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { msg := nl.NewRtMsg() - msg.Family = unix.AF_INET - msg.Protocol = unix.RTPROT_BOOT - msg.Scope = unix.RT_SCOPE_UNIVERSE - msg.Table = unix.RT_TABLE_UNSPEC - msg.Type = unix.RTN_UNSPEC - if req.NlMsghdr.Flags&unix.NLM_F_CREATE > 0 { - msg.Type = unix.RTN_UNICAST - } - if rule.Invert { - msg.Flags |= FibRuleInvert - } + msg.Family = syscall.AF_INET if rule.Family != 0 { msg.Family = uint8(rule.Family) } - if rule.Table >= 0 && rule.Table < 256 { - msg.Table = uint8(rule.Table) - } - var dstFamily uint8 + var rtAttrs []*nl.RtAttr if rule.Dst != nil && rule.Dst.IP != nil { dstLen, _ := rule.Dst.Mask.Size() @@ -64,12 +49,12 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { msg.Family = uint8(nl.GetIPFamily(rule.Dst.IP)) dstFamily = msg.Family var dstData []byte - if msg.Family == unix.AF_INET { + if msg.Family == syscall.AF_INET { dstData = rule.Dst.IP.To4() } else { dstData = rule.Dst.IP.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_DST, dstData)) + rtAttrs = append(rtAttrs, 
nl.NewRtAttr(syscall.RTA_DST, dstData)) } if rule.Src != nil && rule.Src.IP != nil { @@ -80,12 +65,19 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { srcLen, _ := rule.Src.Mask.Size() msg.Src_len = uint8(srcLen) var srcData []byte - if msg.Family == unix.AF_INET { + if msg.Family == syscall.AF_INET { srcData = rule.Src.IP.To4() } else { srcData = rule.Src.IP.To16() } - rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_SRC, srcData)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_SRC, srcData)) + } + + if rule.Table >= 0 { + msg.Table = uint8(rule.Table) + if rule.Table >= 256 { + msg.Table = syscall.RT_TABLE_UNSPEC + } } req.AddData(msg) @@ -150,7 +142,7 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b)) } - _, err := req.Execute(unix.NETLINK_ROUTE, 0) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) return err } @@ -163,11 +155,11 @@ func RuleList(family int) ([]Rule, error) { // RuleList lists rules in the system. 
// Equivalent to: ip rule list func (h *Handle) RuleList(family int) ([]Rule, error) { - req := h.newNetlinkRequest(unix.RTM_GETRULE, unix.NLM_F_DUMP|unix.NLM_F_REQUEST) + req := h.newNetlinkRequest(syscall.RTM_GETRULE, syscall.NLM_F_DUMP|syscall.NLM_F_REQUEST) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWRULE) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWRULE) if err != nil { return nil, err } @@ -183,11 +175,9 @@ func (h *Handle) RuleList(family int) ([]Rule, error) { rule := NewRule() - rule.Invert = msg.Flags&FibRuleInvert > 0 - for j := range attrs { switch attrs[j].Attr.Type { - case unix.RTA_TABLE: + case syscall.RTA_TABLE: rule.Table = int(native.Uint32(attrs[j].Value[0:4])) case nl.FRA_SRC: rule.Src = &net.IPNet{ diff --git a/vendor/github.com/vishvananda/netlink/socket_linux.go b/vendor/github.com/vishvananda/netlink/socket_linux.go index 99e9fb4d897..b42b84f0cfe 100644 --- a/vendor/github.com/vishvananda/netlink/socket_linux.go +++ b/vendor/github.com/vishvananda/netlink/socket_linux.go @@ -4,9 +4,9 @@ import ( "errors" "fmt" "net" + "syscall" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) const ( @@ -123,15 +123,15 @@ func SocketGet(local, remote net.Addr) (*Socket, error) { return nil, ErrNotImplemented } - s, err := nl.Subscribe(unix.NETLINK_INET_DIAG) + s, err := nl.Subscribe(syscall.NETLINK_INET_DIAG) if err != nil { return nil, err } defer s.Close() req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, 0) req.AddData(&socketRequest{ - Family: unix.AF_INET, - Protocol: unix.IPPROTO_TCP, + Family: syscall.AF_INET, + Protocol: syscall.IPPROTO_TCP, ID: SocketID{ SourcePort: uint16(localTCP.Port), DestinationPort: uint16(remoteTCP.Port), diff --git a/vendor/github.com/vishvananda/netlink/xfrm.go b/vendor/github.com/vishvananda/netlink/xfrm.go index 02b41842e10..9962dcf7006 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm.go +++ 
b/vendor/github.com/vishvananda/netlink/xfrm.go @@ -2,20 +2,19 @@ package netlink import ( "fmt" - - "golang.org/x/sys/unix" + "syscall" ) // Proto is an enum representing an ipsec protocol. type Proto uint8 const ( - XFRM_PROTO_ROUTE2 Proto = unix.IPPROTO_ROUTING - XFRM_PROTO_ESP Proto = unix.IPPROTO_ESP - XFRM_PROTO_AH Proto = unix.IPPROTO_AH - XFRM_PROTO_HAO Proto = unix.IPPROTO_DSTOPTS + XFRM_PROTO_ROUTE2 Proto = syscall.IPPROTO_ROUTING + XFRM_PROTO_ESP Proto = syscall.IPPROTO_ESP + XFRM_PROTO_AH Proto = syscall.IPPROTO_AH + XFRM_PROTO_HAO Proto = syscall.IPPROTO_DSTOPTS XFRM_PROTO_COMP Proto = 0x6c // NOTE not defined on darwin - XFRM_PROTO_IPSEC_ANY Proto = unix.IPPROTO_RAW + XFRM_PROTO_IPSEC_ANY Proto = syscall.IPPROTO_RAW ) func (p Proto) String() string { diff --git a/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go index efe72ddf29c..7b98c9cb6d3 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go @@ -2,10 +2,11 @@ package netlink import ( "fmt" + "syscall" + + "github.com/vishvananda/netns" "github.com/vishvananda/netlink/nl" - "github.com/vishvananda/netns" - "golang.org/x/sys/unix" ) type XfrmMsg interface { @@ -38,7 +39,7 @@ func XfrmMonitor(ch chan<- XfrmMsg, done <-chan struct{}, errorChan chan<- error if err != nil { return nil } - s, err := nl.SubscribeAt(netns.None(), netns.None(), unix.NETLINK_XFRM, groups...) + s, err := nl.SubscribeAt(netns.None(), netns.None(), syscall.NETLINK_XFRM, groups...) 
if err != nil { return err } diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go index fde0c2ca5ad..c3d4e422272 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -1,8 +1,9 @@ package netlink import ( + "syscall" + "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) { @@ -54,7 +55,7 @@ func (h *Handle) XfrmPolicyUpdate(policy *XfrmPolicy) error { } func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { - req := h.newNetlinkRequest(nlProto, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) msg := &nl.XfrmUserpolicyInfo{} selFromPolicy(&msg.Sel, policy) @@ -90,7 +91,7 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { req.AddData(out) } - _, err := req.Execute(unix.NETLINK_XFRM, 0) + _, err := req.Execute(syscall.NETLINK_XFRM, 0) return err } @@ -120,12 +121,12 @@ func XfrmPolicyList(family int) ([]XfrmPolicy, error) { // Equivalent to: `ip xfrm policy show`. // The list can be filtered by ip family. func (h *Handle) XfrmPolicyList(family int) ([]XfrmPolicy, error) { - req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, syscall.NLM_F_DUMP) msg := nl.NewIfInfomsg(family) req.AddData(msg) - msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY) + msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY) if err != nil { return nil, err } @@ -164,13 +165,13 @@ func XfrmPolicyFlush() error { // XfrmPolicyFlush will flush the policies on the system. 
// Equivalent to: `ip xfrm policy flush` func (h *Handle) XfrmPolicyFlush() error { - req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHPOLICY, unix.NLM_F_ACK) - _, err := req.Execute(unix.NETLINK_XFRM, 0) + req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHPOLICY, syscall.NLM_F_ACK) + _, err := req.Execute(syscall.NETLINK_XFRM, 0) return err } func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPolicy, error) { - req := h.newNetlinkRequest(nlProto, unix.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, syscall.NLM_F_ACK) msg := &nl.XfrmUserpolicyId{} selFromPolicy(&msg.Sel, policy) @@ -188,7 +189,7 @@ func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPo resType = 0 } - msgs, err := req.Execute(unix.NETLINK_XFRM, uint16(resType)) + msgs, err := req.Execute(syscall.NETLINK_XFRM, uint16(resType)) if err != nil { return nil, err } diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state.go b/vendor/github.com/vishvananda/netlink/xfrm_state.go index d14740dc55b..368a9b986d6 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state.go @@ -3,7 +3,6 @@ package netlink import ( "fmt" "net" - "time" ) // XfrmStateAlgo represents the algorithm to use for the ipsec encryption. @@ -68,19 +67,6 @@ type XfrmStateLimits struct { TimeUseHard uint64 } -// XfrmStateStats represents the current number of bytes/packets -// processed by this State, the State's installation and first use -// time and the replay window counters. -type XfrmStateStats struct { - ReplayWindow uint32 - Replay uint32 - Failed uint32 - Bytes uint64 - Packets uint64 - AddTime uint64 - UseTime uint64 -} - // XfrmState represents the state of an ipsec policy. It optionally // contains an XfrmStateAlgo for encryption and one for authentication. 
type XfrmState struct { @@ -92,7 +78,6 @@ type XfrmState struct { Reqid int ReplayWindow int Limits XfrmStateLimits - Statistics XfrmStateStats Mark *XfrmMark Auth *XfrmStateAlgo Crypt *XfrmStateAlgo @@ -109,16 +94,10 @@ func (sa XfrmState) Print(stats bool) string { if !stats { return sa.String() } - at := time.Unix(int64(sa.Statistics.AddTime), 0).Format(time.UnixDate) - ut := "-" - if sa.Statistics.UseTime > 0 { - ut = time.Unix(int64(sa.Statistics.UseTime), 0).Format(time.UnixDate) - } - return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d, Bytes: %d, Packets: %d, "+ - "AddTime: %s, UseTime: %s, ReplayWindow: %d, Replay: %d, Failed: %d", + + return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d", sa.String(), printLimit(sa.Limits.ByteSoft), printLimit(sa.Limits.ByteHard), printLimit(sa.Limits.PacketSoft), printLimit(sa.Limits.PacketHard), - sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard, sa.Statistics.Bytes, sa.Statistics.Packets, at, ut, - sa.Statistics.ReplayWindow, sa.Statistics.Replay, sa.Statistics.Failed) + sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard) } func printLimit(lmt uint64) string { diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go index 7fc92900c05..6a7bc0deca2 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -2,10 +2,10 @@ package netlink import ( "fmt" + "syscall" "unsafe" "github.com/vishvananda/netlink/nl" - "golang.org/x/sys/unix" ) func writeStateAlgo(a *XfrmStateAlgo) []byte { @@ -111,7 +111,7 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { if state.Spi == 0 { return fmt.Errorf("Spi 
must be set when adding xfrm state.") } - req := h.newNetlinkRequest(nlProto, unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) msg := xfrmUsersaInfoFromXfrmState(state) @@ -157,13 +157,13 @@ func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { req.AddData(out) } - _, err := req.Execute(unix.NETLINK_XFRM, 0) + _, err := req.Execute(syscall.NETLINK_XFRM, 0) return err } func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { req := h.newNetlinkRequest(nl.XFRM_MSG_ALLOCSPI, - unix.NLM_F_CREATE|unix.NLM_F_EXCL|unix.NLM_F_ACK) + syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) msg := &nl.XfrmUserSpiInfo{} msg.XfrmUsersaInfo = *(xfrmUsersaInfoFromXfrmState(state)) @@ -177,7 +177,7 @@ func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { req.AddData(out) } - msgs, err := req.Execute(unix.NETLINK_XFRM, 0) + msgs, err := req.Execute(syscall.NETLINK_XFRM, 0) if err != nil { return nil, err } @@ -216,9 +216,9 @@ func XfrmStateList(family int) ([]XfrmState, error) { // Equivalent to: `ip xfrm state show`. // The list can be filtered by ip family. 
func (h *Handle) XfrmStateList(family int) ([]XfrmState, error) { - req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, unix.NLM_F_DUMP) + req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, syscall.NLM_F_DUMP) - msgs, err := req.Execute(unix.NETLINK_XFRM, nl.XFRM_MSG_NEWSA) + msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWSA) if err != nil { return nil, err } @@ -255,7 +255,7 @@ func (h *Handle) XfrmStateGet(state *XfrmState) (*XfrmState, error) { } func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState, error) { - req := h.newNetlinkRequest(nlProto, unix.NLM_F_ACK) + req := h.newNetlinkRequest(nlProto, syscall.NLM_F_ACK) msg := &nl.XfrmUsersaId{} msg.Family = uint16(nl.GetIPFamily(state.Dst)) @@ -278,7 +278,7 @@ func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState resType = 0 } - msgs, err := req.Execute(unix.NETLINK_XFRM, uint16(resType)) + msgs, err := req.Execute(syscall.NETLINK_XFRM, uint16(resType)) if err != nil { return nil, err } @@ -308,7 +308,6 @@ func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState { state.Reqid = int(msg.Reqid) state.ReplayWindow = int(msg.ReplayWindow) lftToLimits(&msg.Lft, &state.Limits) - curToStats(&msg.Curlft, &msg.Stats, &state.Statistics) return &state } @@ -387,11 +386,11 @@ func XfrmStateFlush(proto Proto) error { // proto = 0 means any transformation protocols // Equivalent to: `ip xfrm state flush [ proto XFRM-PROTO ]` func (h *Handle) XfrmStateFlush(proto Proto) error { - req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHSA, unix.NLM_F_ACK) + req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHSA, syscall.NLM_F_ACK) req.AddData(&nl.XfrmUsersaFlush{Proto: uint8(proto)}) - _, err := req.Execute(unix.NETLINK_XFRM, 0) + _, err := req.Execute(syscall.NETLINK_XFRM, 0) if err != nil { return err } @@ -430,16 +429,6 @@ func lftToLimits(lft *nl.XfrmLifetimeCfg, lmts *XfrmStateLimits) { *lmts = *(*XfrmStateLimits)(unsafe.Pointer(lft)) } -func curToStats(cur 
*nl.XfrmLifetimeCur, wstats *nl.XfrmStats, stats *XfrmStateStats) { - stats.Bytes = cur.Bytes - stats.Packets = cur.Packets - stats.AddTime = cur.AddTime - stats.UseTime = cur.UseTime - stats.ReplayWindow = wstats.ReplayWindow - stats.Replay = wstats.Replay - stats.Failed = wstats.IntegrityFailed -} - func xfrmUsersaInfoFromXfrmState(state *XfrmState) *nl.XfrmUsersaInfo { msg := &nl.XfrmUsersaInfo{} msg.Family = uint16(nl.GetIPFamily(state.Dst))