diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 6ceaf567a0d..67025802af0 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1555,8 +1555,8 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Comment": "v4.0.0-3-g36442dbdb58521", - "Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4" + "Comment": "v4.1.0-11-gd4020504c68b6b", + "Rev": "d4020504c68b6bfa818032bedfb48e33e9638506" }, { "ImportPath": "github.com/exponent-io/jsonpath", @@ -4120,6 +4120,10 @@ "ImportPath": "k8s.io/utils/nsenter", "Rev": "ed37f7428a91fc2a81070808937195dcd46d320e" }, + { + "ImportPath": "k8s.io/utils/path", + "Rev": "ed37f7428a91fc2a81070808937195dcd46d320e" + }, { "ImportPath": "k8s.io/utils/pointer", "Rev": "ed37f7428a91fc2a81070808937195dcd46d320e" diff --git a/Godeps/LICENSES b/Godeps/LICENSES index 2da5c937067..37107bdfe07 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -116633,6 +116633,216 @@ third-party archives. ================================================================================ +================================================================================ += vendor/k8s.io/utils/path licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ += vendor/k8s.io/utils/LICENSE 3b83ef96387f14655fc854ddc3c6bd57 +================================================================================ + + ================================================================================ = vendor/k8s.io/utils/pointer licensed under: = diff --git a/build/debian-base/Makefile b/build/debian-base/Makefile index d91a6857cee..2475a4dc449 100755 --- a/build/debian-base/Makefile +++ b/build/debian-base/Makefile @@ -18,7 +18,7 @@ REGISTRY ?= staging-k8s.gcr.io IMAGE ?= $(REGISTRY)/debian-base BUILD_IMAGE ?= debian-build -TAG ?= 0.4.0 +TAG ?= 0.4.1 TAR_FILE ?= rootfs.tar ARCH?=amd64 diff --git a/build/debian-hyperkube-base/Makefile b/build/debian-hyperkube-base/Makefile index 39e6e4887df..7fe71705154 100644 --- a/build/debian-hyperkube-base/Makefile +++ b/build/debian-hyperkube-base/Makefile @@ -19,12 +19,12 @@ REGISTRY?=staging-k8s.gcr.io IMAGE?=$(REGISTRY)/debian-hyperkube-base -TAG=0.12.0 +TAG=0.12.1 ARCH?=amd64 ALL_ARCH = amd64 arm arm64 ppc64le s390x CACHEBUST?=1 -BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.4.0 +BASEIMAGE=k8s.gcr.io/debian-base-$(ARCH):0.4.1 CNI_VERSION=v0.6.0 TEMP_DIR:=$(shell mktemp -d) diff --git a/build/debian-iptables/Makefile b/build/debian-iptables/Makefile index 2061fe683f0..b33cd4b550a 100644 --- a/build/debian-iptables/Makefile +++ b/build/debian-iptables/Makefile @@ -16,12 +16,12 @@ REGISTRY?="staging-k8s.gcr.io" IMAGE=$(REGISTRY)/debian-iptables -TAG?=v11.0 +TAG?=v11.0.1 ARCH?=amd64 ALL_ARCH = amd64 arm arm64 ppc64le s390x TEMP_DIR:=$(shell mktemp -d) -BASEIMAGE?=k8s.gcr.io/debian-base-$(ARCH):0.4.0 +BASEIMAGE?=k8s.gcr.io/debian-base-$(ARCH):0.4.1 # This option is for running docker manifest command export DOCKER_CLI_EXPERIMENTAL := enabled diff --git a/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml b/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml index b51f5fb8454..338756aa1c4 100644 --- a/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml +++ b/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml @@ -33,7 +33,7 @@ rules: - apiGroups: [""] resources: ["replicationcontrollers/scale"] verbs: ["get", "update"] - - apiGroups: ["extensions"] + - apiGroups: ["extensions","apps"] resources: ["deployments/scale", "replicasets/scale"] verbs: ["get", "update"] # Remove the configmaps rule once below issue is fixed: @@ -85,7 +85,7 @@ spec: fsGroup: 65534 containers: - name: autoscaler - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.3.0 + image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0 resources: requests: cpu: "20m" diff --git a/cmd/kube-proxy/app/BUILD b/cmd/kube-proxy/app/BUILD index acc9a87d6ca..597f8eb96b2 100644 --- a/cmd/kube-proxy/app/BUILD +++ b/cmd/kube-proxy/app/BUILD @@ -22,6 +22,7 @@ go_library( "//pkg/kubelet/qos:go_default_library", "//pkg/master/ports:go_default_library", "//pkg/proxy:go_default_library", + "//pkg/proxy/apis:go_default_library", "//pkg/proxy/apis/config:go_default_library", "//pkg/proxy/apis/config/scheme:go_default_library", "//pkg/proxy/apis/config/validation:go_default_library", diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 9074b7f99e5..a4c537d6b1c 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -53,6 +53,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/proxy" + "k8s.io/kubernetes/pkg/proxy/apis" kubeproxyconfig "k8s.io/kubernetes/pkg/proxy/apis/config" 
"k8s.io/kubernetes/pkg/proxy/apis/config/scheme" "k8s.io/kubernetes/pkg/proxy/apis/config/validation" @@ -583,7 +584,7 @@ func (s *ProxyServer) Run() error { informerFactory := informers.NewSharedInformerFactoryWithOptions(s.Client, s.ConfigSyncPeriod, informers.WithTweakListOptions(func(options *v1meta.ListOptions) { - options.LabelSelector = "!service.kubernetes.io/service-proxy-name" + options.LabelSelector = "!" + apis.LabelServiceProxyName })) // Create configs (i.e. Watches for Services and Endpoints) diff --git a/cmd/kubeadm/.import-restrictions b/cmd/kubeadm/.import-restrictions index c37f6c23f59..797fdbaa4f3 100644 --- a/cmd/kubeadm/.import-restrictions +++ b/cmd/kubeadm/.import-restrictions @@ -41,6 +41,7 @@ "AllowedPrefixes": [ "k8s.io/utils/exec", "k8s.io/utils/integer", + "k8s.io/utils/path", "k8s.io/utils/pointer" ] }, @@ -77,7 +78,6 @@ "k8s.io/kubernetes/pkg/scheduler/util", "k8s.io/kubernetes/pkg/security/apparmor", "k8s.io/kubernetes/pkg/serviceaccount", - "k8s.io/kubernetes/pkg/util/file", "k8s.io/kubernetes/pkg/util/hash", "k8s.io/kubernetes/pkg/util/initsystem", "k8s.io/kubernetes/pkg/util/ipvs", diff --git a/cmd/kubeadm/app/cmd/join.go b/cmd/kubeadm/app/cmd/join.go index 313a4a979e3..1caed0c0b0f 100644 --- a/cmd/kubeadm/app/cmd/join.go +++ b/cmd/kubeadm/app/cmd/join.go @@ -17,7 +17,6 @@ limitations under the License. package cmd import ( - "bytes" "fmt" "io" "os" @@ -38,6 +37,7 @@ import ( kubeadmapiv1beta1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1" "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" + "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util" kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants" @@ -68,17 +68,6 @@ var ( `) - notReadyToJoinControPlaneTemp = template.Must(template.New("join").Parse(dedent.Dedent(` - One or more conditions for hosting a new control plane instance is not satisfied. - - {{.Error}} - - Please ensure that: - * The cluster has a stable controlPlaneEndpoint address. - * The certificates that must be shared among control plane instances are provided. - - `))) - joinControPlaneDoneTemp = template.Must(template.New("join").Parse(dedent.Dedent(` This node has joined the cluster and a new control plane instance was created: @@ -210,9 +199,7 @@ func NewCmdJoin(out io.Writer, joinOptions *joinOptions) *cobra.Command { AddJoinConfigFlags(cmd.Flags(), joinOptions.externalcfg) AddJoinOtherFlags(cmd.Flags(), &joinOptions.cfgPath, &joinOptions.ignorePreflightErrors, &joinOptions.controlPlane, &joinOptions.token) - // initialize the workflow runner with the list of phases - // TODO: append phases here like so: - // joinRunner.AppendPhase(phases.NewPreflightMasterPhase()) + joinRunner.AppendPhase(phases.NewPreflightJoinPhase()) // sets the data builder function, that will be used by the runner // both when running the entire workflow or single phases @@ -445,14 +432,6 @@ func (j *joinData) OutputWriter() io.Writer { // Run executes worker node provisioning and tries to join an existing cluster. func (j *joinData) Run() error { - fmt.Println("[preflight] Running pre-flight checks") - - // Start with general checks - klog.V(1).Infoln("[preflight] Running general checks") - if err := preflight.RunJoinNodeChecks(utilsexec.New(), j.cfg, j.ignorePreflightErrors); err != nil { - return err - } - // Fetch the init configuration based on the join configuration. 
// TODO: individual phases should call these: // - phases that need initCfg should call joinData.InitCfg(). @@ -465,35 +444,7 @@ func (j *joinData) Run() error { if err != nil { return err } - - // Continue with more specific checks based on the init configuration - klog.V(1).Infoln("[preflight] Running configuration dependant checks") - if err := preflight.RunOptionalJoinNodeChecks(utilsexec.New(), &initCfg.ClusterConfiguration, j.ignorePreflightErrors); err != nil { - return err - } - if j.cfg.ControlPlane != nil { - // Checks if the cluster configuration supports - // joining a new control plane instance and if all the necessary certificates are provided - if err := j.CheckIfReadyForAdditionalControlPlane(&initCfg.ClusterConfiguration); err != nil { - // outputs the not ready for hosting a new control plane instance message - ctx := map[string]string{ - "Error": err.Error(), - } - - var msg bytes.Buffer - notReadyToJoinControPlaneTemp.Execute(&msg, ctx) - return errors.New(msg.String()) - } - - // run kubeadm init preflight checks for checking all the prequisites - fmt.Println("[join] Running pre-flight checks before initializing the new control plane instance") - preflight.RunInitMasterChecks(utilsexec.New(), initCfg, j.ignorePreflightErrors) - - fmt.Println("[join] Pulling control-plane images") - if err := preflight.RunPullImagesCheck(utilsexec.New(), initCfg, j.ignorePreflightErrors); err != nil { - return err - } // Prepares the node for hosting a new control plane instance by writing necessary // kubeconfig files, and static pod manifests if err := j.PrepareForHostingControlPlane(initCfg); err != nil { @@ -539,22 +490,6 @@ func (j *joinData) Run() error { return nil } -// CheckIfReadyForAdditionalControlPlane ensures that the cluster is in a state that supports -// joining an additional control plane instance and if the node is ready to join -func (j *joinData) CheckIfReadyForAdditionalControlPlane(cfg *kubeadmapi.ClusterConfiguration) error { - // blocks if the cluster was created without a stable control plane endpoint - if cfg.ControlPlaneEndpoint == "" { - return errors.New("unable to add a new control plane instance a cluster that doesn't have a stable controlPlaneEndpoint address") - } - - // checks if the certificates that must be equal across contolplane instances are provided - if ret, err := certsphase.SharedCertificateExists(cfg); !ret { - return err - } - - return nil -} - // PrepareForHostingControlPlane makes all preparation activities require for a node hosting a new control plane instance func (j *joinData) PrepareForHostingControlPlane(initConfiguration *kubeadmapi.InitConfiguration) error { diff --git a/cmd/kubeadm/app/cmd/phases/preflight.go b/cmd/kubeadm/app/cmd/phases/preflight.go index 8884a9fe864..35c9e63e24e 100644 --- a/cmd/kubeadm/app/cmd/phases/preflight.go +++ b/cmd/kubeadm/app/cmd/phases/preflight.go @@ -17,23 +17,43 @@ limitations under the License. 
package phases import ( + "bytes" "fmt" + "text/template" + "github.com/lithammer/dedent" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases/workflow" + "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs" "k8s.io/kubernetes/cmd/kubeadm/app/preflight" "k8s.io/kubernetes/pkg/util/normalizer" utilsexec "k8s.io/utils/exec" ) var ( - masterPreflightExample = normalizer.Examples(` + initPreflightExample = normalizer.Examples(` # Run master pre-flight checks using a config file. kubeadm init phase preflight --config kubeadm-config.yml `) + joinPreflightExample = normalizer.Examples(` + # Run join pre-flight checks using a config file. + kubeadm join phase preflight --config kubeadm-config.yml + `) + + notReadyToJoinControPlaneTemp = template.Must(template.New("join").Parse(dedent.Dedent(` + One or more conditions for hosting a new control plane instance is not satisfied. + + {{.Error}} + + Please ensure that: + * The cluster has a stable controlPlaneEndpoint address. + * The certificates that must be shared among control plane instances are provided. + + `))) ) // preflightMasterData defines the behavior that a runtime data struct passed to the PreflightMaster master phase @@ -45,13 +65,19 @@ type preflightMasterData interface { IgnorePreflightErrors() sets.String } +type preflightJoinData interface { + Cfg() *kubeadmapi.JoinConfiguration + InitCfg() (*kubeadmapi.InitConfiguration, error) + IgnorePreflightErrors() sets.String +} + // NewPreflightMasterPhase creates a kubeadm workflow phase that implements preflight checks for a new master node. func NewPreflightMasterPhase() workflow.Phase { return workflow.Phase{ Name: "preflight", Short: "Run master pre-flight checks", Long: "Run master pre-flight checks, functionally equivalent to what implemented by kubeadm init.", - Example: masterPreflightExample, + Example: initPreflightExample, Run: runPreflightMaster, InheritFlags: []string{ options.CfgPath, @@ -60,6 +86,7 @@ func NewPreflightMasterPhase() workflow.Phase { } } +// TODO(dmaiocchi): rename all instances of master to controlPlane in this file. // runPreflightMaster executes preflight checks logic. func runPreflightMaster(c workflow.RunData) error { data, ok := c.(preflightMasterData) @@ -85,3 +112,96 @@ func runPreflightMaster(c workflow.RunData) error { return nil } + +// NewPreflightJoinPhase creates a kubeadm workflow phase that implements preflight checks for a new node join +func NewPreflightJoinPhase() workflow.Phase { + return workflow.Phase{ + Name: "preflight", + Short: "Run join pre-flight checks", + Long: "Run join pre-flight checks, functionally equivalent to what is implemented by kubeadm join.", + Example: joinPreflightExample, + Run: runPreflightJoin, + InheritFlags: []string{ + options.CfgPath, + options.IgnorePreflightErrors, + options.TLSBootstrapToken, + options.TokenStr, + options.ControlPlane, + options.APIServerAdvertiseAddress, + options.APIServerBindPort, + options.NodeCRISocket, + options.NodeName, + options.FileDiscovery, + options.TokenDiscovery, + options.TokenDiscoveryCAHash, + options.TokenDiscoverySkipCAHash, + }, + } +} + +// runPreflightJoin executes preflight checks logic. 
+func runPreflightJoin(c workflow.RunData) error {
+	j, ok := c.(preflightJoinData)
+	if !ok {
+		return errors.New("preflight phase invoked with an invalid data struct")
+	}
+	// Start with general checks
+	klog.V(1).Infoln("[preflight] Running general checks")
+	if err := preflight.RunJoinNodeChecks(utilsexec.New(), j.Cfg(), j.IgnorePreflightErrors()); err != nil {
+		return err
+	}
+
+	initCfg, err := j.InitCfg()
+	if err != nil {
+		return err
+	}
+
+	// Continue with more specific checks based on the init configuration
+	klog.V(1).Infoln("[preflight] Running configuration dependent checks")
+	if err := preflight.RunOptionalJoinNodeChecks(utilsexec.New(), &initCfg.ClusterConfiguration, j.IgnorePreflightErrors()); err != nil {
+		return err
+	}
+
+	if j.Cfg().ControlPlane != nil {
+		// Checks if the cluster configuration supports
+		// joining a new control plane instance and if all the necessary certificates are provided
+		if err := checkIfReadyForAdditionalControlPlane(&initCfg.ClusterConfiguration); err != nil {
+			// outputs the not ready for hosting a new control plane instance message
+			ctx := map[string]string{
+				"Error": err.Error(),
+			}
+
+			var msg bytes.Buffer
+			notReadyToJoinControPlaneTemp.Execute(&msg, ctx)
+			return errors.New(msg.String())
+		}
+
+		// run kubeadm init preflight checks for checking all the prerequisites
+		fmt.Println("[preflight] Running pre-flight checks before initializing the new control plane instance")
+		if err := preflight.RunInitMasterChecks(utilsexec.New(), initCfg, j.IgnorePreflightErrors()); err != nil {
+			return err
+		}
+
+		fmt.Println("[preflight] Pulling control-plane images")
+		if err := preflight.RunPullImagesCheck(utilsexec.New(), initCfg, j.IgnorePreflightErrors()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// checkIfReadyForAdditionalControlPlane ensures that the cluster is in a state that supports
+// joining an additional control plane instance and that the node is ready for the preflight phase
+func checkIfReadyForAdditionalControlPlane(initConfiguration *kubeadmapi.ClusterConfiguration) error {
+	// blocks if the cluster was created without a stable control plane endpoint
+	if initConfiguration.ControlPlaneEndpoint == "" {
+		return errors.New("unable to add a new control plane instance to a cluster that doesn't have a stable controlPlaneEndpoint address")
+	}
+
+	// checks if the certificates that must be equal across control plane instances are provided
+	if ret, err := certs.SharedCertificateExists(initConfiguration); !ret {
+		return err
+	}
+
+	return nil
+}
diff --git a/cmd/kubeadm/app/preflight/utils.go b/cmd/kubeadm/app/preflight/utils.go
index a7831de2d30..9cf1387271a 100644
--- a/cmd/kubeadm/app/preflight/utils.go
+++ b/cmd/kubeadm/app/preflight/utils.go
@@ -33,7 +33,7 @@ func GetKubeletVersion(execer utilsexec.Interface) (*version.Version, error) {
 	command := execer.Command("kubelet", "--version")
 	out, err := command.CombinedOutput()
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "cannot execute 'kubelet --version'")
 	}
 
 	cleanOutput := strings.TrimSpace(string(out))
diff --git a/cmd/kubeadm/app/util/system/docker_validator_test.go b/cmd/kubeadm/app/util/system/docker_validator_test.go
index 27e94e91308..adc949d57dd 100644
--- a/cmd/kubeadm/app/util/system/docker_validator_test.go
+++ b/cmd/kubeadm/app/util/system/docker_validator_test.go
@@ -28,7 +28,7 @@ func TestValidateDockerInfo(t *testing.T) {
 		Reporter: DefaultReporter,
 	}
 	spec := &DockerSpec{
-		Version:     []string{`1\.1[1-3]\..*`, `17\.0[3,6,9]\..*`, `18\.06\..*`},
+
Version: []string{`1\.1[1-3]\..*`, `17\.0[3,6,9]\..*`, `18\.0[6,9]\..*`}, GraphDriver: []string{"driver_1", "driver_2"}, } for _, test := range []struct { diff --git a/cmd/kubeadm/app/util/system/types_unix.go b/cmd/kubeadm/app/util/system/types_unix.go index 71a26c0e6e4..3f45d957e50 100644 --- a/cmd/kubeadm/app/util/system/types_unix.go +++ b/cmd/kubeadm/app/util/system/types_unix.go @@ -62,7 +62,7 @@ var DefaultSysSpec = SysSpec{ Cgroups: []string{"cpu", "cpuacct", "cpuset", "devices", "freezer", "memory"}, RuntimeSpec: RuntimeSpec{ DockerSpec: &DockerSpec{ - Version: []string{`1\.1[1-3]\..*`, `17\.0[3,6,9]\..*`, `18\.06\..*`}, + Version: []string{`1\.1[1-3]\..*`, `17\.0[3,6,9]\..*`, `18\.0[6,9]\..*`}, GraphDriver: []string{"aufs", "overlay", "overlay2", "devicemapper", "zfs"}, }, }, diff --git a/cmd/kubeadm/app/util/system/types_windows.go b/cmd/kubeadm/app/util/system/types_windows.go index 34d89f2bd90..0d045699cf9 100644 --- a/cmd/kubeadm/app/util/system/types_windows.go +++ b/cmd/kubeadm/app/util/system/types_windows.go @@ -38,7 +38,7 @@ var DefaultSysSpec = SysSpec{ Cgroups: []string{}, RuntimeSpec: RuntimeSpec{ DockerSpec: &DockerSpec{ - Version: []string{`18\.06\..*`}, //Requires [18.06] or later + Version: []string{`18\.0[6,9]\..*`}, GraphDriver: []string{"windowsfilter"}, }, }, diff --git a/hack/.golint_failures b/hack/.golint_failures index 9af4827aeeb..171a726ab89 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -355,7 +355,6 @@ pkg/util/bandwidth pkg/util/config pkg/util/ebtables pkg/util/env -pkg/util/file pkg/util/goroutinemap/exponentialbackoff pkg/util/initsystem pkg/util/iptables diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures index 2ef9f636eb8..45e568500c2 100644 --- a/hack/.shellcheck_failures +++ b/hack/.shellcheck_failures @@ -158,11 +158,6 @@ ./pkg/kubectl/cmd/edit/testdata/record_testcase.sh ./pkg/util/verify-util-pkg.sh ./plugin/pkg/admission/imagepolicy/gencerts.sh -./staging/src/k8s.io/apiextensions-apiserver/examples/client-go/hack/update-codegen.sh -./staging/src/k8s.io/apiextensions-apiserver/examples/client-go/hack/verify-codegen.sh -./staging/src/k8s.io/apiextensions-apiserver/hack/build-image.sh -./staging/src/k8s.io/apiextensions-apiserver/hack/update-codegen.sh -./staging/src/k8s.io/apiextensions-apiserver/hack/verify-codegen.sh ./staging/src/k8s.io/code-generator/generate-groups.sh ./staging/src/k8s.io/code-generator/generate-internal-groups.sh ./staging/src/k8s.io/code-generator/hack/update-codegen.sh @@ -178,9 +173,6 @@ ./staging/src/k8s.io/metrics/hack/verify-codegen.sh ./staging/src/k8s.io/node-api/hack/update-codegen.sh ./staging/src/k8s.io/node-api/hack/verify-codegen.sh -./staging/src/k8s.io/sample-apiserver/hack/build-image.sh -./staging/src/k8s.io/sample-apiserver/hack/update-codegen.sh -./staging/src/k8s.io/sample-apiserver/hack/verify-codegen.sh ./test/cmd/apply.sh ./test/cmd/apps.sh ./test/cmd/authorization.sh diff --git a/hack/lib/util.sh b/hack/lib/util.sh index cf6bb0edb0e..642b7c5e316 100755 --- a/hack/lib/util.sh +++ b/hack/lib/util.sh @@ -697,6 +697,15 @@ function kube::util::ensure-cfssl { return 0 fi + host_arch=$(kube::util::host_arch) + + if [[ "${host_arch}" != "amd64" ]]; then + echo "Cannot download cfssl on non-amd64 hosts and cfssl does not appear to be installed." + echo "Please install cfssl and cfssljson and verify they are in \$PATH." + echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..." 
+ exit 1 + fi + # Create a temp dir for cfssl if no directory was given local cfssldir=${1:-} if [[ -z "${cfssldir}" ]]; then diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index e1c49962610..0ba8ef21006 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -78,6 +78,7 @@ CLOUD_PROVIDER=${CLOUD_PROVIDER:-""} CLOUD_CONFIG=${CLOUD_CONFIG:-""} FEATURE_GATES=${FEATURE_GATES:-"AllAlpha=false"} STORAGE_BACKEND=${STORAGE_BACKEND:-"etcd3"} +STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-""} # preserve etcd data. you also need to set ETCD_DIR. PRESERVE_ETCD="${PRESERVE_ETCD:-false}" # enable Pod priority and preemption @@ -574,6 +575,7 @@ function start_apiserver { --insecure-bind-address="${API_HOST_IP}" \ --insecure-port="${API_PORT}" \ --storage-backend=${STORAGE_BACKEND} \ + --storage-media-type=${STORAGE_MEDIA_TYPE} \ --etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \ --service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}" \ --feature-gates="${FEATURE_GATES}" \ diff --git a/pkg/cloudprovider/providers/.import-restrictions b/pkg/cloudprovider/providers/.import-restrictions index 301698f8512..18f11cfbbb2 100644 --- a/pkg/cloudprovider/providers/.import-restrictions +++ b/pkg/cloudprovider/providers/.import-restrictions @@ -5,31 +5,18 @@ "AllowedPrefixes": [ "k8s.io/kubernetes/pkg/api/legacyscheme", "k8s.io/kubernetes/pkg/api/service", - "k8s.io/kubernetes/pkg/api/v1/pod", "k8s.io/kubernetes/pkg/api/v1/service", - "k8s.io/kubernetes/pkg/apis/apps", - "k8s.io/kubernetes/pkg/apis/autoscaling", "k8s.io/kubernetes/pkg/apis/core", - "k8s.io/kubernetes/pkg/apis/scheduling", - "k8s.io/kubernetes/pkg/capabilities", "k8s.io/kubernetes/pkg/cloudprovider", - "k8s.io/kubernetes/pkg/controller", "k8s.io/kubernetes/pkg/credentialprovider", "k8s.io/kubernetes/pkg/features", - "k8s.io/kubernetes/pkg/fieldpath", "k8s.io/kubernetes/pkg/kubelet/apis", - "k8s.io/kubernetes/pkg/kubelet/types", "k8s.io/kubernetes/pkg/master/ports", - "k8s.io/kubernetes/pkg/security/apparmor", - "k8s.io/kubernetes/pkg/serviceaccount", - "k8s.io/kubernetes/pkg/util/file", - "k8s.io/kubernetes/pkg/util/hash", "k8s.io/kubernetes/pkg/util/mount", + "k8s.io/kubernetes/pkg/util/file", "k8s.io/kubernetes/pkg/util/net/sets", - "k8s.io/kubernetes/pkg/util/parsers", "k8s.io/kubernetes/pkg/util/resizefs", "k8s.io/kubernetes/pkg/util/strings", - "k8s.io/kubernetes/pkg/util/taints", "k8s.io/kubernetes/pkg/version", "k8s.io/kubernetes/pkg/volume" ], diff --git a/pkg/cloudprovider/providers/aws/BUILD b/pkg/cloudprovider/providers/aws/BUILD index a53e5242067..cb19345627d 100644 --- a/pkg/cloudprovider/providers/aws/BUILD +++ b/pkg/cloudprovider/providers/aws/BUILD @@ -28,7 +28,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/cloudprovider/providers/aws", deps = [ "//pkg/api/v1/service:go_default_library", - "//pkg/controller:go_default_library", "//pkg/credentialprovider/aws:go_default_library", "//pkg/kubelet/apis:go_default_library", "//pkg/version:go_default_library", @@ -45,6 +44,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/cloud-provider:go_default_library", + "//staging/src/k8s.io/cloud-provider/node/helpers:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws/credentials:go_default_library", diff --git 
a/pkg/cloudprovider/providers/aws/aws.go b/pkg/cloudprovider/providers/aws/aws.go index c1add689605..a335e640dd0 100644 --- a/pkg/cloudprovider/providers/aws/aws.go +++ b/pkg/cloudprovider/providers/aws/aws.go @@ -56,9 +56,9 @@ import ( "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/record" - "k8s.io/cloud-provider" + cloudprovider "k8s.io/cloud-provider" + nodehelpers "k8s.io/cloud-provider/node/helpers" "k8s.io/kubernetes/pkg/api/v1/service" - "k8s.io/kubernetes/pkg/controller" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/volume" @@ -1938,7 +1938,7 @@ func (c *Cloud) applyUnSchedulableTaint(nodeName types.NodeName, reason string) Value: "true", Effect: v1.TaintEffectNoSchedule, } - err := controller.AddOrUpdateTaintOnNode(c.kubeClient, string(nodeName), taint) + err := nodehelpers.AddOrUpdateTaintOnNode(c.kubeClient, string(nodeName), taint) if err != nil { klog.Errorf("Error applying taint to node %s with error %v", nodeName, err) return diff --git a/pkg/controller/.import-restrictions b/pkg/controller/.import-restrictions index 9749301c119..e88de83a1e8 100644 --- a/pkg/controller/.import-restrictions +++ b/pkg/controller/.import-restrictions @@ -306,7 +306,6 @@ "k8s.io/kubernetes/pkg/scheduler/api", "k8s.io/kubernetes/pkg/scheduler/util", "k8s.io/kubernetes/pkg/security/apparmor", - "k8s.io/kubernetes/pkg/util/file", "k8s.io/kubernetes/pkg/util/net/sets", "k8s.io/kubernetes/pkg/util/parsers", "k8s.io/kubernetes/pkg/fieldpath", @@ -339,6 +338,7 @@ "k8s.io/utils/nsenter", "k8s.io/utils/integer", "k8s.io/utils/io", + "k8s.io/utils/path", "k8s.io/utils/pointer", "k8s.io/utils/exec", "k8s.io/utils/strings" diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go index bf7019f117c..4a2dd7a1e95 100644 --- a/pkg/controller/daemon/daemon_controller.go +++ b/pkg/controller/daemon/daemon_controller.go @@ -964,6 +964,9 @@ func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error { failedPodsObserved += failedPodsObservedOnNode } + // Remove pods assigned to not existing nodes. + podsToDelete = append(podsToDelete, getPodsWithoutNode(nodeList, nodeToDaemonPods)...) + // Label new pods using the hash label value of the current history when creating them if err = dsc.syncNodes(ds, podsToDelete, nodesNeedingDaemonPods, hash); err != nil { return err @@ -1524,3 +1527,21 @@ func (o podByCreationTimestampAndPhase) Less(i, j int) bool { func failedPodsBackoffKey(ds *apps.DaemonSet, nodeName string) string { return fmt.Sprintf("%s/%d/%s", ds.UID, ds.Status.ObservedGeneration, nodeName) } + +// getPodsWithoutNode returns list of pods assigned to not existing nodes. 
+func getPodsWithoutNode( + runningNodesList []*v1.Node, nodeToDaemonPods map[string][]*v1.Pod) []string { + var results []string + isNodeRunning := make(map[string]bool) + for _, node := range runningNodesList { + isNodeRunning[node.Name] = true + } + for n, pods := range nodeToDaemonPods { + if !isNodeRunning[n] { + for _, pod := range pods { + results = append(results, pod.Name) + } + } + } + return results +} diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go index 59fe7c026b3..a2b43dfd197 100644 --- a/pkg/controller/daemon/daemon_controller_test.go +++ b/pkg/controller/daemon/daemon_controller_test.go @@ -2447,6 +2447,25 @@ func TestDeleteNoDaemonPod(t *testing.T) { } } +func TestDeletePodForNotExistingNode(t *testing.T) { + for _, f := range []bool{true, false} { + defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() + for _, strategy := range updateStrategies() { + ds := newDaemonSet("foo") + ds.Spec.UpdateStrategy = *strategy + manager, podControl, _, err := newTestController(ds) + if err != nil { + t.Fatalf("error creating DaemonSets controller: %v", err) + } + manager.dsStore.Add(ds) + addNodes(manager.nodeStore, 0, 1, nil) + addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1) + addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1) + syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 1, 0) + } + } +} + func TestGetNodesToDaemonPods(t *testing.T) { for _, f := range []bool{true, false} { defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ScheduleDaemonSetPods, f)() diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 89d67e63521..86acccdf42b 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -121,6 +121,7 @@ const ( // owner: @bsalamat // alpha: v1.8 // beta: v1.11 + // GA: v1.14 // // Add priority to pods. Priority affects scheduling and preemption of pods. 
PodPriority utilfeature.Feature = "PodPriority" @@ -432,7 +433,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS Sysctls: {Default: true, PreRelease: utilfeature.Beta}, DebugContainers: {Default: false, PreRelease: utilfeature.Alpha}, PodShareProcessNamespace: {Default: true, PreRelease: utilfeature.Beta}, - PodPriority: {Default: true, PreRelease: utilfeature.Beta}, + PodPriority: {Default: true, PreRelease: utilfeature.GA}, TaintNodesByCondition: {Default: true, PreRelease: utilfeature.Beta}, MountPropagation: {Default: true, PreRelease: utilfeature.GA, LockToDefault: true}, // remove in 1.14 QOSReserved: {Default: false, PreRelease: utilfeature.Alpha}, diff --git a/pkg/kubectl/.import-restrictions b/pkg/kubectl/.import-restrictions index f635e6709f2..cf4ac57da4a 100644 --- a/pkg/kubectl/.import-restrictions +++ b/pkg/kubectl/.import-restrictions @@ -126,7 +126,6 @@ "k8s.io/kubernetes/pkg/scheduler/volumebinder", "k8s.io/kubernetes/pkg/security/apparmor", "k8s.io/kubernetes/pkg/serviceaccount", - "k8s.io/kubernetes/pkg/util/file", "k8s.io/kubernetes/pkg/util/goroutinemap", "k8s.io/kubernetes/pkg/util/hash", "k8s.io/kubernetes/pkg/util/interrupt", @@ -144,6 +143,7 @@ "k8s.io/kubernetes/pkg/volume/util", "k8s.io/utils/nsenter", "k8s.io/utils/io", + "k8s.io/utils/path", "k8s.io/utils/pointer" ], "ForbiddenPrefixes": [] diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index cae1516b80f..6440faa646d 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -22,7 +22,6 @@ import ( "io" "os" "os/exec" - "runtime" "strings" "syscall" @@ -339,12 +338,6 @@ type defaultPluginHandler struct{} // Lookup implements PluginHandler func (h *defaultPluginHandler) Lookup(filename string) (string, error) { - // if on Windows, append the "exe" extension - // to the filename that we are looking up. - if runtime.GOOS == "windows" { - filename = filename + ".exe" - } - return exec.LookPath(filename) } diff --git a/pkg/kubectl/cmd/get/customcolumn.go b/pkg/kubectl/cmd/get/customcolumn.go index a5f6fa6dadf..401c475fa43 100644 --- a/pkg/kubectl/cmd/get/customcolumn.go +++ b/pkg/kubectl/cmd/get/customcolumn.go @@ -75,7 +75,7 @@ func NewCustomColumnsPrinterFromSpec(spec string, decoder runtime.Decoder, noHea parts := strings.Split(spec, ",") columns := make([]Column, len(parts)) for ix := range parts { - colSpec := strings.Split(parts[ix], ":") + colSpec := strings.SplitN(parts[ix], ":", 2) if len(colSpec) != 2 { return nil, fmt.Errorf("unexpected custom-columns spec: %s, expected
:", parts[ix]) } diff --git a/pkg/kubectl/cmd/get/customcolumn_test.go b/pkg/kubectl/cmd/get/customcolumn_test.go index 263ecdde92b..b4af2025ebd 100644 --- a/pkg/kubectl/cmd/get/customcolumn_test.go +++ b/pkg/kubectl/cmd/get/customcolumn_test.go @@ -373,3 +373,198 @@ bar bar bar t.Errorf("\nexpected:\n'%s'\nsaw\n'%s'\n", expectedOutput, buffer.String()) } } + +func TestSliceColumnPrint(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-name", + Namespace: "fake-namespace", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "fake0", + }, + { + Name: "fake1", + }, + { + Name: "fake2", + }, + { + Name: "fake3", + }, + }, + }, + } + + tests := []struct { + name string + spec string + expectedOutput string + expectErr bool + }{ + { + name: "containers[0]", + spec: "CONTAINER:.spec.containers[0].name", + expectedOutput: `CONTAINER +fake0 +`, + expectErr: false, + }, + { + name: "containers[3]", + spec: "CONTAINER:.spec.containers[3].name", + expectedOutput: `CONTAINER +fake3 +`, + expectErr: false, + }, + { + name: "containers[5], illegal expression because it is out of bounds", + spec: "CONTAINER:.spec.containers[5].name", + expectedOutput: "", + expectErr: true, + }, + { + name: "containers[-1], it equals containers[3]", + spec: "CONTAINER:.spec.containers[-1].name", + expectedOutput: `CONTAINER +fake3 +`, + expectErr: false, + }, + { + name: "containers[-2], it equals containers[2]", + spec: "CONTAINER:.spec.containers[-2].name", + expectedOutput: `CONTAINER +fake2 +`, + expectErr: false, + }, + { + name: "containers[-4], it equals containers[0]", + spec: "CONTAINER:.spec.containers[-4].name", + expectedOutput: `CONTAINER +fake0 +`, + expectErr: false, + }, + { + name: "containers[-5], illegal expression because it is out of bounds", + spec: "CONTAINER:.spec.containers[-5].name", + expectedOutput: "", + expectErr: true, + }, + { + name: "containers[0:0], it equals empty set", + spec: "CONTAINER:.spec.containers[0:0].name", + expectedOutput: `CONTAINER + +`, + expectErr: false, + }, + { + name: "containers[0:3]", + spec: "CONTAINER:.spec.containers[0:3].name", + expectedOutput: `CONTAINER +fake0,fake1,fake2 +`, + expectErr: false, + }, + { + name: "containers[1:]", + spec: "CONTAINER:.spec.containers[1:].name", + expectedOutput: `CONTAINER +fake1,fake2,fake3 +`, + expectErr: false, + }, + { + name: "containers[3:1], illegal expression because start index is greater than end index", + spec: "CONTAINER:.spec.containers[3:1].name", + expectedOutput: "", + expectErr: true, + }, + + { + name: "containers[0:-1], it equals containers[0:3]", + spec: "CONTAINER:.spec.containers[0:-1].name", + expectedOutput: `CONTAINER +fake0,fake1,fake2 +`, + expectErr: false, + }, + { + name: "containers[-1:], it equals containers[3:]", + spec: "CONTAINER:.spec.containers[-1:].name", + expectedOutput: `CONTAINER +fake3 +`, + expectErr: false, + }, + { + name: "containers[-4:], it equals containers[0:]", + spec: "CONTAINER:.spec.containers[-4:].name", + expectedOutput: `CONTAINER +fake0,fake1,fake2,fake3 +`, + expectErr: false, + }, + + { + name: "containers[-3:-1], it equasl containers[1:3]", + spec: "CONTAINER:.spec.containers[-3:-1].name", + expectedOutput: `CONTAINER +fake1,fake2 +`, + expectErr: false, + }, + { + name: "containers[-2:-3], it equals containers[2:1], illegal expression because start index is greater than end index", + spec: "CONTAINER:.spec.containers[-2:-3].name", + expectedOutput: "", + expectErr: true, + }, + { + name: "containers[4:4], 
it equals empty set", + spec: "CONTAINER:.spec.containers[4:4].name", + expectedOutput: `CONTAINER + +`, + expectErr: false, + }, + { + name: "containers[-5:-5], it equals empty set", + spec: "CONTAINER:.spec.containers[-5:-5].name", + expectedOutput: `CONTAINER + +`, + expectErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + printer, err := NewCustomColumnsPrinterFromSpec(test.spec, decoder, false) + if err != nil { + t.Errorf("test %s has unexpected error: %v", test.name, err) + } + + buffer := &bytes.Buffer{} + err = printer.PrintObj(pod, buffer) + if test.expectErr { + if err == nil { + t.Errorf("test %s has unexpected error: %v", test.name, err) + + } + } else { + if err != nil { + t.Errorf("test %s has unexpected error: %v", test.name, err) + } else if buffer.String() != test.expectedOutput { + t.Errorf("test %s has unexpected output:\nexpected: %s\nsaw: %s", test.name, test.expectedOutput, buffer.String()) + } + } + }) + } +} diff --git a/pkg/kubectl/cmd/plugin/plugin.go b/pkg/kubectl/cmd/plugin/plugin.go index d7df928098d..e8463f04552 100644 --- a/pkg/kubectl/cmd/plugin/plugin.go +++ b/pkg/kubectl/cmd/plugin/plugin.go @@ -100,11 +100,7 @@ func (o *PluginListOptions) Complete(cmd *cobra.Command) error { seenPlugins: make(map[string]string, 0), } - path := "PATH" - if runtime.GOOS == "windows" { - path = "path" - } - o.PluginPaths = filepath.SplitList(os.Getenv(path)) + o.PluginPaths = filepath.SplitList(os.Getenv("PATH")) return nil } @@ -230,7 +226,10 @@ func isExecutable(fullPath string) (bool, error) { } if runtime.GOOS == "windows" { - if strings.HasSuffix(info.Name(), ".exe") { + fileExt := strings.ToLower(filepath.Ext(fullPath)) + + switch fileExt { + case ".bat", ".cmd", ".com", ".exe": return true, nil } return false, nil diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index 722309afa4e..ba96674a91a 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -103,7 +103,6 @@ go_library( "//pkg/security/podsecuritypolicy/sysctl:go_default_library", "//pkg/securitycontext:go_default_library", "//pkg/util/dbus:go_default_library", - "//pkg/util/file:go_default_library", "//pkg/util/iptables:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/node:go_default_library", @@ -150,6 +149,7 @@ go_library( "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/integer:go_default_library", + "//vendor/k8s.io/utils/path:go_default_library", ], ) diff --git a/pkg/kubelet/cm/BUILD b/pkg/kubelet/cm/BUILD index e6e44b4435a..353b7c641ae 100644 --- a/pkg/kubelet/cm/BUILD +++ b/pkg/kubelet/cm/BUILD @@ -74,7 +74,6 @@ go_library( "//pkg/kubelet/metrics:go_default_library", "//pkg/kubelet/qos:go_default_library", "//pkg/kubelet/types:go_default_library", - "//pkg/util/file:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/oom:go_default_library", "//pkg/util/procfs:go_default_library", @@ -88,6 +87,7 @@ go_library( "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/cgroups/systemd:go_default_library", "//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library", + "//vendor/k8s.io/utils/path:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ "//pkg/kubelet/cadvisor:go_default_library", diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index ea0a5ad2151..af7a779173f 100644 --- 
a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -56,11 +56,11 @@ import ( "k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - utilfile "k8s.io/kubernetes/pkg/util/file" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/util/procfs" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" + utilpath "k8s.io/utils/path" ) const ( @@ -179,11 +179,11 @@ func validateSystemRequirements(mountUtil mount.Interface) (features, error) { // Check if cpu quota is available. // CPU cgroup is required and so it expected to be mounted at this point. - periodExists, err := utilfile.FileExists(path.Join(cpuMountPoint, "cpu.cfs_period_us")) + periodExists, err := utilpath.Exists(utilpath.CheckFollowSymlink, path.Join(cpuMountPoint, "cpu.cfs_period_us")) if err != nil { klog.Errorf("failed to detect if CPU cgroup cpu.cfs_period_us is available - %v", err) } - quotaExists, err := utilfile.FileExists(path.Join(cpuMountPoint, "cpu.cfs_quota_us")) + quotaExists, err := utilpath.Exists(utilpath.CheckFollowSymlink, path.Join(cpuMountPoint, "cpu.cfs_quota_us")) if err != nil { klog.Errorf("failed to detect if CPU cgroup cpu.cfs_quota_us is available - %v", err) } diff --git a/pkg/kubelet/kubelet_getters.go b/pkg/kubelet/kubelet_getters.go index 1386a83ccb6..5331f6418f9 100644 --- a/pkg/kubelet/kubelet_getters.go +++ b/pkg/kubelet/kubelet_getters.go @@ -30,9 +30,9 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/config" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - utilfile "k8s.io/kubernetes/pkg/util/file" "k8s.io/kubernetes/pkg/util/mount" utilnode "k8s.io/kubernetes/pkg/util/node" + utilpath "k8s.io/utils/path" ) // getRootDir returns the full path to the directory under which kubelet can @@ -293,7 +293,7 @@ func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, err for _, volumePluginDir := range volumePluginDirs { volumePluginName := volumePluginDir.Name() volumePluginPath := filepath.Join(podVolDir, volumePluginName) - volumeDirs, err := utilfile.ReadDirNoStat(volumePluginPath) + volumeDirs, err := utilpath.ReadDirNoStat(volumePluginPath) if err != nil { return volumes, fmt.Errorf("Could not read directory %s: %v", volumePluginPath, err) } diff --git a/pkg/kubelet/volumemanager/reconciler/BUILD b/pkg/kubelet/volumemanager/reconciler/BUILD index 46865145d4f..38c35e589ba 100644 --- a/pkg/kubelet/volumemanager/reconciler/BUILD +++ b/pkg/kubelet/volumemanager/reconciler/BUILD @@ -14,7 +14,6 @@ go_library( "//pkg/features:go_default_library", "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/volumemanager/cache:go_default_library", - "//pkg/util/file:go_default_library", "//pkg/util/goroutinemap/exponentialbackoff:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", @@ -30,6 +29,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", + "//vendor/k8s.io/utils/path:go_default_library", ], ) diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go index 8f083a075a7..bc13ac8bf59 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -36,7 +36,6 @@ import ( "k8s.io/kubernetes/pkg/features" 
"k8s.io/kubernetes/pkg/kubelet/config" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" - utilfile "k8s.io/kubernetes/pkg/util/file" "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff" "k8s.io/kubernetes/pkg/util/mount" volumepkg "k8s.io/kubernetes/pkg/volume" @@ -45,6 +44,7 @@ import ( "k8s.io/kubernetes/pkg/volume/util/operationexecutor" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" utilstrings "k8s.io/utils/strings" + utilpath "k8s.io/utils/path" ) // Reconciler runs a periodic loop to reconcile the desired state of the world @@ -673,7 +673,7 @@ func getVolumesFromPodDir(podDir string) ([]podVolume, error) { for _, volumeDir := range volumesDirInfo { pluginName := volumeDir.Name() volumePluginPath := path.Join(volumesDir, pluginName) - volumePluginDirs, err := utilfile.ReadDirNoStat(volumePluginPath) + volumePluginDirs, err := utilpath.ReadDirNoStat(volumePluginPath) if err != nil { klog.Errorf("Could not read volume plugin directory %q: %v", volumePluginPath, err) continue diff --git a/pkg/master/controller/crdregistration/crdregistration_controller.go b/pkg/master/controller/crdregistration/crdregistration_controller.go index 1670a5b0780..50b9f6ea0d4 100644 --- a/pkg/master/controller/crdregistration/crdregistration_controller.go +++ b/pkg/master/controller/crdregistration/crdregistration_controller.go @@ -68,7 +68,7 @@ func NewAutoRegistrationController(crdinformer crdinformers.CustomResourceDefini crdSynced: crdinformer.Informer().HasSynced, apiServiceRegistration: apiServiceRegistration, syncedInitialSet: make(chan struct{}), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd-autoregister"), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd_autoregistration_controller"), } c.syncHandler = c.handleVersionUpdate diff --git a/pkg/master/tunneler/BUILD b/pkg/master/tunneler/BUILD index 99648d7f8aa..9dbaecd6edb 100644 --- a/pkg/master/tunneler/BUILD +++ b/pkg/master/tunneler/BUILD @@ -22,10 +22,10 @@ go_library( importpath = "k8s.io/kubernetes/pkg/master/tunneler", deps = [ "//pkg/ssh:go_default_library", - "//pkg/util/file:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/klog:go_default_library", + "//vendor/k8s.io/utils/path:go_default_library", ], ) diff --git a/pkg/master/tunneler/ssh.go b/pkg/master/tunneler/ssh.go index 4bf8307bad0..f30a967e34b 100644 --- a/pkg/master/tunneler/ssh.go +++ b/pkg/master/tunneler/ssh.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog" "k8s.io/kubernetes/pkg/ssh" - utilfile "k8s.io/kubernetes/pkg/util/file" + utilpath "k8s.io/utils/path" ) type InstallSSHKey func(ctx context.Context, user string, data []byte) error @@ -119,7 +119,7 @@ func (c *SSHTunneler) Run(getAddresses AddressFunc) { // public keyfile is written last, so check for that. publicKeyFile := c.SSHKeyfile + ".pub" - exists, err := utilfile.FileExists(publicKeyFile) + exists, err := utilpath.Exists(utilpath.CheckFollowSymlink, publicKeyFile) if err != nil { klog.Errorf("Error detecting if key exists: %v", err) } else if !exists { @@ -208,7 +208,7 @@ func generateSSHKey(privateKeyfile, publicKeyfile string) error { } // If private keyfile already exists, we must have only made it halfway // through last time, so delete it. 
- exists, err := utilfile.FileExists(privateKeyfile) + exists, err := utilpath.Exists(utilpath.CheckFollowSymlink, privateKeyfile) if err != nil { klog.Errorf("Error detecting if private key exists: %v", err) } else if exists { diff --git a/pkg/proxy/BUILD b/pkg/proxy/BUILD index bb290126fea..43040e6d813 100644 --- a/pkg/proxy/BUILD +++ b/pkg/proxy/BUILD @@ -38,7 +38,7 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", - "//pkg/proxy/apis/config:all-srcs", + "//pkg/proxy/apis:all-srcs", "//pkg/proxy/config:all-srcs", "//pkg/proxy/healthcheck:all-srcs", "//pkg/proxy/iptables:all-srcs", diff --git a/pkg/proxy/apis/BUILD b/pkg/proxy/apis/BUILD new file mode 100644 index 00000000000..e75326ca83e --- /dev/null +++ b/pkg/proxy/apis/BUILD @@ -0,0 +1,28 @@ +package(default_visibility = ["//visibility:public"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["well_known_labels.go"], + importpath = "k8s.io/kubernetes/pkg/proxy/apis", +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//pkg/proxy/apis/config:all-srcs", + ], + tags = ["automanaged"], +) diff --git a/pkg/proxy/apis/well_known_labels.go b/pkg/proxy/apis/well_known_labels.go new file mode 100644 index 00000000000..84c4b9fa158 --- /dev/null +++ b/pkg/proxy/apis/well_known_labels.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apis + +const ( + // LabelServiceProxyName indicates that an alternative service + // proxy will implement this Service. + LabelServiceProxyName = "service.kubernetes.io/service-proxy-name" +) diff --git a/pkg/scheduler/factory/factory.go b/pkg/scheduler/factory/factory.go index b74a8aa9b65..ca1f0e867fd 100644 --- a/pkg/scheduler/factory/factory.go +++ b/pkg/scheduler/factory/factory.go @@ -1085,6 +1085,7 @@ func MakeDefaultErrorFunc(client clientset.Interface, backoff *util.PodBackoff, } backoff.Gc() + podSchedulingCycle := podQueue.SchedulingCycle() // Retry asynchronously. // Note that this is extremely rudimentary and we need a more real error handling path. 
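A side note on the podSchedulingCycle capture added just above in MakeDefaultErrorFunc: the cycle has to be read synchronously, at the moment the scheduling failure is handled, because the retry runs in a goroutine and the queue's counter keeps advancing as other pods are popped. A condensed sketch of the pattern, with a stand-in interface and an untyped pod (names here are illustrative, not the scheduler's own):

package sketch

// cycleQueue captures just the two SchedulingQueue methods the error handler
// uses; pod is left untyped to keep the sketch self-contained.
type cycleQueue interface {
	SchedulingCycle() int64
	AddUnschedulableIfNotPresent(pod interface{}, podSchedulingCycle int64) error
}

// retryUnschedulable reads the cycle before going async: the counter keeps
// advancing as other pods are popped, and the queue needs the cycle in which
// *this* pod failed.
func retryUnschedulable(q cycleQueue, pod interface{}) {
	podSchedulingCycle := q.SchedulingCycle() // captured synchronously

	go func() {
		// ... backoff wait and pod refetch elided ...
		q.AddUnschedulableIfNotPresent(pod, podSchedulingCycle)
	}()
}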
go func() { @@ -1110,7 +1111,7 @@ func MakeDefaultErrorFunc(client clientset.Interface, backoff *util.PodBackoff, pod, err := client.CoreV1().Pods(podID.Namespace).Get(podID.Name, metav1.GetOptions{}) if err == nil { if len(pod.Spec.NodeName) == 0 { - podQueue.AddUnschedulableIfNotPresent(pod) + podQueue.AddUnschedulableIfNotPresent(pod, podSchedulingCycle) } break } diff --git a/pkg/scheduler/internal/queue/scheduling_queue.go b/pkg/scheduler/internal/queue/scheduling_queue.go index 16756abf624..bc5130d481b 100644 --- a/pkg/scheduler/internal/queue/scheduling_queue.go +++ b/pkg/scheduler/internal/queue/scheduling_queue.go @@ -59,7 +59,14 @@ const unschedulableQTimeInterval = 60 * time.Second type SchedulingQueue interface { Add(pod *v1.Pod) error AddIfNotPresent(pod *v1.Pod) error - AddUnschedulableIfNotPresent(pod *v1.Pod) error + // AddUnschedulableIfNotPresent adds an unschedulable pod back to the scheduling queue. + // The podSchedulingCycle represents the current scheduling cycle number which can be + // returned by calling SchedulingCycle(). + AddUnschedulableIfNotPresent(pod *v1.Pod, podSchedulingCycle int64) error + // SchedulingCycle returns the current scheduling cycle number, which is + // cached by the scheduling queue. Normally, incrementing this number whenever + // a pod is popped (i.e. whenever Pop() is called) is enough. + SchedulingCycle() int64 // Pop removes the head of the queue and returns it. It blocks if the // queue is empty and waits until a new item is added to the queue. Pop() (*v1.Pod, error) @@ -111,10 +118,15 @@ func (f *FIFO) AddIfNotPresent(pod *v1.Pod) error { // AddUnschedulableIfNotPresent adds an unschedulable pod back to the queue. In // FIFO it is added to the end of the queue. -func (f *FIFO) AddUnschedulableIfNotPresent(pod *v1.Pod) error { +func (f *FIFO) AddUnschedulableIfNotPresent(pod *v1.Pod, podSchedulingCycle int64) error { return f.FIFO.AddIfNotPresent(pod) } +// SchedulingCycle implements SchedulingQueue.SchedulingCycle interface. +func (f *FIFO) SchedulingCycle() int64 { + return 0 +} + // Update updates a pod in the FIFO. func (f *FIFO) Update(oldPod, newPod *v1.Pod) error { return f.FIFO.Update(newPod) @@ -218,12 +230,14 @@ type PriorityQueue struct { // nominatedPods is a structures that stores pods which are nominated to run // on nodes. nominatedPods *nominatedPodMap - // receivedMoveRequest is set to true whenever we receive a request to move a - // pod from the unschedulableQ to the activeQ, and is set to false, when we pop - // a pod from the activeQ. It indicates if we received a move request when a - // pod was in flight (we were trying to schedule it). In such a case, we put - // the pod back into the activeQ if it is determined unschedulable. - receivedMoveRequest bool + // schedulingCycle represents the sequence number of the scheduling cycle and is + // incremented when a pod is popped. + schedulingCycle int64 + // moveRequestCycle caches the sequence number of the scheduling cycle at which we + // received a move request. Unschedulable pods in and before this scheduling + // cycle will be put back to the activeQ if we were trying to schedule them + // when we received the move request. + moveRequestCycle int64 // closed indicates that the queue is closed. // It is mainly used to let Pop() exit its control loop while waiting for an item. @@ -265,12 +279,13 @@ func NewPriorityQueue(stop <-chan struct{}) *PriorityQueue { // NewPriorityQueueWithClock creates a PriorityQueue which uses the passed clock for time. 
func NewPriorityQueueWithClock(stop <-chan struct{}, clock util.Clock) *PriorityQueue { pq := &PriorityQueue{ - clock: clock, - stop: stop, - podBackoff: util.CreatePodBackoffWithClock(1*time.Second, 10*time.Second, clock), - activeQ: util.NewHeap(cache.MetaNamespaceKeyFunc, activeQComp), - unschedulableQ: newUnschedulablePodsMap(), - nominatedPods: newNominatedPodMap(), + clock: clock, + stop: stop, + podBackoff: util.CreatePodBackoffWithClock(1*time.Second, 10*time.Second, clock), + activeQ: util.NewHeap(cache.MetaNamespaceKeyFunc, activeQComp), + unschedulableQ: newUnschedulablePodsMap(), + nominatedPods: newNominatedPodMap(), + moveRequestCycle: -1, } pq.cond.L = &pq.lock pq.podBackoffQ = util.NewHeap(cache.MetaNamespaceKeyFunc, pq.podsCompareBackoffCompleted) @@ -372,12 +387,19 @@ func (p *PriorityQueue) backoffPod(pod *v1.Pod) { } } +// SchedulingCycle returns current scheduling cycle. +func (p *PriorityQueue) SchedulingCycle() int64 { + p.lock.RLock() + defer p.lock.RUnlock() + return p.schedulingCycle +} + // AddUnschedulableIfNotPresent does nothing if the pod is present in any // queue. If pod is unschedulable, it adds pod to unschedulable queue if -// p.receivedMoveRequest is false or to backoff queue if p.receivedMoveRequest -// is true but pod is subject to backoff. In other cases, it adds pod to active -// queue and clears p.receivedMoveRequest. -func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod) error { +// p.moveRequestCycle > podSchedulingCycle or to backoff queue if p.moveRequestCycle +// <= podSchedulingCycle but pod is subject to backoff. In other cases, it adds pod to +// active queue. +func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod, podSchedulingCycle int64) error { p.lock.Lock() defer p.lock.Unlock() if p.unschedulableQ.get(pod) != nil { @@ -389,7 +411,7 @@ func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod) error { if _, exists, _ := p.podBackoffQ.Get(pod); exists { return fmt.Errorf("pod is already present in the backoffQ") } - if !p.receivedMoveRequest && isPodUnschedulable(pod) { + if podSchedulingCycle > p.moveRequestCycle && isPodUnschedulable(pod) { p.backoffPod(pod) p.unschedulableQ.addOrUpdate(pod) p.nominatedPods.add(pod, "") @@ -412,7 +434,6 @@ func (p *PriorityQueue) AddUnschedulableIfNotPresent(pod *v1.Pod) error { p.nominatedPods.add(pod, "") p.cond.Broadcast() } - p.receivedMoveRequest = false return err } @@ -470,7 +491,8 @@ func (p *PriorityQueue) flushUnschedulableQLeftover() { } // Pop removes the head of the active queue and returns it. It blocks if the -// activeQ is empty and waits until a new item is added to the queue. +// activeQ is empty and waits until a new item is added to the queue. It +// increments scheduling cycle when a pod is popped. 
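Pulling the new bookkeeping together: moveRequestCycle starts at -1, Pop() bumps schedulingCycle, a move request stamps moveRequestCycle with the current schedulingCycle (see the MoveAllToActiveQueue hunk below), and AddUnschedulableIfNotPresent compares the caller's captured cycle against moveRequestCycle. A self-contained sketch of just that comparison, with backoff and duplicate checks deliberately elided:

package sketch

// queueState condenses the two counters added to PriorityQueue in this hunk.
type queueState struct {
	schedulingCycle  int64 // incremented by Pop()
	moveRequestCycle int64 // set to schedulingCycle on a move request; -1 initially
}

// destinationFor mirrors the cycle comparison in AddUnschedulableIfNotPresent
// and names the internal queue an unschedulable pod lands in.
func (q *queueState) destinationFor(podSchedulingCycle int64) string {
	if podSchedulingCycle > q.moveRequestCycle {
		// No move request has arrived since this pod was popped: park it.
		return "unschedulableQ"
	}
	// A move request arrived while the pod was in flight: retry it (the real
	// code routes it to the backoff queue or straight to the active queue).
	return "backoffQ or activeQ"
}

Because moveRequestCycle is -1 until the first move request, every unschedulable pod initially parks in unschedulableQ, which matches the old receivedMoveRequest == false behaviour.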
func (p *PriorityQueue) Pop() (*v1.Pod, error) { p.lock.Lock() defer p.lock.Unlock() @@ -488,6 +510,7 @@ func (p *PriorityQueue) Pop() (*v1.Pod, error) { return nil, err } pod := obj.(*v1.Pod) + p.schedulingCycle++ return pod, err } @@ -608,7 +631,7 @@ func (p *PriorityQueue) MoveAllToActiveQueue() { } } p.unschedulableQ.clear() - p.receivedMoveRequest = true + p.moveRequestCycle = p.schedulingCycle p.cond.Broadcast() } @@ -626,7 +649,7 @@ func (p *PriorityQueue) movePodsToActiveQueue(pods []*v1.Pod) { } p.unschedulableQ.delete(pod) } - p.receivedMoveRequest = true + p.moveRequestCycle = p.schedulingCycle p.cond.Broadcast() } diff --git a/pkg/scheduler/internal/queue/scheduling_queue_test.go b/pkg/scheduler/internal/queue/scheduling_queue_test.go index 15dca6bf2c9..f0ca6751fec 100644 --- a/pkg/scheduler/internal/queue/scheduling_queue_test.go +++ b/pkg/scheduler/internal/queue/scheduling_queue_test.go @@ -179,9 +179,9 @@ func TestPriorityQueue_AddIfNotPresent(t *testing.T) { func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) { q := NewPriorityQueue(nil) q.Add(&highPriNominatedPod) - q.AddUnschedulableIfNotPresent(&highPriNominatedPod) // Must not add anything. - q.AddUnschedulableIfNotPresent(&medPriorityPod) // This should go to activeQ. - q.AddUnschedulableIfNotPresent(&unschedulablePod) + q.AddUnschedulableIfNotPresent(&highPriNominatedPod, q.SchedulingCycle()) // Must not add anything. + q.AddUnschedulableIfNotPresent(&medPriorityPod, q.SchedulingCycle()) // This should go to activeQ. + q.AddUnschedulableIfNotPresent(&unschedulablePod, q.SchedulingCycle()) expectedNominatedPods := &nominatedPodMap{ nominatedPodToNode: map[types.UID]string{ medPriorityPod.UID: "node1", @@ -209,6 +209,78 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) { } } +// TestPriorityQueue_AddUnschedulableIfNotPresent_Async tests scenario when +// AddUnschedulableIfNotPresent is called asynchronously pods in and before +// current scheduling cycle will be put back to activeQueue if we were trying +// to schedule them when we received move request. +func TestPriorityQueue_AddUnschedulableIfNotPresent_Async(t *testing.T) { + q := NewPriorityQueue(nil) + totalNum := 10 + expectedPods := make([]v1.Pod, 0, totalNum) + for i := 0; i < totalNum; i++ { + priority := int32(i) + p := v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("pod%d", i), + Namespace: fmt.Sprintf("ns%d", i), + UID: types.UID(fmt.Sprintf("upns%d", i)), + }, + Spec: v1.PodSpec{ + Priority: &priority, + }, + } + expectedPods = append(expectedPods, p) + // priority is to make pods ordered in the PriorityQueue + q.Add(&p) + } + + // Pop all pods except for the first one + for i := totalNum - 1; i > 0; i-- { + p, _ := q.Pop() + if !reflect.DeepEqual(&expectedPods[i], p) { + t.Errorf("Unexpected pod. 
Expected: %v, got: %v", &expectedPods[i], p) + } + } + + // move all pods to active queue when we were trying to schedule them + q.MoveAllToActiveQueue() + moveReqChan := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(totalNum - 1) + // mark pods[1] ~ pods[totalNum-1] as unschedulable, fire goroutines to add them back later + for i := 1; i < totalNum; i++ { + unschedulablePod := expectedPods[i].DeepCopy() + unschedulablePod.Status = v1.PodStatus{ + Conditions: []v1.PodCondition{ + { + Type: v1.PodScheduled, + Status: v1.ConditionFalse, + Reason: v1.PodReasonUnschedulable, + }, + }, + } + cycle := q.SchedulingCycle() + go func() { + <-moveReqChan + q.AddUnschedulableIfNotPresent(unschedulablePod, cycle) + wg.Done() + }() + } + firstPod, _ := q.Pop() + if !reflect.DeepEqual(&expectedPods[0], firstPod) { + t.Errorf("Unexpected pod. Expected: %v, got: %v", &expectedPods[0], firstPod) + } + // close moveReqChan here to make sure q.AddUnschedulableIfNotPresent is called after another pod is popped + close(moveReqChan) + wg.Wait() + // all other pods should be in active queue again + for i := 1; i < totalNum; i++ { + if _, exists, _ := q.activeQ.Get(&expectedPods[i]); !exists { + t.Errorf("Expected %v to be added to activeQ.", expectedPods[i].Name) + } + } +} + func TestPriorityQueue_Pop(t *testing.T) { q := NewPriorityQueue(nil) wg := sync.WaitGroup{} @@ -680,7 +752,7 @@ func TestRecentlyTriedPodsGoBack(t *testing.T) { LastProbeTime: metav1.Now(), }) // Put in the unschedulable queue. - q.AddUnschedulableIfNotPresent(p1) + q.AddUnschedulableIfNotPresent(p1, q.SchedulingCycle()) // Move all unschedulable pods to the active queue. q.MoveAllToActiveQueue() // Simulation is over. Now let's pop all pods. The pod popped first should be @@ -728,7 +800,7 @@ func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) { LastProbeTime: metav1.Now(), }) // Put in the unschedulable queue - q.AddUnschedulableIfNotPresent(&unschedulablePod) + q.AddUnschedulableIfNotPresent(&unschedulablePod, q.SchedulingCycle()) // Clear its backoff to simulate backoff its expiration q.clearPodBackoff(&unschedulablePod) // Move all unschedulable pods to the active queue. @@ -771,7 +843,7 @@ func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) { LastProbeTime: metav1.Now(), }) // And then, put unschedulable pod to the unschedulable queue - q.AddUnschedulableIfNotPresent(&unschedulablePod) + q.AddUnschedulableIfNotPresent(&unschedulablePod, q.SchedulingCycle()) // Clear its backoff to simulate its backoff expiration q.clearPodBackoff(&unschedulablePod) // Move all unschedulable pods to the active queue. @@ -838,7 +910,7 @@ func TestHighProirotyBackoff(t *testing.T) { Message: "fake scheduling failure", }) // Put in the unschedulable queue. - q.AddUnschedulableIfNotPresent(p) + q.AddUnschedulableIfNotPresent(p, q.SchedulingCycle()) // Move all unschedulable pods to the active queue. 
q.MoveAllToActiveQueue() diff --git a/pkg/security/apparmor/BUILD b/pkg/security/apparmor/BUILD index 576a2780306..fb29986efd4 100644 --- a/pkg/security/apparmor/BUILD +++ b/pkg/security/apparmor/BUILD @@ -17,9 +17,9 @@ go_library( deps = [ "//pkg/features:go_default_library", "//pkg/kubelet/types:go_default_library", - "//pkg/util/file:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", + "//vendor/k8s.io/utils/path:go_default_library", ], ) diff --git a/pkg/security/apparmor/validate.go b/pkg/security/apparmor/validate.go index 25ea591fca0..1d03c7ee02d 100644 --- a/pkg/security/apparmor/validate.go +++ b/pkg/security/apparmor/validate.go @@ -29,7 +29,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/features" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" - utilfile "k8s.io/kubernetes/pkg/util/file" + utilpath "k8s.io/utils/path" ) // Whether AppArmor should be disabled by default. @@ -195,7 +195,7 @@ func getAppArmorFS() (string, error) { } if fields[2] == "securityfs" { appArmorFS := path.Join(fields[1], "apparmor") - if ok, err := utilfile.FileExists(appArmorFS); !ok { + if ok, err := utilpath.Exists(utilpath.CheckFollowSymlink, appArmorFS); !ok { msg := fmt.Sprintf("path %s does not exist", appArmorFS) if err != nil { return "", fmt.Errorf("%s: %v", msg, err) diff --git a/pkg/util/BUILD b/pkg/util/BUILD index 35e298f15bf..77bb1f119a4 100644 --- a/pkg/util/BUILD +++ b/pkg/util/BUILD @@ -20,7 +20,6 @@ filegroup( "//pkg/util/dbus:all-srcs", "//pkg/util/ebtables:all-srcs", "//pkg/util/env:all-srcs", - "//pkg/util/file:all-srcs", "//pkg/util/filesystem:all-srcs", "//pkg/util/flag:all-srcs", "//pkg/util/flock:all-srcs", diff --git a/pkg/util/file/BUILD b/pkg/util/file/BUILD deleted file mode 100644 index 299528ca00f..00000000000 --- a/pkg/util/file/BUILD +++ /dev/null @@ -1,36 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", - "go_test", -) - -go_library( - name = "go_default_library", - srcs = ["file.go"], - importpath = "k8s.io/kubernetes/pkg/util/file", -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], -) - -go_test( - name = "go_default_test", - srcs = ["file_test.go"], - embed = [":go_default_library"], - deps = [ - "//vendor/github.com/spf13/afero:go_default_library", - "//vendor/github.com/stretchr/testify/assert:go_default_library", - ], -) diff --git a/pkg/util/file/file.go b/pkg/util/file/file.go deleted file mode 100644 index 70d26c4ef74..00000000000 --- a/pkg/util/file/file.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package file - -import ( - "os" -) - -// FileExists checks if specified file exists. 
-func FileExists(filename string) (bool, error) { - if _, err := os.Stat(filename); os.IsNotExist(err) { - return false, nil - } else if err != nil { - return false, err - } - return true, nil -} - -// FileOrSymlinkExists checks if specified file or symlink exists. -func FileOrSymlinkExists(filename string) (bool, error) { - if _, err := os.Lstat(filename); os.IsNotExist(err) { - return false, nil - } else if err != nil { - return false, err - } - return true, nil -} - -// ReadDirNoStat returns a string of files/directories contained -// in dirname without calling lstat on them. -func ReadDirNoStat(dirname string) ([]string, error) { - if dirname == "" { - dirname = "." - } - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - return f.Readdirnames(-1) -} diff --git a/pkg/util/file/file_test.go b/pkg/util/file/file_test.go deleted file mode 100644 index 43eb2ed1d1f..00000000000 --- a/pkg/util/file/file_test.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package file - -import ( - "os" - "path/filepath" - "sort" - "testing" - - "github.com/spf13/afero" - "github.com/stretchr/testify/assert" -) - -func RecoverEnv(wd, tmpDir string) { - os.Chdir(wd) - os.RemoveAll(tmpDir) -} - -func TestFileUtils(t *testing.T) { - fs := &afero.Afero{Fs: afero.NewOsFs()} - // Create tmp dir - tmpDir, err := fs.TempDir(os.TempDir(), "util_file_test_") - if err != nil { - t.Fatal("Failed to test: failed to create temp dir.") - } - - // create tmp file - tmpFile, err := fs.TempFile(tmpDir, "test_file_exists_") - if err != nil { - t.Fatal("Failed to test: failed to create temp file.") - } - - // create tmp sym link - tmpSymlinkName := filepath.Join(tmpDir, "test_file_exists_sym_link") - err = os.Symlink(tmpFile.Name(), tmpSymlinkName) - if err != nil { - t.Fatal("Failed to test: failed to create sym link.") - } - - // create tmp sub dir - tmpSubDir, err := fs.TempDir(tmpDir, "sub_") - if err != nil { - t.Fatal("Failed to test: failed to create temp sub dir.") - } - - // record the current dir - currentDir, err := os.Getwd() - if err != nil { - t.Fatal("Failed to test: failed to get current dir.") - } - - // change the work dir to temp dir - err = os.Chdir(tmpDir) - if err != nil { - t.Fatal("Failed to test: failed to change work dir.") - } - - // recover test environment - defer RecoverEnv(currentDir, tmpDir) - - t.Run("TestFileExists", func(t *testing.T) { - tests := []struct { - name string - fileName string - expectedError bool - expectedValue bool - }{ - {"file_not_exists", filepath.Join(tmpDir, "file_not_exist_case"), false, false}, - {"file_exists", tmpFile.Name(), false, true}, - } - - for _, test := range tests { - realValued, realError := FileExists(test.fileName) - if test.expectedError { - assert.Errorf(t, realError, "Failed to test with '%s': %s", test.fileName, test.name) - } else { - assert.EqualValuesf(t, test.expectedValue, realValued, "Failed to test with '%s': %s", test.fileName, test.name) - } - 
} - }) - - t.Run("TestFileOrSymlinkExists", func(t *testing.T) { - tests := []struct { - name string - fileName string - expectedError bool - expectedValue bool - }{ - {"file_not_exists", filepath.Join(tmpDir, "file_not_exist_case"), false, false}, - {"file_exists", tmpFile.Name(), false, true}, - {"symlink_exists", tmpSymlinkName, false, true}, - } - - for _, test := range tests { - realValued, realError := FileOrSymlinkExists(test.fileName) - if test.expectedError { - assert.Errorf(t, realError, "Failed to test with '%s': %s", test.fileName, test.name) - } else { - assert.EqualValuesf(t, test.expectedValue, realValued, "Failed to test with '%s': %s", test.fileName, test.name) - } - } - }) - - t.Run("TestReadDirNoStat", func(t *testing.T) { - _, tmpFileSimpleName := filepath.Split(tmpFile.Name()) - _, tmpSymlinkSimpleName := filepath.Split(tmpSymlinkName) - _, tmpSubDirSimpleName := filepath.Split(tmpSubDir) - - tests := []struct { - name string - dirName string - expectedError bool - expectedValue []string - }{ - {"dir_not_exists", filepath.Join(tmpDir, "file_not_exist_case"), true, []string{}}, - {"dir_is_empty", "", false, []string{tmpFileSimpleName, tmpSymlinkSimpleName, tmpSubDirSimpleName}}, - {"dir_exists", tmpDir, false, []string{tmpFileSimpleName, tmpSymlinkSimpleName, tmpSubDirSimpleName}}, - } - - for _, test := range tests { - realValued, realError := ReadDirNoStat(test.dirName) - - // execute sort action before compare - sort.Strings(realValued) - sort.Strings(test.expectedValue) - - if test.expectedError { - assert.Errorf(t, realError, "Failed to test with '%s': %s", test.dirName, test.name) - } else { - assert.EqualValuesf(t, test.expectedValue, realValued, "Failed to test with '%s': %s", test.dirName, test.name) - } - } - }) -} diff --git a/pkg/util/mount/BUILD b/pkg/util/mount/BUILD index 60998c1d228..d2eae31ed0f 100644 --- a/pkg/util/mount/BUILD +++ b/pkg/util/mount/BUILD @@ -35,11 +35,11 @@ go_library( "//vendor/k8s.io/utils/nsenter:go_default_library", ], "@io_bazel_rules_go//go/platform:linux": [ - "//pkg/util/file:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/golang.org/x/sys/unix:go_default_library", "//vendor/k8s.io/utils/io:go_default_library", "//vendor/k8s.io/utils/nsenter:go_default_library", + "//vendor/k8s.io/utils/path:go_default_library", ], "@io_bazel_rules_go//go/platform:nacl": [ "//vendor/k8s.io/utils/nsenter:go_default_library", @@ -57,8 +57,8 @@ go_library( "//vendor/k8s.io/utils/nsenter:go_default_library", ], "@io_bazel_rules_go//go/platform:windows": [ - "//pkg/util/file:go_default_library", "//vendor/k8s.io/utils/nsenter:go_default_library", + "//vendor/k8s.io/utils/path:go_default_library", ], "//conditions:default": [], }), diff --git a/pkg/util/mount/mount_linux.go b/pkg/util/mount/mount_linux.go index b0ef01bae4d..c66039e5e09 100644 --- a/pkg/util/mount/mount_linux.go +++ b/pkg/util/mount/mount_linux.go @@ -33,9 +33,9 @@ import ( "golang.org/x/sys/unix" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog" - utilfile "k8s.io/kubernetes/pkg/util/file" utilexec "k8s.io/utils/exec" utilio "k8s.io/utils/io" + utilpath "k8s.io/utils/path" ) const ( @@ -417,7 +417,7 @@ func (mounter *Mounter) MakeFile(pathname string) error { } func (mounter *Mounter) ExistsPath(pathname string) (bool, error) { - return utilfile.FileExists(pathname) + return utilpath.Exists(utilpath.CheckFollowSymlink, pathname) } func (mounter *Mounter) EvalHostSymlinks(pathname string) (string, error) { diff --git 
a/pkg/util/mount/mount_windows.go b/pkg/util/mount/mount_windows.go index f4508c5227d..89d09c45ab9 100644 --- a/pkg/util/mount/mount_windows.go +++ b/pkg/util/mount/mount_windows.go @@ -30,7 +30,7 @@ import ( "k8s.io/klog" - utilfile "k8s.io/kubernetes/pkg/util/file" + utilpath "k8s.io/utils/path" ) // Mounter provides the default implementation of mount.Interface @@ -235,7 +235,7 @@ func (mounter *Mounter) MakeFile(pathname string) error { // ExistsPath checks whether the path exists func (mounter *Mounter) ExistsPath(pathname string) (bool, error) { - return utilfile.FileExists(pathname) + return utilpath.Exists(utilpath.CheckFollowSymlink, pathname) } // EvalHostSymlinks returns the path name after evaluating symlinks diff --git a/pkg/util/mount/nsenter_mount.go b/pkg/util/mount/nsenter_mount.go index e77ee6f75f3..ce5d825da79 100644 --- a/pkg/util/mount/nsenter_mount.go +++ b/pkg/util/mount/nsenter_mount.go @@ -27,8 +27,8 @@ import ( "golang.org/x/sys/unix" "k8s.io/klog" - utilfile "k8s.io/kubernetes/pkg/util/file" "k8s.io/utils/nsenter" + utilpath "k8s.io/utils/path" ) const ( @@ -291,7 +291,7 @@ func (mounter *NsenterMounter) ExistsPath(pathname string) (bool, error) { return false, err } kubeletpath := mounter.ne.KubeletPath(hostPath) - return utilfile.FileExists(kubeletpath) + return utilpath.Exists(utilpath.CheckFollowSymlink, kubeletpath) } func (mounter *NsenterMounter) EvalHostSymlinks(pathname string) (string, error) { diff --git a/pkg/util/workqueue/prometheus/BUILD b/pkg/util/workqueue/prometheus/BUILD index 62a98289a50..d9708f7eb4f 100644 --- a/pkg/util/workqueue/prometheus/BUILD +++ b/pkg/util/workqueue/prometheus/BUILD @@ -12,6 +12,7 @@ go_library( deps = [ "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", + "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/util/workqueue/prometheus/prometheus.go b/pkg/util/workqueue/prometheus/prometheus.go index a93b70cc726..e9c856c1aee 100644 --- a/pkg/util/workqueue/prometheus/prometheus.go +++ b/pkg/util/workqueue/prometheus/prometheus.go @@ -18,6 +18,7 @@ package prometheus import ( "k8s.io/client-go/util/workqueue" + "k8s.io/klog" "github.com/prometheus/client_golang/prometheus" ) @@ -133,7 +134,9 @@ func (prometheusMetricsProvider) NewDeprecatedDepthMetric(name string) workqueue Name: "depth", Help: "(Deprecated) Current depth of workqueue: " + name, }) - prometheus.Register(depth) + if err := prometheus.Register(depth); err != nil { + klog.Errorf("failed to register depth metric %v: %v", name, err) + } return depth } @@ -143,7 +146,9 @@ func (prometheusMetricsProvider) NewDeprecatedAddsMetric(name string) workqueue. 
Name: "adds", Help: "(Deprecated) Total number of adds handled by workqueue: " + name, }) - prometheus.Register(adds) + if err := prometheus.Register(adds); err != nil { + klog.Errorf("failed to register adds metric %v: %v", name, err) + } return adds } @@ -153,7 +158,9 @@ func (prometheusMetricsProvider) NewDeprecatedLatencyMetric(name string) workque Name: "queue_latency", Help: "(Deprecated) How long an item stays in workqueue" + name + " before being requested.", }) - prometheus.Register(latency) + if err := prometheus.Register(latency); err != nil { + klog.Errorf("failed to register latency metric %v: %v", name, err) + } return latency } @@ -163,7 +170,9 @@ func (prometheusMetricsProvider) NewDeprecatedWorkDurationMetric(name string) wo Name: "work_duration", Help: "(Deprecated) How long processing an item from workqueue" + name + " takes.", }) - prometheus.Register(workDuration) + if err := prometheus.Register(workDuration); err != nil { + klog.Errorf("failed to register work_duration metric %v: %v", name, err) + } return workDuration } @@ -176,7 +185,9 @@ func (prometheusMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(name s "values indicate stuck threads. One can deduce the number of stuck " + "threads by observing the rate at which this increases.", }) - prometheus.Register(unfinished) + if err := prometheus.Register(unfinished); err != nil { + klog.Errorf("failed to register unfinished_work_seconds metric %v: %v", name, err) + } return unfinished } @@ -187,7 +198,9 @@ func (prometheusMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecond Help: "(Deprecated) How many microseconds has the longest running " + "processor for " + name + " been running.", }) - prometheus.Register(unfinished) + if err := prometheus.Register(unfinished); err != nil { + klog.Errorf("failed to register longest_running_processor_microseconds metric %v: %v", name, err) + } return unfinished } @@ -197,6 +210,8 @@ func (prometheusMetricsProvider) NewDeprecatedRetriesMetric(name string) workque Name: "retries", Help: "(Deprecated) Total number of retries handled by workqueue: " + name, }) - prometheus.Register(retries) + if err := prometheus.Register(retries); err != nil { + klog.Errorf("failed to register retries metric %v: %v", name, err) + } return retries } diff --git a/pkg/volume/gcepd/BUILD b/pkg/volume/gcepd/BUILD index ed00e269c04..630bad76e7c 100644 --- a/pkg/volume/gcepd/BUILD +++ b/pkg/volume/gcepd/BUILD @@ -20,7 +20,6 @@ go_library( "//pkg/cloudprovider/providers/gce:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", - "//pkg/util/file:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", @@ -35,6 +34,7 @@ go_library( "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", + "//vendor/k8s.io/utils/path:go_default_library", ], ) diff --git a/pkg/volume/gcepd/gce_util.go b/pkg/volume/gcepd/gce_util.go index d0e13a8ddf7..0d7e2126e24 100644 --- a/pkg/volume/gcepd/gce_util.go +++ b/pkg/volume/gcepd/gce_util.go @@ -32,11 +32,11 @@ import ( gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - utilfile "k8s.io/kubernetes/pkg/util/file" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/utils/exec" + utilpath 
"k8s.io/utils/path" ) const ( @@ -236,7 +236,7 @@ func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String, diskName st // Calls scsi_id on the given devicePath to get the serial number reported by that device. func getScsiSerial(devicePath, diskName string) (string, error) { - exists, err := utilfile.FileExists("/lib/udev/scsi_id") + exists, err := utilpath.Exists(utilpath.CheckFollowSymlink, "/lib/udev/scsi_id") if err != nil { return "", fmt.Errorf("failed to check scsi_id existence: %v", err) } diff --git a/pkg/volume/host_path/BUILD b/pkg/volume/host_path/BUILD index b05d42c64b3..0c961173ba4 100644 --- a/pkg/volume/host_path/BUILD +++ b/pkg/volume/host_path/BUILD @@ -31,7 +31,6 @@ go_test( srcs = ["host_path_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/util/file:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", @@ -41,6 +40,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", + "//vendor/k8s.io/utils/path:go_default_library", ], ) diff --git a/pkg/volume/host_path/host_path_test.go b/pkg/volume/host_path/host_path_test.go index f29e1599475..16c6697b640 100644 --- a/pkg/volume/host_path/host_path_test.go +++ b/pkg/volume/host_path/host_path_test.go @@ -27,10 +27,10 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/client-go/kubernetes/fake" - utilfile "k8s.io/kubernetes/pkg/util/file" utilmount "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" + utilpath "k8s.io/utils/path" ) func newHostPathType(pathType string) *v1.HostPathType { @@ -122,7 +122,7 @@ func TestDeleter(t *testing.T) { if err := deleter.Delete(); err != nil { t.Errorf("Mock Recycler expected to return nil but got %s", err) } - if exists, _ := utilfile.FileExists(tempPath); exists { + if exists, _ := utilpath.Exists(utilpath.CheckFollowSymlink, tempPath); exists { t.Errorf("Temp path expected to be deleted, but was found at %s", tempPath) } } diff --git a/pkg/volume/rbd/BUILD b/pkg/volume/rbd/BUILD index 0a5ca33c552..11232c8df7a 100644 --- a/pkg/volume/rbd/BUILD +++ b/pkg/volume/rbd/BUILD @@ -18,7 +18,6 @@ go_library( importpath = "k8s.io/kubernetes/pkg/volume/rbd", deps = [ "//pkg/features:go_default_library", - "//pkg/util/file:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/node:go_default_library", "//pkg/volume:go_default_library", @@ -36,6 +35,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/strings:go_default_library", + "//vendor/k8s.io/utils/path:go_default_library", ], ) diff --git a/pkg/volume/rbd/rbd_util.go b/pkg/volume/rbd/rbd_util.go index a4afb518605..613987279f5 100644 --- a/pkg/volume/rbd/rbd_util.go +++ b/pkg/volume/rbd/rbd_util.go @@ -37,11 +37,11 @@ import ( "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog" - fileutil "k8s.io/kubernetes/pkg/util/file" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/volume" volutil "k8s.io/kubernetes/pkg/volume/util" + utilpath "k8s.io/utils/path" ) const ( @@ -486,7 +486,7 @@ func (util *RBDUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, devic // Currently, we don't persist rbd 
info on the disk, but for backward // compatbility, we need to clean it if found. rbdFile := path.Join(deviceMountPath, "rbd.json") - exists, err := fileutil.FileExists(rbdFile) + exists, err := utilpath.Exists(utilpath.CheckFollowSymlink, rbdFile) if err != nil { return err } diff --git a/staging/src/k8s.io/api/README.md b/staging/src/k8s.io/api/README.md index 967543a4545..7d8bb93f629 100644 --- a/staging/src/k8s.io/api/README.md +++ b/staging/src/k8s.io/api/README.md @@ -1 +1,19 @@ -This repo is still in the experimental stage. Shortly it will contain the schema of the API that are served by the Kubernetes apiserver. +# api + +Schema of the external API types that are served by the Kubernetes API server. + +## Purpose + +This library is the canonical location of the Kubernetes API definition. Most likely interaction with this repository is as a dependency of client-go. + +## Compatibility + +Branches track Kubernetes branches and are compatible with that repo. + +## Where does it come from? + +`api` is synced from https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/api. Code changes are made in that location, merged into `k8s.io/kubernetes` and later synced here. + +## Things you should *NOT* do + +1. https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/api is synced to k8s.io/api. All changes must be made in the former. The latter is read-only. diff --git a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json index 850dc10b21c..57ac02a4fa3 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiextensions-apiserver/Godeps/Godeps.json @@ -372,7 +372,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4" + "Rev": "d4020504c68b6bfa818032bedfb48e33e9638506" }, { "ImportPath": "github.com/ghodss/yaml", diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/hack/update-codegen.sh b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/hack/update-codegen.sh index ea26ad63cca..809ee02feef 100755 --- a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/hack/update-codegen.sh +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/hack/update-codegen.sh @@ -18,17 +18,17 @@ set -o errexit set -o nounset set -o pipefail -SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/.. -CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} +SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} # generate the code with: # --output-base because this script should also be able to run inside the vendor dir of # k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir # instead of the $GOPATH directly. For normal projects this can be dropped. -${CODEGEN_PKG}/generate-groups.sh all \ +"${CODEGEN_PKG}/generate-groups.sh" all \ k8s.io/apiextensions-apiserver/examples/client-go/pkg/client k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis \ cr:v1 \ - --output-base "$(dirname ${BASH_SOURCE})/../../../../.." + --output-base "$(dirname "${BASH_SOURCE[0]}")/../../../../.." 
# To use your own boilerplate text append: # --go-header-file ${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt diff --git a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/hack/verify-codegen.sh b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/hack/verify-codegen.sh index d02a6fa395e..d91566e6228 100755 --- a/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/hack/verify-codegen.sh +++ b/staging/src/k8s.io/apiextensions-apiserver/examples/client-go/hack/verify-codegen.sh @@ -18,7 +18,7 @@ set -o errexit set -o nounset set -o pipefail -SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/.. +SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. DIFFROOT="${SCRIPT_ROOT}/pkg" TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg" diff --git a/staging/src/k8s.io/apiextensions-apiserver/hack/build-image.sh b/staging/src/k8s.io/apiextensions-apiserver/hack/build-image.sh index d0d129078f3..1f7bb58c334 100755 --- a/staging/src/k8s.io/apiextensions-apiserver/hack/build-image.sh +++ b/staging/src/k8s.io/apiextensions-apiserver/hack/build-image.sh @@ -14,8 +14,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +set -o errexit +set -o nounset +set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../../../.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../../../../.. source "${KUBE_ROOT}/hack/lib/util.sh" # Register function to be called on EXIT to remove generated binary. diff --git a/staging/src/k8s.io/apiextensions-apiserver/hack/update-codegen.sh b/staging/src/k8s.io/apiextensions-apiserver/hack/update-codegen.sh index 0dc83d07f21..d5b08c6c5da 100755 --- a/staging/src/k8s.io/apiextensions-apiserver/hack/update-codegen.sh +++ b/staging/src/k8s.io/apiextensions-apiserver/hack/update-codegen.sh @@ -18,8 +18,8 @@ set -o errexit set -o nounset set -o pipefail -SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/.. -CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} +SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} # generate the code with: # --output-base because this script should also be able to run inside the vendor dir of @@ -27,8 +27,8 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-ge # instead of the $GOPATH directly. For normal projects this can be dropped. CLIENTSET_NAME_VERSIONED=clientset \ CLIENTSET_NAME_INTERNAL=internalclientset \ -${CODEGEN_PKG}/generate-internal-groups.sh all \ +"${CODEGEN_PKG}/generate-internal-groups.sh" all \ k8s.io/apiextensions-apiserver/pkg/client k8s.io/apiextensions-apiserver/pkg/apis k8s.io/apiextensions-apiserver/pkg/apis \ "apiextensions:v1beta1" \ - --output-base "$(dirname ${BASH_SOURCE})/../../.." \ - --go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt + --output-base "$(dirname "${BASH_SOURCE[0]}")/../../.." \ + --go-header-file "${SCRIPT_ROOT}/hack/boilerplate.go.txt" diff --git a/staging/src/k8s.io/apiextensions-apiserver/hack/verify-codegen.sh b/staging/src/k8s.io/apiextensions-apiserver/hack/verify-codegen.sh index ab43ba23d01..d91566e6228 100755 --- a/staging/src/k8s.io/apiextensions-apiserver/hack/verify-codegen.sh +++ b/staging/src/k8s.io/apiextensions-apiserver/hack/verify-codegen.sh @@ -18,8 +18,7 @@ set -o errexit set -o nounset set -o pipefail -SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/.. -SCRIPT_BASE=${SCRIPT_ROOT}/../.. 
+SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. DIFFROOT="${SCRIPT_ROOT}/pkg" TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg" diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go index c2ebdcf1709..e3e3d27c0b0 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go @@ -79,7 +79,7 @@ func NewCRDFinalizer( crdLister: crdInformer.Lister(), crdSynced: crdInformer.Informer().HasSynced, crClientGetter: crClientGetter, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "CustomResourceDefinition-CRDFinalizer"), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd_finalizer"), } crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go index fe5c9479847..1bea5f440b9 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go @@ -66,7 +66,7 @@ func NewNamingConditionController( crdClient: crdClient, crdLister: crdInformer.Lister(), crdSynced: crdInformer.Informer().HasSynced, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "CustomResourceDefinition-NamingConditionController"), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "crd_naming_condition_controller"), } informerIndexer := crdInformer.Informer().GetIndexer() diff --git a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json index 52655ff968b..ce90141c15b 100644 --- a/staging/src/k8s.io/apimachinery/Godeps/Godeps.json +++ b/staging/src/k8s.io/apimachinery/Godeps/Godeps.json @@ -24,7 +24,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4" + "Rev": "d4020504c68b6bfa818032bedfb48e33e9638506" }, { "ImportPath": "github.com/gogo/protobuf/proto", diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD b/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD index 6b52ceff0fe..1b3350b7f27 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/BUILD @@ -9,6 +9,7 @@ load( go_test( name = "go_default_test", srcs = [ + "codec_test.go", "conversion_test.go", "converter_test.go", "embedded_test.go", diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/codec.go b/staging/src/k8s.io/apimachinery/pkg/runtime/codec.go index 6b859b28897..284e32bc3cb 100644 --- a/staging/src/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/codec.go @@ -283,6 +283,7 @@ var _ GroupVersioner = multiGroupVersioner{} type multiGroupVersioner struct { target schema.GroupVersion acceptedGroupKinds []schema.GroupKind + coerce bool } // NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds. 
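Context for the new coerce field on multiGroupVersioner: it drives the fallback added in the next hunk, where a coercing versioner maps the first incoming kind onto the target group/version instead of reporting no match when nothing in the preferred list applies. A reduced sketch of that decision, with local stand-in types rather than the real schema package:

package sketch

type groupVersionKind struct{ Group, Version, Kind string }

// kindFor mirrors the tail of multiGroupVersioner.KindForGroupVersionKinds:
// preferredMatch is whatever the existing accepted-group-kinds loop selected,
// or nil when nothing matched.
func kindFor(target groupVersionKind, coerce bool, preferredMatch *groupVersionKind, kinds []groupVersionKind) (groupVersionKind, bool) {
	if preferredMatch != nil {
		return groupVersionKind{target.Group, target.Version, preferredMatch.Kind}, true
	}
	if coerce && len(kinds) > 0 {
		// New behaviour: fall back to the first incoming kind instead of failing.
		return groupVersionKind{target.Group, target.Version, kinds[0].Kind}, true
	}
	return groupVersionKind{}, false
}

This is the behaviour the storage codec change later in this diff opts into by switching to NewCoercingMultiGroupVersioner.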
@@ -294,6 +295,22 @@ func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKi return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds} } +// NewCoercingMultiGroupVersioner returns the provided group version for any incoming kind. +// Incoming kinds that match the provided groupKinds are preferred. +// Kind may be empty in the provided group kind, in which case any kind will match. +// Examples: +// gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar +// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind) +// +// gv=mygroup/__internal, groupKinds=mygroup, anothergroup +// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group) +// +// gv=mygroup/__internal, groupKinds=mygroup, anothergroup +// KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list) +func NewCoercingMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner { + return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds, coerce: true} +} + // KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will // use the originating kind where possible. func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { @@ -308,5 +325,8 @@ func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersio return v.target.WithKind(src.Kind), true } } + if v.coerce && len(kinds) > 0 { + return v.target.WithKind(kinds[0].Kind), true + } return schema.GroupVersionKind{}, false } diff --git a/staging/src/k8s.io/apimachinery/pkg/runtime/codec_test.go b/staging/src/k8s.io/apimachinery/pkg/runtime/codec_test.go new file mode 100644 index 00000000000..be169f781ac --- /dev/null +++ b/staging/src/k8s.io/apimachinery/pkg/runtime/codec_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func gv(group, version string) schema.GroupVersion { + return schema.GroupVersion{Group: group, Version: version} +} +func gvk(group, version, kind string) schema.GroupVersionKind { + return schema.GroupVersionKind{Group: group, Version: version, Kind: kind} +} +func gk(group, kind string) schema.GroupKind { + return schema.GroupKind{Group: group, Kind: kind} +} + +func TestCoercingMultiGroupVersioner(t *testing.T) { + testcases := []struct { + name string + target schema.GroupVersion + preferredKinds []schema.GroupKind + kinds []schema.GroupVersionKind + expectKind schema.GroupVersionKind + }{ + { + name: "matched preferred group/kind", + target: gv("mygroup", "__internal"), + preferredKinds: []schema.GroupKind{gk("mygroup", "Foo"), gk("anothergroup", "Bar")}, + kinds: []schema.GroupVersionKind{gvk("yetanother", "v1", "Baz"), gvk("anothergroup", "v1", "Bar")}, + expectKind: gvk("mygroup", "__internal", "Bar"), + }, + { + name: "matched preferred group", + target: gv("mygroup", "__internal"), + preferredKinds: []schema.GroupKind{gk("mygroup", ""), gk("anothergroup", "")}, + kinds: []schema.GroupVersionKind{gvk("yetanother", "v1", "Baz"), gvk("anothergroup", "v1", "Bar")}, + expectKind: gvk("mygroup", "__internal", "Bar"), + }, + { + name: "no preferred group/kind match, uses first kind in list", + target: gv("mygroup", "__internal"), + preferredKinds: []schema.GroupKind{gk("mygroup", ""), gk("anothergroup", "")}, + kinds: []schema.GroupVersionKind{gvk("yetanother", "v1", "Baz"), gvk("yetanother", "v1", "Bar")}, + expectKind: gvk("mygroup", "__internal", "Baz"), + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + v := NewCoercingMultiGroupVersioner(tc.target, tc.preferredKinds...) 
+ kind, ok := v.KindForGroupVersionKinds(tc.kinds) + if !ok { + t.Error("got no kind") + } + if kind != tc.expectKind { + t.Errorf("expected %#v, got %#v", tc.expectKind, kind) + } + }) + } +} diff --git a/staging/src/k8s.io/apiserver/Godeps/Godeps.json b/staging/src/k8s.io/apiserver/Godeps/Godeps.json index f620b658ba1..cad4fc800c8 100644 --- a/staging/src/k8s.io/apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/apiserver/Godeps/Godeps.json @@ -372,7 +372,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4" + "Rev": "d4020504c68b6bfa818032bedfb48e33e9638506" }, { "ImportPath": "github.com/ghodss/yaml", diff --git a/staging/src/k8s.io/apiserver/pkg/server/storage/storage_codec.go b/staging/src/k8s.io/apiserver/pkg/server/storage/storage_codec.go index 113ac573cf3..e2f91bf13d2 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/storage/storage_codec.go +++ b/staging/src/k8s.io/apiserver/pkg/server/storage/storage_codec.go @@ -85,7 +85,7 @@ func NewStorageCodec(opts StorageCodecConfig) (runtime.Codec, error) { ) decoder := opts.StorageSerializer.DecoderToVersion( recognizer.NewDecoder(decoders...), - runtime.NewMultiGroupVersioner( + runtime.NewCoercingMultiGroupVersioner( opts.MemoryVersion, schema.GroupKind{Group: opts.MemoryVersion.Group}, schema.GroupKind{Group: opts.StorageVersion.Group}, diff --git a/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json b/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json index d7a51f34230..bea1493a2f5 100644 --- a/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json +++ b/staging/src/k8s.io/cli-runtime/Godeps/Godeps.json @@ -12,7 +12,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4" + "Rev": "d4020504c68b6bfa818032bedfb48e33e9638506" }, { "ImportPath": "github.com/gogo/protobuf/proto", diff --git a/staging/src/k8s.io/client-go/Godeps/Godeps.json b/staging/src/k8s.io/client-go/Godeps/Godeps.json index 766154c6add..be29f32bc7f 100644 --- a/staging/src/k8s.io/client-go/Godeps/Godeps.json +++ b/staging/src/k8s.io/client-go/Godeps/Godeps.json @@ -56,7 +56,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4" + "Rev": "d4020504c68b6bfa818032bedfb48e33e9638506" }, { "ImportPath": "github.com/gogo/protobuf/proto", diff --git a/staging/src/k8s.io/client-go/tools/cache/thread_safe_store.go b/staging/src/k8s.io/client-go/tools/cache/thread_safe_store.go index 1c201efb629..5cbdd17ed45 100644 --- a/staging/src/k8s.io/client-go/tools/cache/thread_safe_store.go +++ b/staging/src/k8s.io/client-go/tools/cache/thread_safe_store.go @@ -148,12 +148,19 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, } index := c.indices[indexName] - // need to de-dupe the return list. Since multiple keys are allowed, this can happen. - returnKeySet := sets.String{} - for _, indexKey := range indexKeys { - set := index[indexKey] - for _, key := range set.UnsortedList() { - returnKeySet.Insert(key) + var returnKeySet sets.String + if len(indexKeys) == 1 { + // In majority of cases, there is exactly one value matching. + // Optimize the most common path - deduping is not needed here. + returnKeySet = index[indexKeys[0]] + } else { + // Need to de-dupe the return list. + // Since multiple keys are allowed, this can happen. 
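A note on the Index fast path introduced here: with exactly one index key, the stored set is returned as-is instead of being copied into a fresh sets.String, saving an allocation and a full copy on the most common lookup; this is safe because the method only reads the set while still holding the store's read lock. A self-contained sketch of the shape of the optimization, with sets.String simplified to a plain map:

package sketch

type stringSet map[string]struct{}

// keysForIndex shows the single-key fast path: de-duplication is only needed
// when an object maps to more than one index key.
func keysForIndex(index map[string]stringSet, indexKeys []string) stringSet {
	if len(indexKeys) == 1 {
		// Most common case: exactly one matching value, so no copy is made.
		return index[indexKeys[0]]
	}
	merged := stringSet{}
	for _, indexKey := range indexKeys {
		for key := range index[indexKey] {
			merged[key] = struct{}{}
		}
	}
	return merged
}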
+ returnKeySet = sets.String{} + for _, indexKey := range indexKeys { + for key := range index[indexKey] { + returnKeySet.Insert(key) + } } } diff --git a/staging/src/k8s.io/cloud-provider/BUILD b/staging/src/k8s.io/cloud-provider/BUILD index 4c0c21dcbe8..21126c5df83 100644 --- a/staging/src/k8s.io/cloud-provider/BUILD +++ b/staging/src/k8s.io/cloud-provider/BUILD @@ -33,6 +33,9 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/cloud-provider/node:all-srcs", + ], tags = ["automanaged"], ) diff --git a/staging/src/k8s.io/cloud-provider/Godeps/Godeps.json b/staging/src/k8s.io/cloud-provider/Godeps/Godeps.json index 93aae245fc2..5393449dbac 100644 --- a/staging/src/k8s.io/cloud-provider/Godeps/Godeps.json +++ b/staging/src/k8s.io/cloud-provider/Godeps/Godeps.json @@ -286,6 +286,10 @@ "ImportPath": "k8s.io/api/storage/v1beta1", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apimachinery/pkg/api/equality", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/pkg/api/errors", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -398,6 +402,10 @@ "ImportPath": "k8s.io/apimachinery/pkg/util/json", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apimachinery/pkg/util/mergepatch", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/pkg/util/naming", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -414,6 +422,10 @@ "ImportPath": "k8s.io/apimachinery/pkg/util/sets", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apimachinery/pkg/util/strategicpatch", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/pkg/util/validation", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -438,6 +450,10 @@ "ImportPath": "k8s.io/apimachinery/pkg/watch", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" }, + { + "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/json", + "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + }, { "ImportPath": "k8s.io/apimachinery/third_party/forked/golang/reflect", "Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" @@ -950,6 +966,10 @@ "ImportPath": "k8s.io/klog", "Rev": "8139d8cb77af419532b33dfa7dd09fbc5f1d344f" }, + { + "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", + "Rev": "ced9eb3070a5f1c548ef46e8dfe2a97c208d9f03" + }, { "ImportPath": "k8s.io/utils/buffer", "Rev": "ed37f7428a91fc2a81070808937195dcd46d320e" diff --git a/staging/src/k8s.io/cloud-provider/node/BUILD b/staging/src/k8s.io/cloud-provider/node/BUILD new file mode 100644 index 00000000000..66c23210dfe --- /dev/null +++ b/staging/src/k8s.io/cloud-provider/node/BUILD @@ -0,0 +1,16 @@ +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [ + ":package-srcs", + "//staging/src/k8s.io/cloud-provider/node/helpers:all-srcs", + ], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/cloud-provider/node/helpers/BUILD b/staging/src/k8s.io/cloud-provider/node/helpers/BUILD new file mode 100644 index 00000000000..9ac9a5a927a --- /dev/null +++ b/staging/src/k8s.io/cloud-provider/node/helpers/BUILD @@ -0,0 +1,33 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["taints.go"], + importmap = 
"k8s.io/kubernetes/vendor/k8s.io/cloud-provider/node/helpers", + importpath = "k8s.io/cloud-provider/node/helpers", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/util/retry:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/staging/src/k8s.io/cloud-provider/node/helpers/taints.go b/staging/src/k8s.io/cloud-provider/node/helpers/taints.go new file mode 100644 index 00000000000..2e3e31cce5e --- /dev/null +++ b/staging/src/k8s.io/cloud-provider/node/helpers/taints.go @@ -0,0 +1,140 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* + +NOTE: the contents of this file has been copied from k8s.io/kubernetes/pkg/controller +and k8s.io/kubernetes/pkg/util/taints. The reason for duplicating this code is to remove +dependencies to k8s.io/kubernetes in all the cloud providers. Once k8s.io/kubernetes/pkg/util/taints +is moved to an external repository, this file should be removed and replaced with that one. +*/ + +package helpers + +import ( + "encoding/json" + "fmt" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + clientretry "k8s.io/client-go/util/retry" +) + +var updateTaintBackoff = wait.Backoff{ + Steps: 5, + Duration: 100 * time.Millisecond, + Jitter: 1.0, +} + +// AddOrUpdateTaintOnNode add taints to the node. If taint was added into node, it'll issue API calls +// to update nodes; otherwise, no API calls. Return error if any. +func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error { + if len(taints) == 0 { + return nil + } + firstTry := true + return clientretry.RetryOnConflict(updateTaintBackoff, func() error { + var err error + var oldNode *v1.Node + // First we try getting node from the API server cache, as it's cheaper. If it fails + // we get it from etcd to be sure to have fresh data. 
+ if firstTry { + oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + firstTry = false + } else { + oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + } + if err != nil { + return err + } + + var newNode *v1.Node + oldNodeCopy := oldNode + updated := false + for _, taint := range taints { + curNewNode, ok, err := addOrUpdateTaint(oldNodeCopy, taint) + if err != nil { + return fmt.Errorf("failed to update taint of node") + } + updated = updated || ok + newNode = curNewNode + oldNodeCopy = curNewNode + } + if !updated { + return nil + } + return PatchNodeTaints(c, nodeName, oldNode, newNode) + }) +} + +// PatchNodeTaints patches node's taints. +func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error { + oldData, err := json.Marshal(oldNode) + if err != nil { + return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err) + } + + newTaints := newNode.Spec.Taints + newNodeClone := oldNode.DeepCopy() + newNodeClone.Spec.Taints = newTaints + newData, err := json.Marshal(newNodeClone) + if err != nil { + return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err) + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) + if err != nil { + return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) + } + + _, err = c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, patchBytes) + return err +} + +// addOrUpdateTaint tries to add a taint to annotations list. Returns a new copy of updated Node and true if something was updated +// false otherwise. +func addOrUpdateTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) { + newNode := node.DeepCopy() + nodeTaints := newNode.Spec.Taints + + var newTaints []v1.Taint + updated := false + for i := range nodeTaints { + if taint.MatchTaint(&nodeTaints[i]) { + if equality.Semantic.DeepEqual(*taint, nodeTaints[i]) { + return newNode, false, nil + } + newTaints = append(newTaints, *taint) + updated = true + continue + } + + newTaints = append(newTaints, nodeTaints[i]) + } + + if !updated { + newTaints = append(newTaints, *taint) + } + + newNode.Spec.Taints = newTaints + return newNode, true, nil +} diff --git a/staging/src/k8s.io/csi-api/Godeps/Godeps.json b/staging/src/k8s.io/csi-api/Godeps/Godeps.json index c4e39f69fe4..4bad4f3392e 100644 --- a/staging/src/k8s.io/csi-api/Godeps/Godeps.json +++ b/staging/src/k8s.io/csi-api/Godeps/Godeps.json @@ -12,7 +12,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4" + "Rev": "d4020504c68b6bfa818032bedfb48e33e9638506" }, { "ImportPath": "github.com/gogo/protobuf/proto", diff --git a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json index e39d8fa453e..83c2a8625b0 100644 --- a/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json +++ b/staging/src/k8s.io/kube-aggregator/Godeps/Godeps.json @@ -96,7 +96,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4" + "Rev": "d4020504c68b6bfa818032bedfb48e33e9638506" }, { "ImportPath": "github.com/go-openapi/jsonpointer", diff --git a/staging/src/k8s.io/metrics/Godeps/Godeps.json b/staging/src/k8s.io/metrics/Godeps/Godeps.json index 317fa041e5e..787839a7b07 100644 --- a/staging/src/k8s.io/metrics/Godeps/Godeps.json +++ b/staging/src/k8s.io/metrics/Godeps/Godeps.json @@ -12,7 
+12,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4" + "Rev": "d4020504c68b6bfa818032bedfb48e33e9638506" }, { "ImportPath": "github.com/gogo/protobuf/proto", diff --git a/staging/src/k8s.io/node-api/Godeps/Godeps.json b/staging/src/k8s.io/node-api/Godeps/Godeps.json index b2622459856..2ec3d9fcbd8 100644 --- a/staging/src/k8s.io/node-api/Godeps/Godeps.json +++ b/staging/src/k8s.io/node-api/Godeps/Godeps.json @@ -12,7 +12,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4" + "Rev": "d4020504c68b6bfa818032bedfb48e33e9638506" }, { "ImportPath": "github.com/gogo/protobuf/proto", diff --git a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json index d117d1fbea6..30abbf59cd5 100644 --- a/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-apiserver/Godeps/Godeps.json @@ -88,7 +88,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4" + "Rev": "d4020504c68b6bfa818032bedfb48e33e9638506" }, { "ImportPath": "github.com/go-openapi/jsonpointer", diff --git a/staging/src/k8s.io/sample-apiserver/hack/build-image.sh b/staging/src/k8s.io/sample-apiserver/hack/build-image.sh index c1b3c768589..1402e366a9a 100755 --- a/staging/src/k8s.io/sample-apiserver/hack/build-image.sh +++ b/staging/src/k8s.io/sample-apiserver/hack/build-image.sh @@ -14,8 +14,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +set -o errexit +set -o nounset +set -o pipefail -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../../../.. +KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../../../../.. source "${KUBE_ROOT}/hack/lib/util.sh" # Register function to be called on EXIT to remove generated binary. diff --git a/staging/src/k8s.io/sample-apiserver/hack/update-codegen.sh b/staging/src/k8s.io/sample-apiserver/hack/update-codegen.sh index 98c9ae9e1aa..d4883f401b4 100755 --- a/staging/src/k8s.io/sample-apiserver/hack/update-codegen.sh +++ b/staging/src/k8s.io/sample-apiserver/hack/update-codegen.sh @@ -18,24 +18,24 @@ set -o errexit set -o nounset set -o pipefail -SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/.. -CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} +SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. +CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} # generate the code with: # --output-base because this script should also be able to run inside the vendor dir of # k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir # instead of the $GOPATH directly. For normal projects this can be dropped. -${CODEGEN_PKG}/generate-groups.sh all \ +"${CODEGEN_PKG}/generate-groups.sh" all \ k8s.io/sample-apiserver/pkg/client k8s.io/sample-apiserver/pkg/apis \ "wardle:v1alpha1,v1beta1" \ - --output-base "$(dirname ${BASH_SOURCE})/../../.." \ - --go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt + --output-base "$(dirname "${BASH_SOURCE[0]}")/../../.." 
\ + --go-header-file "${SCRIPT_ROOT}"/hack/boilerplate.go.txt -${CODEGEN_PKG}/generate-internal-groups.sh "deepcopy,defaulter,conversion" \ +"${CODEGEN_PKG}/generate-internal-groups.sh" "deepcopy,defaulter,conversion" \ k8s.io/sample-apiserver/pkg/client k8s.io/sample-apiserver/pkg/apis k8s.io/sample-apiserver/pkg/apis \ "wardle:v1alpha1,v1beta1" \ - --output-base "$(dirname ${BASH_SOURCE})/../../.." \ - --go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt + --output-base "$(dirname "${BASH_SOURCE[0]}")/../../.." \ + --go-header-file "${SCRIPT_ROOT}/hack/boilerplate.go.txt" # To use your own boilerplate text use: -# --go-header-file ${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt +# --go-header-file "${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt" diff --git a/staging/src/k8s.io/sample-apiserver/hack/verify-codegen.sh b/staging/src/k8s.io/sample-apiserver/hack/verify-codegen.sh index ab43ba23d01..d91566e6228 100755 --- a/staging/src/k8s.io/sample-apiserver/hack/verify-codegen.sh +++ b/staging/src/k8s.io/sample-apiserver/hack/verify-codegen.sh @@ -18,8 +18,7 @@ set -o errexit set -o nounset set -o pipefail -SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/.. -SCRIPT_BASE=${SCRIPT_ROOT}/../.. +SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. DIFFROOT="${SCRIPT_ROOT}/pkg" TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg" diff --git a/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json b/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json index a58978263bb..18d8ded2d56 100644 --- a/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-cli-plugin/Godeps/Godeps.json @@ -12,7 +12,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4" + "Rev": "d4020504c68b6bfa818032bedfb48e33e9638506" }, { "ImportPath": "github.com/gogo/protobuf/proto", diff --git a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json index f125e65ca54..724f5704af1 100644 --- a/staging/src/k8s.io/sample-controller/Godeps/Godeps.json +++ b/staging/src/k8s.io/sample-controller/Godeps/Godeps.json @@ -12,7 +12,7 @@ }, { "ImportPath": "github.com/evanphx/json-patch", - "Rev": "36442dbdb585210f8d5a1b45e67aa323c197d5c4" + "Rev": "d4020504c68b6bfa818032bedfb48e33e9638506" }, { "ImportPath": "github.com/gogo/protobuf/proto", diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index b3d87b4e267..d45dbe4d0ab 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -17,7 +17,9 @@ limitations under the License. 
package common import ( + "bytes" "fmt" + "text/template" "time" "k8s.io/api/core/v1" @@ -63,6 +65,51 @@ var CommonImageWhiteList = sets.NewString( imageutils.GetE2EImage(imageutils.Net), ) +type testImagesStruct struct { + BusyBoxImage string + GBFrontendImage string + GBRedisSlaveImage string + KittenImage string + LivenessImage string + MounttestImage string + NautilusImage string + NginxImage string + NginxNewImage string + PauseImage string + RedisImage string +} + +var testImages testImagesStruct + +func init() { + testImages = testImagesStruct{ + imageutils.GetE2EImage(imageutils.BusyBox), + imageutils.GetE2EImage(imageutils.GBFrontend), + imageutils.GetE2EImage(imageutils.GBRedisSlave), + imageutils.GetE2EImage(imageutils.Kitten), + imageutils.GetE2EImage(imageutils.Liveness), + imageutils.GetE2EImage(imageutils.Mounttest), + imageutils.GetE2EImage(imageutils.Nautilus), + imageutils.GetE2EImage(imageutils.Nginx), + imageutils.GetE2EImage(imageutils.NginxNew), + imageutils.GetE2EImage(imageutils.Pause), + imageutils.GetE2EImage(imageutils.Redis), + } +} + +func SubstituteImageName(content string) string { + contentWithImageName := new(bytes.Buffer) + tmpl, err := template.New("imagemanifest").Parse(content) + if err != nil { + framework.Failf("Failed Parse the template: %v", err) + } + err = tmpl.Execute(contentWithImageName, testImages) + if err != nil { + framework.Failf("Failed executing template: %v", err) + } + return contentWithImageName.String() +} + func svcByName(name string, port int) *v1.Service { return &v1.Service{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/e2e/examples.go b/test/e2e/examples.go index a4778420cfe..ec50cc4288f 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -28,6 +28,7 @@ import ( "k8s.io/apiserver/pkg/authentication/serviceaccount" clientset "k8s.io/client-go/kubernetes" podutil "k8s.io/kubernetes/pkg/api/v1/pod" + commonutils "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/testfiles" @@ -62,8 +63,8 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { framework.KubeDescribe("Liveness", func() { It("liveness pods should be automatically restarted", func() { test := "test/fixtures/doc-yaml/user-guide/liveness" - execYaml := readFile(test, "exec-liveness.yaml") - httpYaml := readFile(test, "http-liveness.yaml") + execYaml := readFile(test, "exec-liveness.yaml.in") + httpYaml := readFile(test, "http-liveness.yaml.in") nsFlag := fmt.Sprintf("--namespace=%v", ns) framework.RunKubectlOrDieInput(execYaml, "create", "-f", "-", nsFlag) @@ -111,7 +112,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { It("should create a pod that reads a secret", func() { test := "test/fixtures/doc-yaml/user-guide/secrets" secretYaml := readFile(test, "secret.yaml") - podYaml := readFile(test, "secret-pod.yaml") + podYaml := readFile(test, "secret-pod.yaml.in") nsFlag := fmt.Sprintf("--namespace=%v", ns) podName := "secret-test-pod" @@ -131,7 +132,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { framework.KubeDescribe("Downward API", func() { It("should create a pod that prints his name and namespace", func() { test := "test/fixtures/doc-yaml/user-guide/downward-api" - podYaml := readFile(test, "dapi-pod.yaml") + podYaml := readFile(test, "dapi-pod.yaml.in") nsFlag := fmt.Sprintf("--namespace=%v", ns) podName := "dapi-test-pod" @@ -151,5 +152,5 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { func readFile(test, file string) string { from 
:= filepath.Join(test, file) - return string(testfiles.ReadOrDie(from, Fail)) + return commonutils.SubstituteImageName(string(testfiles.ReadOrDie(from, Fail))) } diff --git a/test/e2e/framework/metrics_util.go b/test/e2e/framework/metrics_util.go index fce60531681..83adb80b4c6 100644 --- a/test/e2e/framework/metrics_util.go +++ b/test/e2e/framework/metrics_util.go @@ -137,7 +137,9 @@ var SchedulingLatencyMetricName = model.LabelValue(schedulermetric.SchedulerSubs var InterestingApiServerMetrics = []string{ "apiserver_request_total", - "apiserver_request_latency_seconds_summary", + // TODO(krzysied): apiserver_request_latencies_summary is a deprecated metric. + // It should be replaced with new metric. + "apiserver_request_latencies_summary", "etcd_helper_cache_entry_total", "etcd_helper_cache_hit_total", "etcd_helper_cache_miss_total", @@ -475,9 +477,9 @@ func readLatencyMetrics(c clientset.Interface) (*APIResponsiveness, error) { for _, sample := range samples { // Example line: - // apiserver_request_latency_seconds_summary{resource="namespaces",verb="LIST",quantile="0.99"} 0.000908 + // apiserver_request_latencies_summary{resource="namespaces",verb="LIST",quantile="0.99"} 908 // apiserver_request_total{resource="pods",verb="LIST",client="kubectl",code="200",contentType="json"} 233 - if sample.Metric[model.MetricNameLabel] != "apiserver_request_latency_seconds_summary" && + if sample.Metric[model.MetricNameLabel] != "apiserver_request_latencies_summary" && sample.Metric[model.MetricNameLabel] != "apiserver_request_total" { continue } @@ -491,13 +493,13 @@ func readLatencyMetrics(c clientset.Interface) (*APIResponsiveness, error) { } switch sample.Metric[model.MetricNameLabel] { - case "apiserver_request_latency_seconds_summary": + case "apiserver_request_latencies_summary": latency := sample.Value quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64) if err != nil { return nil, err } - a.addMetricRequestLatency(resource, subresource, verb, scope, quantile, time.Duration(int64(latency))*time.Second) + a.addMetricRequestLatency(resource, subresource, verb, scope, quantile, time.Duration(int64(latency))*time.Microsecond) case "apiserver_request_total": count := sample.Value a.addMetricRequestCount(resource, subresource, verb, scope, int(count)) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 4c60c3e3173..7415de0d417 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -827,7 +827,7 @@ func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[s func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) { By("Deleting namespaces") nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + Expect(err).NotTo(HaveOccurred(), "Failed to get namespace list") var deleted []string var wg sync.WaitGroup OUTER: @@ -1833,7 +1833,7 @@ func WaitForEndpoint(c clientset.Interface, ns, name string) error { Logf("Endpoint %s/%s is not ready yet", ns, name) continue } - Expect(err).NotTo(HaveOccurred()) + Expect(err).NotTo(HaveOccurred(), "Failed to get endpoints for %s/%s", ns, name) if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 { Logf("Endpoint %s/%s is not ready yet", ns, name) continue @@ -1865,7 +1865,7 @@ func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) { successes := 0 options := metav1.ListOptions{LabelSelector: r.label.String()} currentPods, err := 
r.c.CoreV1().Pods(r.ns).List(options) - Expect(err).NotTo(HaveOccurred()) + Expect(err).NotTo(HaveOccurred(), "Failed to get list of currentPods in namespace: %s", r.ns) for i, pod := range r.pods.Items { // Check that the replica list remains unchanged, otherwise we have problems. if !isElementOf(pod.UID, currentPods) { @@ -2496,7 +2496,7 @@ type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error func DumpEventsInNamespace(eventsLister EventsLister, namespace string) { By(fmt.Sprintf("Collecting events from namespace %q.", namespace)) events, err := eventsLister(metav1.ListOptions{}, namespace) - Expect(err).NotTo(HaveOccurred()) + Expect(err).NotTo(HaveOccurred(), "failed to list events in namespace %q", namespace) By(fmt.Sprintf("Found %d events.", len(events.Items))) // Sort events by their first timestamp @@ -3479,7 +3479,7 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw tweak(execPod) } created, err := client.CoreV1().Pods(ns).Create(execPod) - Expect(err).NotTo(HaveOccurred()) + Expect(err).NotTo(HaveOccurred(), "failed to create new exec pod in namespace: %s", ns) err = wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) { retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{}) if err != nil { @@ -3515,13 +3515,13 @@ func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]s }, } _, err := c.CoreV1().Pods(ns).Create(pod) - Expect(err).NotTo(HaveOccurred()) + Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace %s", name, ns) } func DeletePodOrFail(c clientset.Interface, ns, name string) { By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns)) err := c.CoreV1().Pods(ns).Delete(name, nil) - Expect(err).NotTo(HaveOccurred()) + Expect(err).NotTo(HaveOccurred(), "failed to delete pod %s in namespace %s", name, ns) } // CheckPodsRunningReady returns whether all pods whose names are listed in @@ -4006,7 +4006,7 @@ func getApiserverRestartCount(c clientset.Interface) (int32, error) { } return s.RestartCount, nil } - return -1, fmt.Errorf("failed to find kube-apiserver container in pod") + return -1, fmt.Errorf("Failed to find kube-apiserver container in pod") } func RestartControllerManager() error { @@ -4222,7 +4222,7 @@ func headersForConfig(c *restclient.Config, url *url.URL) (http.Header, error) { func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) { tlsConfig, err := restclient.TLSConfigFor(config) if err != nil { - return nil, fmt.Errorf("failed to create tls config: %v", err) + return nil, fmt.Errorf("Failed to create tls config: %v", err) } if tlsConfig != nil { url.Scheme = "wss" @@ -4237,11 +4237,11 @@ func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []st } headers, err := headersForConfig(config, url) if err != nil { - return nil, fmt.Errorf("failed to load http headers: %v", err) + return nil, fmt.Errorf("Failed to load http headers: %v", err) } cfg, err := websocket.NewConfig(url.String(), "http://localhost") if err != nil { - return nil, fmt.Errorf("failed to create websocket config: %v", err) + return nil, fmt.Errorf("Failed to create websocket config: %v", err) } cfg.Header = headers cfg.TlsConfig = tlsConfig @@ -5064,7 +5064,7 @@ func DsFromManifest(url string) (*apps.DaemonSet, error) { } if err != nil { - return nil, fmt.Errorf("failed to get url: %v", err) + return nil, fmt.Errorf("Failed to get url: %v", err) } if 
response.StatusCode != 200 { return nil, fmt.Errorf("invalid http response status: %v", response.StatusCode) @@ -5073,17 +5073,17 @@ func DsFromManifest(url string) (*apps.DaemonSet, error) { data, err := ioutil.ReadAll(response.Body) if err != nil { - return nil, fmt.Errorf("failed to read html response body: %v", err) + return nil, fmt.Errorf("Failed to read html response body: %v", err) } json, err := utilyaml.ToJSON(data) if err != nil { - return nil, fmt.Errorf("failed to parse data to json: %v", err) + return nil, fmt.Errorf("Failed to parse data to json: %v", err) } err = runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), json, &controller) if err != nil { - return nil, fmt.Errorf("failed to decode DaemonSet spec: %v", err) + return nil, fmt.Errorf("Failed to decode DaemonSet spec: %v", err) } return &controller, nil } @@ -5147,7 +5147,7 @@ func WaitForNodeHasTaintOrNot(c clientset.Interface, nodeName string, taint *v1. if err := wait.PollImmediate(Poll, timeout, func() (bool, error) { has, err := NodeHasTaint(c, nodeName, taint) if err != nil { - return false, fmt.Errorf("failed to check taint %s on node %s or not", taint.ToString(), nodeName) + return false, fmt.Errorf("Failed to check taint %s on node %s or not", taint.ToString(), nodeName) } return has == wantTrue, nil }); err != nil { diff --git a/test/e2e/kubectl/BUILD b/test/e2e/kubectl/BUILD index 971e90cecfa..1a29971bcdc 100644 --- a/test/e2e/kubectl/BUILD +++ b/test/e2e/kubectl/BUILD @@ -29,6 +29,7 @@ go_library( "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//staging/src/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/scheduling:go_default_library", diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index 72926eb0d17..078323e4565 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -37,7 +37,6 @@ import ( "sort" "strconv" "strings" - "text/template" "time" "github.com/elazarl/goproxy" @@ -57,6 +56,7 @@ import ( genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/controller" + commonutils "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/testfiles" "k8s.io/kubernetes/test/e2e/scheduling" @@ -98,25 +98,6 @@ var ( busyboxImage = imageutils.GetE2EImage(imageutils.BusyBox) ) -var testImages = struct { - GBFrontendImage string - PauseImage string - NginxImage string - NginxNewImage string - RedisImage string - GBRedisSlaveImage string - NautilusImage string - KittenImage string -}{ - imageutils.GetE2EImage(imageutils.GBFrontend), - imageutils.GetE2EImage(imageutils.Pause), - imageutils.GetE2EImage(imageutils.Nginx), - imageutils.GetE2EImage(imageutils.NginxNew), - imageutils.GetE2EImage(imageutils.Redis), - imageutils.GetE2EImage(imageutils.GBRedisSlave), - imageutils.GetE2EImage(imageutils.Nautilus), - imageutils.GetE2EImage(imageutils.Kitten), -} var ( proxyRegexp = regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)") @@ -138,19 +119,6 @@ func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) { framework.AssertCleanup(ns, selectors...) 
} -func substituteImageName(content string) string { - contentWithImageName := new(bytes.Buffer) - tmpl, err := template.New("imagemanifest").Parse(content) - if err != nil { - framework.Failf("Failed Parse the template: %v", err) - } - err = tmpl.Execute(contentWithImageName, testImages) - if err != nil { - framework.Failf("Failed executing template: %v", err) - } - return contentWithImageName.String() -} - func readTestFileOrDie(file string) []byte { return testfiles.ReadOrDie(path.Join(kubeCtlManifestPath, file), Fail) } @@ -294,8 +262,8 @@ var _ = SIGDescribe("Kubectl client", func() { var nautilus, kitten string BeforeEach(func() { updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo" - nautilus = substituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"), Fail))) - kitten = substituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "kitten-rc.yaml.in"), Fail))) + nautilus = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"), Fail))) + kitten = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "kitten-rc.yaml.in"), Fail))) }) /* Release : v1.9 @@ -359,7 +327,7 @@ var _ = SIGDescribe("Kubectl client", func() { "redis-master-deployment.yaml.in", "redis-slave-deployment.yaml.in", } { - contents := substituteImageName(string(testfiles.ReadOrDie(filepath.Join(guestbookRoot, gbAppFile), Fail))) + contents := commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(guestbookRoot, gbAppFile), Fail))) run(contents) } } @@ -388,7 +356,7 @@ var _ = SIGDescribe("Kubectl client", func() { var podYaml string BeforeEach(func() { By(fmt.Sprintf("creating the pod from %v", podYaml)) - podYaml = substituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in"))) + podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in"))) framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) Expect(framework.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(BeTrue()) }) @@ -748,7 +716,7 @@ metadata: framework.KubeDescribe("Kubectl apply", func() { It("should apply a new configuration to an existing RC", func() { - controllerJson := substituteImageName(string(readTestFileOrDie(redisControllerFilename))) + controllerJson := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) nsFlag := fmt.Sprintf("--namespace=%v", ns) By("creating Redis RC") @@ -784,9 +752,9 @@ metadata: }) It("apply set/view last-applied", func() { - deployment1Yaml := substituteImageName(string(readTestFileOrDie(nginxDeployment1Filename))) - deployment2Yaml := substituteImageName(string(readTestFileOrDie(nginxDeployment2Filename))) - deployment3Yaml := substituteImageName(string(readTestFileOrDie(nginxDeployment3Filename))) + deployment1Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(nginxDeployment1Filename))) + deployment2Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(nginxDeployment2Filename))) + deployment3Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(nginxDeployment3Filename))) nsFlag := fmt.Sprintf("--namespace=%v", ns) By("deployment replicas number is 2") @@ -864,7 +832,7 @@ metadata: kv, err := framework.KubectlVersion() Expect(err).NotTo(HaveOccurred()) framework.SkipUnlessServerVersionGTE(kv, c.Discovery()) - controllerJson := 
substituteImageName(string(readTestFileOrDie(redisControllerFilename))) + controllerJson := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) serviceJson := readTestFileOrDie(redisServiceFilename) nsFlag := fmt.Sprintf("--namespace=%v", ns) @@ -969,7 +937,7 @@ metadata: Description: Create a Pod running redis master listening to port 6379. Using kubectl expose the redis master replication controllers at port 1234. Validate that the replication controller is listening on port 1234 and the target port is set to 6379, port that redis master is listening. Using kubectl expose the redis master as a service at port 2345. The service MUST be listening on port 2345 and the target port is set to 6379, port that redis master is listening. */ framework.ConformanceIt("should create services for rc ", func() { - controllerJson := substituteImageName(string(readTestFileOrDie(redisControllerFilename))) + controllerJson := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) nsFlag := fmt.Sprintf("--namespace=%v", ns) redisPort := 6379 @@ -1051,7 +1019,7 @@ metadata: var nsFlag string BeforeEach(func() { By("creating the pod") - podYaml = substituteImageName(string(readTestFileOrDie("pause-pod.yaml.in"))) + podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in"))) nsFlag = fmt.Sprintf("--namespace=%v", ns) framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) Expect(framework.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(BeTrue()) @@ -1093,7 +1061,7 @@ metadata: BeforeEach(func() { By("creating the pod") nsFlag = fmt.Sprintf("--namespace=%v", ns) - podYaml = substituteImageName(string(readTestFileOrDie("busybox-pod.yaml"))) + podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml"))) framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) Expect(framework.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout)).To(BeTrue()) }) @@ -1133,7 +1101,7 @@ metadata: containerName := "redis-master" BeforeEach(func() { By("creating an rc") - rc = substituteImageName(string(readTestFileOrDie(redisControllerFilename))) + rc = commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) nsFlag = fmt.Sprintf("--namespace=%v", ns) framework.RunKubectlOrDieInput(rc, "create", "-f", "-", nsFlag) }) @@ -1209,7 +1177,7 @@ metadata: Description: Start running a redis master and a replication controller. When the pod is running, using ‘kubectl patch’ command add annotations. The annotation MUST be added to running pods and SHOULD be able to read added annotations from each of the Pods running under the replication controller. 
*/ framework.ConformanceIt("should add annotations for pods in rc ", func() { - controllerJson := substituteImageName(string(readTestFileOrDie(redisControllerFilename))) + controllerJson := commonutils.SubstituteImageName(string(readTestFileOrDie(redisControllerFilename))) nsFlag := fmt.Sprintf("--namespace=%v", ns) By("creating Redis RC") framework.RunKubectlOrDieInput(controllerJson, "create", "-f", "-", nsFlag) diff --git a/test/e2e/scalability/load.go b/test/e2e/scalability/load.go index 5e2a95e2d9c..a89067886cd 100644 --- a/test/e2e/scalability/load.go +++ b/test/e2e/scalability/load.go @@ -377,9 +377,13 @@ func createClients(numberOfClients int) ([]clientset.Interface, []internalclient KeepAlive: 30 * time.Second, }).DialContext, }) + config.WrapTransport = transportConfig.WrapTransport + config.Dial = transportConfig.Dial // Overwrite TLS-related fields from config to avoid collision with // Transport field. config.TLSClientConfig = restclient.TLSClientConfig{} + config.AuthProvider = nil + config.ExecProvider = nil c, err := clientset.NewForConfig(config) if err != nil { diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go index dbcc8a85925..f0b943061e6 100644 --- a/test/e2e_node/density_test.go +++ b/test/e2e_node/density_test.go @@ -455,7 +455,7 @@ func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, inter func getPodStartLatency(node string) (framework.KubeletLatencyMetrics, error) { latencyMetrics := framework.KubeletLatencyMetrics{} ms, err := metrics.GrabKubeletMetricsWithoutProxy(node) - Expect(err).NotTo(HaveOccurred()) + Expect(err).NotTo(HaveOccurred(), "Failed to get kubelet metrics without proxy in node %s", node) for _, samples := range ms { for _, sample := range samples { diff --git a/test/fixtures/doc-yaml/user-guide/downward-api/dapi-pod.yaml b/test/fixtures/doc-yaml/user-guide/downward-api/dapi-pod.yaml.in similarity index 95% rename from test/fixtures/doc-yaml/user-guide/downward-api/dapi-pod.yaml rename to test/fixtures/doc-yaml/user-guide/downward-api/dapi-pod.yaml.in index aecd746ec94..93abba27dea 100644 --- a/test/fixtures/doc-yaml/user-guide/downward-api/dapi-pod.yaml +++ b/test/fixtures/doc-yaml/user-guide/downward-api/dapi-pod.yaml.in @@ -5,7 +5,7 @@ metadata: spec: containers: - name: test-container - image: busybox + image: {{.BusyBoxImage}} command: [ "/bin/sh", "-c", "env" ] env: - name: MY_POD_NAME diff --git a/test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml b/test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml.in similarity index 92% rename from test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml rename to test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml.in index c07db26c922..ba32fe42040 100644 --- a/test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml +++ b/test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml.in @@ -10,7 +10,7 @@ spec: - /bin/sh - -c - echo ok > /tmp/health; sleep 10; rm -rf /tmp/health; sleep 600 - image: busybox + image: {{.BusyBoxImage}} livenessProbe: exec: command: diff --git a/test/fixtures/doc-yaml/user-guide/liveness/http-liveness.yaml b/test/fixtures/doc-yaml/user-guide/liveness/http-liveness.yaml.in similarity index 86% rename from test/fixtures/doc-yaml/user-guide/liveness/http-liveness.yaml rename to test/fixtures/doc-yaml/user-guide/liveness/http-liveness.yaml.in index 713ff6ee616..468bad6135e 100644 --- a/test/fixtures/doc-yaml/user-guide/liveness/http-liveness.yaml +++ 
b/test/fixtures/doc-yaml/user-guide/liveness/http-liveness.yaml.in @@ -8,7 +8,7 @@ spec: containers: - args: - /server - image: gcr.io/kubernetes-e2e-test-images/liveness:1.0 + image: {{.LivenessImage}} livenessProbe: httpGet: path: /healthz diff --git a/test/fixtures/doc-yaml/user-guide/secrets/secret-pod.yaml b/test/fixtures/doc-yaml/user-guide/secrets/secret-pod.yaml.in similarity index 73% rename from test/fixtures/doc-yaml/user-guide/secrets/secret-pod.yaml rename to test/fixtures/doc-yaml/user-guide/secrets/secret-pod.yaml.in index 3448b8f57a0..ece33ee3a5d 100644 --- a/test/fixtures/doc-yaml/user-guide/secrets/secret-pod.yaml +++ b/test/fixtures/doc-yaml/user-guide/secrets/secret-pod.yaml.in @@ -5,8 +5,8 @@ metadata: spec: containers: - name: test-container - image: gcr.io/kubernetes-e2e-test-images/mounttest:1.0 - command: [ "/mt", "--file_content=/etc/secret-volume/data-1" ] + image: {{.MounttestImage}} + command: [ "/mounttest", "--file_content=/etc/secret-volume/data-1" ] volumeMounts: # name must match the volume name below - name: secret-volume diff --git a/test/images/pets/zookeeper-installer/Dockerfile b/test/images/pets/zookeeper-installer/Dockerfile index 020de082fee..579862ee713 100644 --- a/test/images/pets/zookeeper-installer/Dockerfile +++ b/test/images/pets/zookeeper-installer/Dockerfile @@ -18,7 +18,7 @@ FROM BASEIMAGE CROSS_BUILD_COPY qemu-QEMUARCH-static /usr/bin/ -RUN clean-install wget netcat +RUN clean-install wget bash netcat ADD on-start.sh / diff --git a/test/images/pets/zookeeper-installer/VERSION b/test/images/pets/zookeeper-installer/VERSION index 5625e59da88..7e32cd56983 100644 --- a/test/images/pets/zookeeper-installer/VERSION +++ b/test/images/pets/zookeeper-installer/VERSION @@ -1 +1 @@ -1.2 +1.3 diff --git a/test/integration/etcd/BUILD b/test/integration/etcd/BUILD index 886566f7ffd..700a438d666 100644 --- a/test/integration/etcd/BUILD +++ b/test/integration/etcd/BUILD @@ -6,6 +6,7 @@ go_test( name = "go_default_test", size = "large", srcs = [ + "etcd_cross_group_test.go", "etcd_storage_path_test.go", "main_test.go", ], @@ -24,6 +25,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//test/integration/framework:go_default_library", "//vendor/github.com/coreos/etcd/clientv3:go_default_library", diff --git a/test/integration/etcd/etcd_cross_group_test.go b/test/integration/etcd/etcd_cross_group_test.go new file mode 100644 index 00000000000..d801aea7c7b --- /dev/null +++ b/test/integration/etcd/etcd_cross_group_test.go @@ -0,0 +1,184 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package etcd + +import ( + "context" + "encoding/json" + "testing" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/kubernetes/cmd/kube-apiserver/app/options" +) + +// TestCrossGroupStorage tests to make sure that all objects stored in an expected location in etcd can be converted/read. +func TestCrossGroupStorage(t *testing.T) { + master := StartRealMasterOrDie(t, func(opts *options.ServerRunOptions) { + // force enable all resources so we can check storage. + // TODO: drop these once we stop allowing them to be served. + opts.APIEnablement.RuntimeConfig["extensions/v1beta1/deployments"] = "true" + opts.APIEnablement.RuntimeConfig["extensions/v1beta1/daemonsets"] = "true" + opts.APIEnablement.RuntimeConfig["extensions/v1beta1/replicasets"] = "true" + opts.APIEnablement.RuntimeConfig["extensions/v1beta1/podsecuritypolicies"] = "true" + opts.APIEnablement.RuntimeConfig["extensions/v1beta1/networkpolicies"] = "true" + }) + defer master.Cleanup() + + etcdStorageData := GetEtcdStorageData() + + crossGroupResources := map[schema.GroupVersionKind][]Resource{} + + master.Client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: testNamespace}}) + + // Group by persisted GVK + for _, resourceToPersist := range master.Resources { + gvk := resourceToPersist.Mapping.GroupVersionKind + data, exists := etcdStorageData[resourceToPersist.Mapping.Resource] + if !exists { + continue + } + storageGVK := gvk + if data.ExpectedGVK != nil { + storageGVK = *data.ExpectedGVK + } + crossGroupResources[storageGVK] = append(crossGroupResources[storageGVK], resourceToPersist) + } + + // Clear any without cross-group sources + for gvk, resources := range crossGroupResources { + groups := sets.NewString() + for _, resource := range resources { + groups.Insert(resource.Mapping.GroupVersionKind.Group) + } + if len(groups) < 2 { + delete(crossGroupResources, gvk) + } + } + + if len(crossGroupResources) == 0 { + // Sanity check + t.Fatal("no cross-group resources found") + } + + // Test all potential cross-group sources can be watched and fetched from all other sources + for gvk, resources := range crossGroupResources { + t.Run(gvk.String(), func(t *testing.T) { + // use the first one to create the initial object + resource := resources[0] + + // compute namespace + ns := "" + if resource.Mapping.Scope.Name() == meta.RESTScopeNameNamespace { + ns = testNamespace + } + + data := etcdStorageData[resource.Mapping.Resource] + // create object + resourceClient, obj, err := JSONToUnstructured(data.Stub, ns, resource.Mapping, master.Dynamic) + if err != nil { + t.Fatal(err) + } + actual, err := resourceClient.Create(obj, metav1.CreateOptions{}) + if err != nil { + t.Fatal(err) + } + name := actual.GetName() + + // Set up clients, versioned data, and watches for all versions + var ( + clients = map[schema.GroupVersionResource]dynamic.ResourceInterface{} + versionedData = map[schema.GroupVersionResource]*unstructured.Unstructured{} + watches = map[schema.GroupVersionResource]watch.Interface{} + ) + for _, resource := range resources { + clients[resource.Mapping.Resource] = master.Dynamic.Resource(resource.Mapping.Resource).Namespace(ns) + versionedData[resource.Mapping.Resource], err = 
clients[resource.Mapping.Resource].Get(name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("error finding resource via %s: %v", resource.Mapping.Resource.GroupVersion().String(), err) + } + watches[resource.Mapping.Resource], err = clients[resource.Mapping.Resource].Watch(metav1.ListOptions{ResourceVersion: actual.GetResourceVersion()}) + if err != nil { + t.Fatalf("error opening watch via %s: %v", resource.Mapping.Resource.GroupVersion().String(), err) + } + } + + for _, resource := range resources { + // clear out the things cleared in etcd + versioned := versionedData[resource.Mapping.Resource] + versioned.SetResourceVersion("") + versioned.SetSelfLink("") + versionedJSON, err := versioned.MarshalJSON() + if err != nil { + t.Error(err) + continue + } + + // Update in etcd + if _, err := master.KV.Put(context.Background(), data.ExpectedEtcdPath, string(versionedJSON)); err != nil { + t.Error(err) + continue + } + t.Logf("wrote %s to etcd", resource.Mapping.Resource.GroupVersion().String()) + + // Ensure everyone gets a watch event with the right version + for watchResource, watcher := range watches { + select { + case event, ok := <-watcher.ResultChan(): + if !ok { + t.Fatalf("watch of %s closed in response to persisting %s", watchResource.GroupVersion().String(), resource.Mapping.Resource.GroupVersion().String()) + } + if event.Type != watch.Modified { + eventJSON, _ := json.Marshal(event) + t.Errorf("unexpected watch event sent to watch of %s in response to persisting %s: %s", watchResource.GroupVersion().String(), resource.Mapping.Resource.GroupVersion().String(), string(eventJSON)) + continue + } + if event.Object.GetObjectKind().GroupVersionKind().GroupVersion() != watchResource.GroupVersion() { + t.Errorf("unexpected group version object sent to watch of %s in response to persisting %s: %#v", watchResource.GroupVersion().String(), resource.Mapping.Resource.GroupVersion().String(), event.Object) + continue + } + t.Logf(" received event for %s", watchResource.GroupVersion().String()) + case <-time.After(30 * time.Second): + t.Errorf("timed out waiting for watch event for %s in response to persisting %s", watchResource.GroupVersion().String(), resource.Mapping.Resource.GroupVersion().String()) + continue + } + } + + // Ensure everyone can do a direct get and gets the right version + for clientResource, client := range clients { + obj, err := client.Get(name, metav1.GetOptions{}) + if err != nil { + t.Errorf("error looking up %s after persisting %s", clientResource.GroupVersion().String(), resource.Mapping.Resource.GroupVersion().String()) + continue + } + if obj.GetObjectKind().GroupVersionKind().GroupVersion() != clientResource.GroupVersion() { + t.Errorf("unexpected group version retrieved from %s after persisting %s: %#v", clientResource.GroupVersion().String(), resource.Mapping.Resource.GroupVersion().String(), obj) + continue + } + t.Logf(" fetched object for %s", clientResource.GroupVersion().String()) + } + } + }) + } +} diff --git a/vendor/BUILD b/vendor/BUILD index 03d55b2154d..390722226f1 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -474,6 +474,7 @@ filegroup( "//vendor/k8s.io/utils/io:all-srcs", "//vendor/k8s.io/utils/keymutex:all-srcs", "//vendor/k8s.io/utils/nsenter:all-srcs", + "//vendor/k8s.io/utils/path:all-srcs", "//vendor/k8s.io/utils/pointer:all-srcs", "//vendor/k8s.io/utils/strings:all-srcs", "//vendor/k8s.io/utils/trace:all-srcs", diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md index 
13b90420ca1..ad011b2fac6 100644 --- a/vendor/github.com/evanphx/json-patch/README.md +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -25,6 +25,23 @@ go get -u github.com/evanphx/json-patch * [Comparing JSON documents](#comparing-json-documents) * [Combine merge patches](#combine-merge-patches) + +# Configuration + +* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. + This defaults to `true` and enables the non-standard practice of allowing + negative indices to mean indices starting at the end of an array. This + functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = + false`. + +* There is a global configuration variable `jsonpatch.ArraySizeLimit`, which + limits the length of any array the patched object can have. It defaults to 0, + which means there is no limit. + +* There is a global configuration variable `jsonpatch.ArraySizeAdditionLimit`, + which limits the increase of array length caused by each operation. It + defaults to 0, which means there is no limit. + ## Create and apply a merge patch Given both an original JSON document and a modified JSON document, you can create a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index 3d76e9e38b0..b1ec06ae173 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -14,6 +14,10 @@ const ( eAry ) +var SupportNegativeIndices bool = true +var ArraySizeLimit int = 0 +var ArraySizeAdditionLimit int = 0 + type lazyNode struct { raw *json.RawMessage doc partialDoc @@ -61,6 +65,19 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error { return nil } +func deepCopy(src *lazyNode) (*lazyNode, error) { + if src == nil { + return nil, nil + } + a, err := src.MarshalJSON() + if err != nil { + return nil, err + } + ra := make(json.RawMessage, len(a)) + copy(ra, a) + return newLazyNode(&ra), nil +} + func (n *lazyNode) intoDoc() (*partialDoc, error) { if n.which == eDoc { return &n.doc, nil @@ -354,10 +371,19 @@ func (d *partialArray) set(key string, val *lazyNode) error { } sz := len(*d) + + if diff := idx + 1 - sz; ArraySizeAdditionLimit > 0 && diff > ArraySizeAdditionLimit { + return fmt.Errorf("Unable to increase the array size by %d, the limit is %d", diff, ArraySizeAdditionLimit) + } + if idx+1 > sz { sz = idx + 1 } + if ArraySizeLimit > 0 && sz > ArraySizeLimit { + return fmt.Errorf("Unable to create array of size %d, limit is %d", sz, ArraySizeLimit) + } + ary := make([]*lazyNode, sz) cur := *d @@ -385,17 +411,29 @@ func (d *partialArray) add(key string, val *lazyNode) error { return err } - ary := make([]*lazyNode, len(*d)+1) + sz := len(*d) + 1 + if ArraySizeLimit > 0 && sz > ArraySizeLimit { + return fmt.Errorf("Unable to create array of size %d, limit is %d", sz, ArraySizeLimit) + } + + ary := make([]*lazyNode, sz) cur := *d - if idx < -len(ary) || idx >= len(ary) { + if idx >= len(ary) { return fmt.Errorf("Unable to access invalid index: %d", idx) } - if idx < 0 { - idx += len(ary) + if SupportNegativeIndices { + if idx < -len(ary) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + if idx < 0 { + idx += len(ary) + } } + copy(ary[0:idx], cur[0:idx]) ary[idx] = val copy(ary[idx+1:], cur[idx:]) @@ -426,11 +464,18 @@ func (d *partialArray) remove(key string) error { cur := *d - if idx < -len(cur) || idx >= len(cur) { - return fmt.Errorf("Unable to remove invalid index: %d", idx) + if idx >= 
len(cur) { + return fmt.Errorf("Unable to access invalid index: %d", idx) } - if idx < 0 { - idx += len(cur) + + if SupportNegativeIndices { + if idx < -len(cur) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + if idx < 0 { + idx += len(cur) + } } ary := make([]*lazyNode, len(cur)-1) @@ -567,7 +612,12 @@ func (p Patch) copy(doc *container, op operation) error { return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path) } - return con.set(key, val) + valCopy, err := deepCopy(val) + if err != nil { + return err + } + + return con.add(key, valCopy) } // Equal indicates if 2 JSON documents have the same structural equality. diff --git a/vendor/k8s.io/utils/path/BUILD b/vendor/k8s.io/utils/path/BUILD new file mode 100644 index 00000000000..43e3f6f88d6 --- /dev/null +++ b/vendor/k8s.io/utils/path/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["file.go"], + importmap = "k8s.io/kubernetes/vendor/k8s.io/utils/path", + importpath = "k8s.io/utils/path", + visibility = ["//visibility:public"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/vendor/k8s.io/utils/path/file.go b/vendor/k8s.io/utils/path/file.go new file mode 100644 index 00000000000..a57285d3b45 --- /dev/null +++ b/vendor/k8s.io/utils/path/file.go @@ -0,0 +1,78 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path + +import ( + "errors" + "os" +) + +// LinkTreatment is the base type for constants used by Exists that indicate +// how symlinks are treated for existence checks. +type LinkTreatment int + +const ( + // CheckFollowSymlink follows the symlink and verifies that the target of + // the symlink exists. + CheckFollowSymlink LinkTreatment = iota + + // CheckSymlinkOnly does not follow the symlink and verfies only that they + // symlink itself exists. + CheckSymlinkOnly +) + +// ErrInvalidLinkTreatment indicates that the link treatment behavior requested +// is not a valid behavior. +var ErrInvalidLinkTreatment = errors.New("unknown link behavior") + +// Exists checks if specified file, directory, or symlink exists. The behavior +// of the test depends on the linkBehaviour argument. See LinkTreatment for +// more details. 
+func Exists(linkBehavior LinkTreatment, filename string) (bool, error) { + var err error + + if linkBehavior == CheckFollowSymlink { + _, err = os.Stat(filename) + } else if linkBehavior == CheckSymlinkOnly { + _, err = os.Lstat(filename) + } else { + return false, ErrInvalidLinkTreatment + } + + if os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + +// ReadDirNoStat returns a string of files/directories contained +// in dirname without calling lstat on them. +func ReadDirNoStat(dirname string) ([]string, error) { + if dirname == "" { + dirname = "." + } + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + return f.Readdirnames(-1) +}
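Illustrative usage sketches for two of the vendored changes above. These are not part of the patch; the import paths, sample values, and program structure are assumptions based only on the vendored sources shown in this diff.

First, a minimal sketch of the package-level knobs documented in the updated github.com/evanphx/json-patch README (SupportNegativeIndices, ArraySizeLimit, ArraySizeAdditionLimit), assuming the package is imported under its canonical path:

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Package-level configuration added in the bumped json-patch revision.
	// Defaults per the README above: SupportNegativeIndices=true,
	// ArraySizeLimit=0 (no limit), ArraySizeAdditionLimit=0 (no limit).
	// The values below are only examples.
	jsonpatch.SupportNegativeIndices = false
	jsonpatch.ArraySizeLimit = 1000
	jsonpatch.ArraySizeAdditionLimit = 10

	doc := []byte(`{"items":["a","b"]}`)
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op":"add","path":"/items/2","value":"c"}]`))
	if err != nil {
		panic(err)
	}
	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"items":["a","b","c"]}
}

Second, a minimal sketch of the newly vendored k8s.io/utils/path helpers (Exists with both link treatments, and ReadDirNoStat), using a temporary directory purely for demonstration:

package main

import (
	"fmt"
	"os"

	"k8s.io/utils/path"
)

func main() {
	target := os.TempDir() // any existing path works for the example

	// CheckFollowSymlink follows symlinks, so this reports whether the
	// final target of the path exists.
	exists, err := path.Exists(path.CheckFollowSymlink, target)
	if err != nil {
		fmt.Fprintf(os.Stderr, "stat %s: %v\n", target, err)
		os.Exit(1)
	}
	fmt.Printf("%s exists (following symlinks): %v\n", target, exists)

	// CheckSymlinkOnly uses lstat, so a dangling symlink still counts as
	// existing.
	exists, err = path.Exists(path.CheckSymlinkOnly, target)
	if err != nil {
		fmt.Fprintf(os.Stderr, "lstat %s: %v\n", target, err)
		os.Exit(1)
	}
	fmt.Printf("%s exists (symlink itself): %v\n", target, exists)

	// ReadDirNoStat lists directory entries without stat-ing each one.
	names, err := path.ReadDirNoStat(target)
	if err != nil {
		fmt.Fprintf(os.Stderr, "read dir %s: %v\n", target, err)
		os.Exit(1)
	}
	fmt.Printf("%s contains %d entries\n", target, len(names))
}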