diff --git a/build/visible_to/BUILD b/build/visible_to/BUILD index 39784824551..0d5bb841879 100644 --- a/build/visible_to/BUILD +++ b/build/visible_to/BUILD @@ -212,7 +212,6 @@ package_group( "//staging/src/k8s.io/kubectl/pkg/cmd/portforward", "//staging/src/k8s.io/kubectl/pkg/cmd/proxy", "//staging/src/k8s.io/kubectl/pkg/cmd/replace", - "//staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate", "//staging/src/k8s.io/kubectl/pkg/cmd/rollout", "//staging/src/k8s.io/kubectl/pkg/cmd/run", "//staging/src/k8s.io/kubectl/pkg/cmd/scale", @@ -260,7 +259,6 @@ package_group( "//staging/src/k8s.io/kubectl/pkg/cmd/patch", "//staging/src/k8s.io/kubectl/pkg/cmd/portforward", "//staging/src/k8s.io/kubectl/pkg/cmd/replace", - "//staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate", "//staging/src/k8s.io/kubectl/pkg/cmd/rollout", "//staging/src/k8s.io/kubectl/pkg/cmd/run", "//staging/src/k8s.io/kubectl/pkg/cmd/set", @@ -312,7 +310,6 @@ package_group( "//staging/src/k8s.io/kubectl/pkg/cmd/portforward", "//staging/src/k8s.io/kubectl/pkg/cmd/proxy", "//staging/src/k8s.io/kubectl/pkg/cmd/replace", - "//staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate", "//staging/src/k8s.io/kubectl/pkg/cmd/rollout", "//staging/src/k8s.io/kubectl/pkg/cmd/run", "//staging/src/k8s.io/kubectl/pkg/cmd/scale", @@ -423,7 +420,6 @@ package_group( "//staging/src/k8s.io/kubectl/pkg/cmd", "//staging/src/k8s.io/kubectl/pkg/cmd/apply", "//staging/src/k8s.io/kubectl/pkg/cmd/replace", - "//staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate", "//staging/src/k8s.io/kubectl/pkg/cmd/testing", "//staging/src/k8s.io/kubectl/pkg/cmd/util", ], diff --git a/hack/.golint_failures b/hack/.golint_failures index 40e538bbbe9..78cf0ddba45 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -480,7 +480,6 @@ staging/src/k8s.io/kubectl/pkg/cmd/plugin staging/src/k8s.io/kubectl/pkg/cmd/portforward staging/src/k8s.io/kubectl/pkg/cmd/proxy staging/src/k8s.io/kubectl/pkg/cmd/replace -staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate staging/src/k8s.io/kubectl/pkg/cmd/rollout staging/src/k8s.io/kubectl/pkg/cmd/run staging/src/k8s.io/kubectl/pkg/cmd/scale diff --git a/hack/.staticcheck_failures b/hack/.staticcheck_failures index 8f242ae285e..f2870ebe4f3 100644 --- a/hack/.staticcheck_failures +++ b/hack/.staticcheck_failures @@ -97,7 +97,6 @@ vendor/k8s.io/kubectl/pkg/cmd/config vendor/k8s.io/kubectl/pkg/cmd/edit vendor/k8s.io/kubectl/pkg/cmd/exec vendor/k8s.io/kubectl/pkg/cmd/get -vendor/k8s.io/kubectl/pkg/cmd/rollingupdate vendor/k8s.io/kubectl/pkg/cmd/scale vendor/k8s.io/kubectl/pkg/cmd/set vendor/k8s.io/kubectl/pkg/cmd/testing diff --git a/pkg/kubectl/cmd/BUILD b/pkg/kubectl/cmd/BUILD index 74bd6c16c5e..7728b49129f 100644 --- a/pkg/kubectl/cmd/BUILD +++ b/pkg/kubectl/cmd/BUILD @@ -44,7 +44,6 @@ go_library( "//staging/src/k8s.io/kubectl/pkg/cmd/portforward:go_default_library", "//staging/src/k8s.io/kubectl/pkg/cmd/proxy:go_default_library", "//staging/src/k8s.io/kubectl/pkg/cmd/replace:go_default_library", - "//staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate:go_default_library", "//staging/src/k8s.io/kubectl/pkg/cmd/rollout:go_default_library", "//staging/src/k8s.io/kubectl/pkg/cmd/run:go_default_library", "//staging/src/k8s.io/kubectl/pkg/cmd/scale:go_default_library", diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index 76c5a8f8f4f..98b9a98adc2 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -58,7 +58,6 @@ import ( "k8s.io/kubectl/pkg/cmd/portforward" "k8s.io/kubectl/pkg/cmd/proxy" "k8s.io/kubectl/pkg/cmd/replace" - 
"k8s.io/kubectl/pkg/cmd/rollingupdate" "k8s.io/kubectl/pkg/cmd/rollout" "k8s.io/kubectl/pkg/cmd/run" "k8s.io/kubectl/pkg/cmd/scale" @@ -265,10 +264,6 @@ __kubectl_custom_func() { __kubectl_get_resource_pod return ;; - kubectl_rolling-update) - __kubectl_get_resource_rc - return - ;; kubectl_cordon | kubectl_uncordon | kubectl_drain | kubectl_top_node) __kubectl_get_resource_node return @@ -507,7 +502,6 @@ func NewKubectlCommand(in io.Reader, out, err io.Writer) *cobra.Command { Message: "Deploy Commands:", Commands: []*cobra.Command{ rollout.NewCmdRollout(f, ioStreams), - rollingupdate.NewCmdRollingUpdate(f, ioStreams), scale.NewCmdScale(f, ioStreams), autoscale.NewCmdAutoscale(f, ioStreams), }, diff --git a/staging/src/k8s.io/kubectl/go.sum b/staging/src/k8s.io/kubectl/go.sum index 819059cde6d..c53a6687640 100644 --- a/staging/src/k8s.io/kubectl/go.sum +++ b/staging/src/k8s.io/kubectl/go.sum @@ -122,6 +122,7 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -151,6 +152,7 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -161,6 +163,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -181,6 +184,7 @@ github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest 
v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -203,15 +207,19 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/BUILD b/staging/src/k8s.io/kubectl/pkg/cmd/BUILD index 1bd588cb8fa..5a557905a86 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/BUILD +++ b/staging/src/k8s.io/kubectl/pkg/cmd/BUILD @@ -56,7 +56,6 @@ filegroup( "//staging/src/k8s.io/kubectl/pkg/cmd/portforward:all-srcs", "//staging/src/k8s.io/kubectl/pkg/cmd/proxy:all-srcs", "//staging/src/k8s.io/kubectl/pkg/cmd/replace:all-srcs", - "//staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate:all-srcs", "//staging/src/k8s.io/kubectl/pkg/cmd/rollout:all-srcs", "//staging/src/k8s.io/kubectl/pkg/cmd/run:all-srcs", "//staging/src/k8s.io/kubectl/pkg/cmd/scale:all-srcs", diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/BUILD b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/BUILD deleted file mode 100644 index e0f9badb8f6..00000000000 --- a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/BUILD +++ /dev/null @@ -1,86 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "rolling_updater.go", - "rollingupdate.go", - ], - importmap = "k8s.io/kubernetes/vendor/k8s.io/kubectl/pkg/cmd/rollingupdate", - importpath = "k8s.io/kubectl/pkg/cmd/rollingupdate", - visibility = ["//visibility:public"], - deps = [ - "//staging/src/k8s.io/api/core/v1:go_default_library", - 
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", - "//staging/src/k8s.io/cli-runtime/pkg/printers:go_default_library", - "//staging/src/k8s.io/cli-runtime/pkg/resource:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", - "//staging/src/k8s.io/client-go/scale:go_default_library", - "//staging/src/k8s.io/client-go/util/retry:go_default_library", - "//staging/src/k8s.io/kubectl/pkg/cmd/util:go_default_library", - "//staging/src/k8s.io/kubectl/pkg/scale:go_default_library", - "//staging/src/k8s.io/kubectl/pkg/scheme:go_default_library", - "//staging/src/k8s.io/kubectl/pkg/util:go_default_library", - "//staging/src/k8s.io/kubectl/pkg/util/deployment:go_default_library", - "//staging/src/k8s.io/kubectl/pkg/util/i18n:go_default_library", - "//staging/src/k8s.io/kubectl/pkg/util/podutils:go_default_library", - "//staging/src/k8s.io/kubectl/pkg/util/templates:go_default_library", - "//staging/src/k8s.io/kubectl/pkg/validation:go_default_library", - "//vendor/github.com/spf13/cobra:go_default_library", - "//vendor/k8s.io/klog:go_default_library", - "//vendor/k8s.io/utils/integer:go_default_library", - "//vendor/k8s.io/utils/pointer:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "rolling_updater_test.go", - "rollingupdate_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", - "//staging/src/k8s.io/client-go/rest:go_default_library", - "//staging/src/k8s.io/client-go/rest/fake:go_default_library", - "//staging/src/k8s.io/client-go/testing:go_default_library", - "//staging/src/k8s.io/kubectl/pkg/cmd/testing:go_default_library", - "//staging/src/k8s.io/kubectl/pkg/scale:go_default_library", - "//staging/src/k8s.io/kubectl/pkg/scheme:go_default_library", - "//staging/src/k8s.io/kubectl/pkg/util:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], 
-) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rolling_updater.go b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rolling_updater.go deleted file mode 100644 index d955ac9117c..00000000000 --- a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rolling_updater.go +++ /dev/null @@ -1,865 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rollingupdate - -import ( - "context" - "fmt" - "io" - "strconv" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - scaleclient "k8s.io/client-go/scale" - "k8s.io/client-go/util/retry" - "k8s.io/kubectl/pkg/scale" - "k8s.io/kubectl/pkg/util" - deploymentutil "k8s.io/kubectl/pkg/util/deployment" - "k8s.io/kubectl/pkg/util/podutils" - "k8s.io/utils/integer" - utilpointer "k8s.io/utils/pointer" -) - -func valOrZero(val *int32) int32 { - if val == nil { - return int32(0) - } - return *val -} - -const ( - kubectlAnnotationPrefix = "kubectl.kubernetes.io/" - sourceIDAnnotation = kubectlAnnotationPrefix + "update-source-id" - desiredReplicasAnnotation = kubectlAnnotationPrefix + "desired-replicas" - originalReplicasAnnotation = kubectlAnnotationPrefix + "original-replicas" - nextControllerAnnotation = kubectlAnnotationPrefix + "next-controller-id" -) - -// RollingUpdaterConfig is the configuration for a rolling deployment process. -type RollingUpdaterConfig struct { - // Out is a writer for progress output. - Out io.Writer - // OldRC is an existing controller to be replaced. - OldRc *corev1.ReplicationController - // NewRc is a controller that will take ownership of updated pods (will be - // created if needed). - NewRc *corev1.ReplicationController - // UpdatePeriod is the time to wait between individual pod updates. - UpdatePeriod time.Duration - // Interval is the time to wait between polling controller status after - // update. - Interval time.Duration - // Timeout is the time to wait for controller updates before giving up. - Timeout time.Duration - // MinReadySeconds is the number of seconds to wait after the pods are ready - MinReadySeconds int32 - // CleanupPolicy defines the cleanup action to take after the deployment is - // complete. - CleanupPolicy RollingUpdaterCleanupPolicy - // MaxUnavailable is the maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding up. - // This can not be 0 if MaxSurge is 0. - // By default, a fixed value of 1 is used. - // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods - // immediately when the rolling update starts. 
Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. - MaxUnavailable intstr.IntOrString - // MaxSurge is the maximum number of pods that can be scheduled above the desired number of pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // By default, a value of 1 is used. - // Example: when this is set to 30%, the new RC can be scaled up immediately - // when the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, new RC can be scaled up - // further, ensuring that total number of pods running at any time during - // the update is at most 130% of desired pods. - MaxSurge intstr.IntOrString - // OnProgress is invoked if set during each scale cycle, to allow the caller to perform additional logic or - // abort the scale. If an error is returned the cleanup method will not be invoked. The percentage value - // is a synthetic "progress" calculation that represents the approximate percentage completion. - OnProgress func(oldRc, newRc *corev1.ReplicationController, percentage int) error -} - -// RollingUpdaterCleanupPolicy is a cleanup action to take after the -// deployment is complete. -type RollingUpdaterCleanupPolicy string - -const ( - // DeleteRollingUpdateCleanupPolicy means delete the old controller. - DeleteRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Delete" - // PreserveRollingUpdateCleanupPolicy means keep the old controller. - PreserveRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Preserve" - // RenameRollingUpdateCleanupPolicy means delete the old controller, and rename - // the new controller to the name of the old controller. - RenameRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Rename" -) - -// RollingUpdater provides methods for updating replicated pods in a predictable, -// fault-tolerant way. -type RollingUpdater struct { - rcClient corev1client.ReplicationControllersGetter - podClient corev1client.PodsGetter - scaleClient scaleclient.ScalesGetter - // Namespace for resources - ns string - // scaleAndWait scales a controller and returns its updated state. - scaleAndWait func(rc *corev1.ReplicationController, retry *scale.RetryParams, wait *scale.RetryParams) (*corev1.ReplicationController, error) - //getOrCreateTargetController gets and validates an existing controller or - //makes a new one. - getOrCreateTargetController func(controller *corev1.ReplicationController, sourceID string) (*corev1.ReplicationController, bool, error) - // cleanup performs post deployment cleanup tasks for newRc and oldRc. - cleanup func(oldRc, newRc *corev1.ReplicationController, config *RollingUpdaterConfig) error - // getReadyPods returns the amount of old and new ready pods. - getReadyPods func(oldRc, newRc *corev1.ReplicationController, minReadySeconds int32) (int32, int32, error) - // nowFn returns the current time used to calculate the minReadySeconds - nowFn func() metav1.Time -} - -// NewRollingUpdater creates a RollingUpdater from a client. 
-func NewRollingUpdater(namespace string, rcClient corev1client.ReplicationControllersGetter, podClient corev1client.PodsGetter, sc scaleclient.ScalesGetter) *RollingUpdater { - updater := &RollingUpdater{ - rcClient: rcClient, - podClient: podClient, - scaleClient: sc, - ns: namespace, - } - // Inject real implementations. - updater.scaleAndWait = updater.scaleAndWaitWithScaler - updater.getOrCreateTargetController = updater.getOrCreateTargetControllerWithClient - updater.getReadyPods = updater.readyPods - updater.cleanup = updater.cleanupWithClients - updater.nowFn = func() metav1.Time { return metav1.Now() } - return updater -} - -// Update all pods for a ReplicationController (oldRc) by creating a new -// controller (newRc) with 0 replicas, and synchronously scaling oldRc and -// newRc until oldRc has 0 replicas and newRc has the original # of desired -// replicas. Cleanup occurs based on a RollingUpdaterCleanupPolicy. -// -// Each interval, the updater will attempt to make progress however it can -// without violating any availability constraints defined by the config. This -// means the amount scaled up or down each interval will vary based on the -// timeliness of readiness and the updater will always try to make progress, -// even slowly. -// -// If an update from newRc to oldRc is already in progress, we attempt to -// drive it to completion. If an error occurs at any step of the update, the -// error will be returned. -// -// A scaling event (either up or down) is considered progress; if no progress -// is made within the config.Timeout, an error is returned. -// -// TODO: make this handle performing a rollback of a partially completed -// rollout. -func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { - out := config.Out - oldRc := config.OldRc - scaleRetryParams := scale.NewRetryParams(config.Interval, config.Timeout) - - // Find an existing controller (for continuing an interrupted update) or - // create a new one if necessary. - sourceID := fmt.Sprintf("%s:%s", oldRc.Name, oldRc.UID) - newRc, existed, err := r.getOrCreateTargetController(config.NewRc, sourceID) - if err != nil { - return err - } - if existed { - fmt.Fprintf(out, "Continuing update with existing controller %s.\n", newRc.Name) - } else { - fmt.Fprintf(out, "Created %s\n", newRc.Name) - } - // Extract the desired replica count from the controller. - desiredAnnotation, err := strconv.Atoi(newRc.Annotations[desiredReplicasAnnotation]) - if err != nil { - return fmt.Errorf("Unable to parse annotation for %s: %s=%s", - newRc.Name, desiredReplicasAnnotation, newRc.Annotations[desiredReplicasAnnotation]) - } - desired := int32(desiredAnnotation) - // Extract the original replica count from the old controller, adding the - // annotation if it doesn't yet exist. 
- _, hasOriginalAnnotation := oldRc.Annotations[originalReplicasAnnotation] - if !hasOriginalAnnotation { - existing, err := r.rcClient.ReplicationControllers(oldRc.Namespace).Get(context.TODO(), oldRc.Name, metav1.GetOptions{}) - if err != nil { - return err - } - originReplicas := strconv.Itoa(int(valOrZero(existing.Spec.Replicas))) - applyUpdate := func(rc *corev1.ReplicationController) { - if rc.Annotations == nil { - rc.Annotations = map[string]string{} - } - rc.Annotations[originalReplicasAnnotation] = originReplicas - } - if oldRc, err = updateRcWithRetries(r.rcClient, existing.Namespace, existing, applyUpdate); err != nil { - return err - } - } - // maxSurge is the maximum scaling increment and maxUnavailable are the maximum pods - // that can be unavailable during a rollout. - maxSurge, maxUnavailable, err := deploymentutil.ResolveFenceposts(&config.MaxSurge, &config.MaxUnavailable, desired) - if err != nil { - return err - } - // Validate maximums. - if desired > 0 && maxUnavailable == 0 && maxSurge == 0 { - return fmt.Errorf("one of maxSurge or maxUnavailable must be specified") - } - // The minimum pods which must remain available throughout the update - // calculated for internal convenience. - minAvailable := int32(integer.IntMax(0, int(desired-maxUnavailable))) - // If the desired new scale is 0, then the max unavailable is necessarily - // the effective scale of the old RC regardless of the configuration - // (equivalent to 100% maxUnavailable). - if desired == 0 { - maxUnavailable = valOrZero(oldRc.Spec.Replicas) - minAvailable = 0 - } - - fmt.Fprintf(out, "Scaling up %s from %d to %d, scaling down %s from %d to 0 (keep %d pods available, don't exceed %d pods)\n", - newRc.Name, valOrZero(newRc.Spec.Replicas), desired, oldRc.Name, valOrZero(oldRc.Spec.Replicas), minAvailable, desired+maxSurge) - - // give a caller incremental notification and allow them to exit early - goal := desired - valOrZero(newRc.Spec.Replicas) - if goal < 0 { - goal = -goal - } - progress := func(complete bool) error { - if config.OnProgress == nil { - return nil - } - progress := desired - valOrZero(newRc.Spec.Replicas) - if progress < 0 { - progress = -progress - } - percentage := 100 - if !complete && goal > 0 { - percentage = int((goal - progress) * 100 / goal) - } - return config.OnProgress(oldRc, newRc, percentage) - } - - // Scale newRc and oldRc until newRc has the desired number of replicas and - // oldRc has 0 replicas. - progressDeadline := time.Now().UnixNano() + config.Timeout.Nanoseconds() - for valOrZero(newRc.Spec.Replicas) != desired || valOrZero(oldRc.Spec.Replicas) != 0 { - // Store the existing replica counts for progress timeout tracking. - newReplicas := valOrZero(newRc.Spec.Replicas) - oldReplicas := valOrZero(oldRc.Spec.Replicas) - - // Scale up as much as possible. - scaledRc, err := r.scaleUp(newRc, oldRc, desired, maxSurge, maxUnavailable, scaleRetryParams, config) - if err != nil { - return err - } - newRc = scaledRc - - // notify the caller if necessary - if err := progress(false); err != nil { - return err - } - - // Wait between scaling operations for things to settle. - time.Sleep(config.UpdatePeriod) - - // Scale down as much as possible. - scaledRc, err = r.scaleDown(newRc, oldRc, desired, minAvailable, maxUnavailable, maxSurge, config) - if err != nil { - return err - } - oldRc = scaledRc - - // notify the caller if necessary - if err := progress(false); err != nil { - return err - } - - // If we are making progress, continue to advance the progress deadline. 
- // Otherwise, time out with an error. - progressMade := (valOrZero(newRc.Spec.Replicas) != newReplicas) || (valOrZero(oldRc.Spec.Replicas) != oldReplicas) - if progressMade { - progressDeadline = time.Now().UnixNano() + config.Timeout.Nanoseconds() - } else if time.Now().UnixNano() > progressDeadline { - return fmt.Errorf("timed out waiting for any update progress to be made") - } - } - - // notify the caller if necessary - if err := progress(true); err != nil { - return err - } - - // Housekeeping and cleanup policy execution. - return r.cleanup(oldRc, newRc, config) -} - -// scaleUp scales up newRc to desired by whatever increment is possible given -// the configured surge threshold. scaleUp will safely no-op as necessary when -// it detects redundancy or other relevant conditions. -func (r *RollingUpdater) scaleUp(newRc, oldRc *corev1.ReplicationController, desired, maxSurge, maxUnavailable int32, scaleRetryParams *scale.RetryParams, config *RollingUpdaterConfig) (*corev1.ReplicationController, error) { - // If we're already at the desired, do nothing. - if valOrZero(newRc.Spec.Replicas) == desired { - return newRc, nil - } - - // Scale up as far as we can based on the surge limit. - increment := (desired + maxSurge) - (valOrZero(oldRc.Spec.Replicas) + valOrZero(newRc.Spec.Replicas)) - // If the old is already scaled down, go ahead and scale all the way up. - if valOrZero(oldRc.Spec.Replicas) == 0 { - increment = desired - valOrZero(newRc.Spec.Replicas) - } - // We can't scale up without violating the surge limit, so do nothing. - if increment <= 0 { - return newRc, nil - } - // Increase the replica count, and deal with fenceposts. - nextVal := valOrZero(newRc.Spec.Replicas) + increment - newRc.Spec.Replicas = &nextVal - if valOrZero(newRc.Spec.Replicas) > desired { - newRc.Spec.Replicas = &desired - } - // Perform the scale-up. - fmt.Fprintf(config.Out, "Scaling %s up to %d\n", newRc.Name, valOrZero(newRc.Spec.Replicas)) - scaledRc, err := r.scaleAndWait(newRc, scaleRetryParams, scaleRetryParams) - if err != nil { - return nil, err - } - return scaledRc, nil -} - -// scaleDown scales down oldRc to 0 at whatever decrement possible given the -// thresholds defined on the config. scaleDown will safely no-op as necessary -// when it detects redundancy or other relevant conditions. -func (r *RollingUpdater) scaleDown(newRc, oldRc *corev1.ReplicationController, desired, minAvailable, maxUnavailable, maxSurge int32, config *RollingUpdaterConfig) (*corev1.ReplicationController, error) { - // Already scaled down; do nothing. - if valOrZero(oldRc.Spec.Replicas) == 0 { - return oldRc, nil - } - // Get ready pods. We shouldn't block, otherwise in case both old and new - // pods are unavailable then the rolling update process blocks. - // Timeout-wise we are already covered by the progress check. - _, newAvailable, err := r.getReadyPods(oldRc, newRc, config.MinReadySeconds) - if err != nil { - return nil, err - } - // The old controller is considered as part of the total because we want to - // maintain minimum availability even with a volatile old controller. 
- // Scale down as much as possible while maintaining minimum availability - allPods := valOrZero(oldRc.Spec.Replicas) + valOrZero(newRc.Spec.Replicas) - newUnavailable := valOrZero(newRc.Spec.Replicas) - newAvailable - decrement := allPods - minAvailable - newUnavailable - // The decrement normally shouldn't drop below 0 because the available count - // always starts below the old replica count, but the old replica count can - // decrement due to externalities like pods death in the replica set. This - // will be considered a transient condition; do nothing and try again later - // with new readiness values. - // - // If the most we can scale is 0, it means we can't scale down without - // violating the minimum. Do nothing and try again later when conditions may - // have changed. - if decrement <= 0 { - return oldRc, nil - } - // Reduce the replica count, and deal with fenceposts. - nextOldVal := valOrZero(oldRc.Spec.Replicas) - decrement - oldRc.Spec.Replicas = &nextOldVal - if valOrZero(oldRc.Spec.Replicas) < 0 { - oldRc.Spec.Replicas = utilpointer.Int32Ptr(0) - } - // If the new is already fully scaled and available up to the desired size, go - // ahead and scale old all the way down. - if valOrZero(newRc.Spec.Replicas) == desired && newAvailable == desired { - oldRc.Spec.Replicas = utilpointer.Int32Ptr(0) - } - // Perform the scale-down. - fmt.Fprintf(config.Out, "Scaling %s down to %d\n", oldRc.Name, valOrZero(oldRc.Spec.Replicas)) - retryWait := &scale.RetryParams{ - Interval: config.Interval, - Timeout: config.Timeout, - } - scaledRc, err := r.scaleAndWait(oldRc, retryWait, retryWait) - if err != nil { - return nil, err - } - return scaledRc, nil -} - -// scalerScaleAndWait scales a controller using a Scaler and a real client. -func (r *RollingUpdater) scaleAndWaitWithScaler(rc *corev1.ReplicationController, retry *scale.RetryParams, wait *scale.RetryParams) (*corev1.ReplicationController, error) { - scaler := scale.NewScaler(r.scaleClient) - if err := scaler.Scale(rc.Namespace, rc.Name, uint(valOrZero(rc.Spec.Replicas)), &scale.ScalePrecondition{Size: -1, ResourceVersion: ""}, retry, wait, corev1.SchemeGroupVersion.WithResource("replicationcontrollers")); err != nil { - return nil, err - } - return r.rcClient.ReplicationControllers(rc.Namespace).Get(context.TODO(), rc.Name, metav1.GetOptions{}) -} - -// readyPods returns the old and new ready counts for their pods. -// If a pod is observed as being ready, it's considered ready even -// if it later becomes notReady. 
-func (r *RollingUpdater) readyPods(oldRc, newRc *corev1.ReplicationController, minReadySeconds int32) (int32, int32, error) { - controllers := []*corev1.ReplicationController{oldRc, newRc} - oldReady := int32(0) - newReady := int32(0) - if r.nowFn == nil { - r.nowFn = func() metav1.Time { return metav1.Now() } - } - - for i := range controllers { - controller := controllers[i] - selector := labels.Set(controller.Spec.Selector).AsSelector() - options := metav1.ListOptions{LabelSelector: selector.String()} - pods, err := r.podClient.Pods(controller.Namespace).List(context.TODO(), options) - if err != nil { - return 0, 0, err - } - for _, v1Pod := range pods.Items { - // Do not count deleted pods as ready - if v1Pod.DeletionTimestamp != nil { - continue - } - if !podutils.IsPodAvailable(&v1Pod, minReadySeconds, r.nowFn()) { - continue - } - switch controller.Name { - case oldRc.Name: - oldReady++ - case newRc.Name: - newReady++ - } - } - } - return oldReady, newReady, nil -} - -// getOrCreateTargetControllerWithClient looks for an existing controller with -// sourceID. If found, the existing controller is returned with true -// indicating that the controller already exists. If the controller isn't -// found, a new one is created and returned along with false indicating the -// controller was created. -// -// Existing controllers are validated to ensure their sourceIDAnnotation -// matches sourceID; if there's a mismatch, an error is returned. -func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *corev1.ReplicationController, sourceID string) (*corev1.ReplicationController, bool, error) { - existingRc, err := r.existingController(controller) - if err != nil { - if !errors.IsNotFound(err) { - // There was an error trying to find the controller; don't assume we - // should create it. - return nil, false, err - } - if valOrZero(controller.Spec.Replicas) <= 0 { - return nil, false, fmt.Errorf("Invalid controller spec for %s; required: > 0 replicas, actual: %d", controller.Name, valOrZero(controller.Spec.Replicas)) - } - // The controller wasn't found, so create it. - if controller.Annotations == nil { - controller.Annotations = map[string]string{} - } - controller.Annotations[desiredReplicasAnnotation] = fmt.Sprintf("%d", valOrZero(controller.Spec.Replicas)) - controller.Annotations[sourceIDAnnotation] = sourceID - controller.Spec.Replicas = utilpointer.Int32Ptr(0) - newRc, err := r.rcClient.ReplicationControllers(r.ns).Create(context.TODO(), controller, metav1.CreateOptions{}) - return newRc, false, err - } - // Validate and use the existing controller. 
- annotations := existingRc.Annotations - source := annotations[sourceIDAnnotation] - _, ok := annotations[desiredReplicasAnnotation] - if source != sourceID || !ok { - return nil, false, fmt.Errorf("Missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceID, annotations) - } - return existingRc, true, nil -} - -// existingController verifies if the controller already exists -func (r *RollingUpdater) existingController(controller *corev1.ReplicationController) (*corev1.ReplicationController, error) { - // without rc name but generate name, there's no existing rc - if len(controller.Name) == 0 && len(controller.GenerateName) > 0 { - return nil, errors.NewNotFound(corev1.Resource("replicationcontrollers"), controller.Name) - } - // controller name is required to get rc back - return r.rcClient.ReplicationControllers(controller.Namespace).Get(context.TODO(), controller.Name, metav1.GetOptions{}) -} - -// cleanupWithClients performs cleanup tasks after the rolling update. Update -// process related annotations are removed from oldRc and newRc. The -// CleanupPolicy on config is executed. -func (r *RollingUpdater) cleanupWithClients(oldRc, newRc *corev1.ReplicationController, config *RollingUpdaterConfig) error { - // Clean up annotations - var err error - newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(context.TODO(), newRc.Name, metav1.GetOptions{}) - if err != nil { - return err - } - applyUpdate := func(rc *corev1.ReplicationController) { - delete(rc.Annotations, sourceIDAnnotation) - delete(rc.Annotations, desiredReplicasAnnotation) - } - if newRc, err = updateRcWithRetries(r.rcClient, r.ns, newRc, applyUpdate); err != nil { - return err - } - - if err = wait.Poll(config.Interval, config.Timeout, controllerHasDesiredReplicas(r.rcClient, newRc)); err != nil { - return err - } - newRc, err = r.rcClient.ReplicationControllers(r.ns).Get(context.TODO(), newRc.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - switch config.CleanupPolicy { - case DeleteRollingUpdateCleanupPolicy: - // delete old rc - fmt.Fprintf(config.Out, "Update succeeded. Deleting %s\n", oldRc.Name) - return r.rcClient.ReplicationControllers(r.ns).Delete(context.TODO(), oldRc.Name, nil) - case RenameRollingUpdateCleanupPolicy: - // delete old rc - fmt.Fprintf(config.Out, "Update succeeded. Deleting old controller: %s\n", oldRc.Name) - if err := r.rcClient.ReplicationControllers(r.ns).Delete(context.TODO(), oldRc.Name, nil); err != nil { - return err - } - fmt.Fprintf(config.Out, "Renaming %s to %s\n", newRc.Name, oldRc.Name) - return Rename(r.rcClient, newRc, oldRc.Name) - case PreserveRollingUpdateCleanupPolicy: - return nil - default: - return nil - } -} - -func Rename(c corev1client.ReplicationControllersGetter, rc *corev1.ReplicationController, newName string) error { - oldName := rc.Name - rc.Name = newName - rc.ResourceVersion = "" - // First delete the oldName RC and orphan its pods. 
- policy := metav1.DeletePropagationOrphan - err := c.ReplicationControllers(rc.Namespace).Delete(context.TODO(), oldName, &metav1.DeleteOptions{PropagationPolicy: &policy}) - if err != nil && !errors.IsNotFound(err) { - return err - } - err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) { - _, err := c.ReplicationControllers(rc.Namespace).Get(context.TODO(), oldName, metav1.GetOptions{}) - if err == nil { - return false, nil - } else if errors.IsNotFound(err) { - return true, nil - } else { - return false, err - } - }) - if err != nil { - return err - } - // Then create the same RC with the new name. - _, err = c.ReplicationControllers(rc.Namespace).Create(context.TODO(), rc, metav1.CreateOptions{}) - return err -} - -func LoadExistingNextReplicationController(c corev1client.ReplicationControllersGetter, namespace, newName string) (*corev1.ReplicationController, error) { - if len(newName) == 0 { - return nil, nil - } - newRc, err := c.ReplicationControllers(namespace).Get(context.TODO(), newName, metav1.GetOptions{}) - if err != nil && errors.IsNotFound(err) { - return nil, nil - } - return newRc, err -} - -type NewControllerConfig struct { - Namespace string - OldName, NewName string - Image string - Container string - DeploymentKey string - PullPolicy corev1.PullPolicy -} - -func CreateNewControllerFromCurrentController(rcClient corev1client.ReplicationControllersGetter, codec runtime.Codec, cfg *NewControllerConfig) (*corev1.ReplicationController, error) { - containerIndex := 0 - // load the old RC into the "new" RC - newRc, err := rcClient.ReplicationControllers(cfg.Namespace).Get(context.TODO(), cfg.OldName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - - if len(cfg.Container) != 0 { - containerFound := false - - for i, c := range newRc.Spec.Template.Spec.Containers { - if c.Name == cfg.Container { - containerIndex = i - containerFound = true - break - } - } - - if !containerFound { - return nil, fmt.Errorf("container %s not found in pod", cfg.Container) - } - } - - if len(newRc.Spec.Template.Spec.Containers) > 1 && len(cfg.Container) == 0 { - return nil, fmt.Errorf("must specify container to update when updating a multi-container pod") - } - - if len(newRc.Spec.Template.Spec.Containers) == 0 { - return nil, fmt.Errorf("pod has no containers! (%v)", newRc) - } - newRc.Spec.Template.Spec.Containers[containerIndex].Image = cfg.Image - if len(cfg.PullPolicy) != 0 { - newRc.Spec.Template.Spec.Containers[containerIndex].ImagePullPolicy = cfg.PullPolicy - } - - newHash, err := util.HashObject(newRc, codec) - if err != nil { - return nil, err - } - - if len(cfg.NewName) == 0 { - cfg.NewName = fmt.Sprintf("%s-%s", newRc.Name, newHash) - } - newRc.Name = cfg.NewName - - newRc.Spec.Selector[cfg.DeploymentKey] = newHash - newRc.Spec.Template.Labels[cfg.DeploymentKey] = newHash - // Clear resource version after hashing so that identical updates get different hashes. - newRc.ResourceVersion = "" - return newRc, nil -} - -func AbortRollingUpdate(c *RollingUpdaterConfig) error { - // Swap the controllers - tmp := c.OldRc - c.OldRc = c.NewRc - c.NewRc = tmp - - if c.NewRc.Annotations == nil { - c.NewRc.Annotations = map[string]string{} - } - c.NewRc.Annotations[sourceIDAnnotation] = fmt.Sprintf("%s:%s", c.OldRc.Name, c.OldRc.UID) - - // Use the original value since the replica count change from old to new - // could be asymmetric. If we don't know the original count, we can't safely - // roll back to a known good size. 
- originalSize, foundOriginal := tmp.Annotations[originalReplicasAnnotation] - if !foundOriginal { - return fmt.Errorf("couldn't find original replica count of %q", tmp.Name) - } - fmt.Fprintf(c.Out, "Setting %q replicas to %s\n", c.NewRc.Name, originalSize) - c.NewRc.Annotations[desiredReplicasAnnotation] = originalSize - c.CleanupPolicy = DeleteRollingUpdateCleanupPolicy - return nil -} - -func GetNextControllerAnnotation(rc *corev1.ReplicationController) (string, bool) { - res, found := rc.Annotations[nextControllerAnnotation] - return res, found -} - -func SetNextControllerAnnotation(rc *corev1.ReplicationController, name string) { - if rc.Annotations == nil { - rc.Annotations = map[string]string{} - } - rc.Annotations[nextControllerAnnotation] = name -} - -func UpdateExistingReplicationController(rcClient corev1client.ReplicationControllersGetter, podClient corev1client.PodsGetter, oldRc *corev1.ReplicationController, namespace, newName, deploymentKey, deploymentValue string, out io.Writer) (*corev1.ReplicationController, error) { - if _, found := oldRc.Spec.Selector[deploymentKey]; !found { - SetNextControllerAnnotation(oldRc, newName) - return AddDeploymentKeyToReplicationController(oldRc, rcClient, podClient, deploymentKey, deploymentValue, namespace, out) - } - - // If we didn't need to update the controller for the deployment key, we still need to write - // the "next" controller. - applyUpdate := func(rc *corev1.ReplicationController) { - SetNextControllerAnnotation(rc, newName) - } - return updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate) -} - -func AddDeploymentKeyToReplicationController(oldRc *corev1.ReplicationController, rcClient corev1client.ReplicationControllersGetter, podClient corev1client.PodsGetter, deploymentKey, deploymentValue, namespace string, out io.Writer) (*corev1.ReplicationController, error) { - var err error - // First, update the template label. This ensures that any newly created pods will have the new label - applyUpdate := func(rc *corev1.ReplicationController) { - if rc.Spec.Template.Labels == nil { - rc.Spec.Template.Labels = map[string]string{} - } - rc.Spec.Template.Labels[deploymentKey] = deploymentValue - } - if oldRc, err = updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate); err != nil { - return nil, err - } - - // Update all pods managed by the rc to have the new hash label, so they are correctly adopted - // TODO: extract the code from the label command and re-use it here. 
- selector := labels.SelectorFromSet(oldRc.Spec.Selector) - options := metav1.ListOptions{LabelSelector: selector.String()} - podList, err := podClient.Pods(namespace).List(context.TODO(), options) - if err != nil { - return nil, err - } - for ix := range podList.Items { - pod := &podList.Items[ix] - applyUpdate := func(p *corev1.Pod) { - if p.Labels == nil { - p.Labels = map[string]string{ - deploymentKey: deploymentValue, - } - } else { - p.Labels[deploymentKey] = deploymentValue - } - } - if pod, err = updatePodWithRetries(podClient, namespace, pod, applyUpdate); err != nil { - return nil, err - } - } - - if oldRc.Spec.Selector == nil { - oldRc.Spec.Selector = map[string]string{} - } - applyUpdate = func(rc *corev1.ReplicationController) { - rc.Spec.Selector[deploymentKey] = deploymentValue - } - // Update the selector of the rc so it manages all the pods we updated above - if oldRc, err = updateRcWithRetries(rcClient, namespace, oldRc, applyUpdate); err != nil { - return nil, err - } - - // Clean up any orphaned pods that don't have the new label, this can happen if the rc manager - // doesn't see the update to its pod template and creates a new pod with the old labels after - // we've finished re-adopting existing pods to the rc. - selector = labels.SelectorFromSet(oldRc.Spec.Selector) - options = metav1.ListOptions{LabelSelector: selector.String()} - if podList, err = podClient.Pods(namespace).List(context.TODO(), options); err != nil { - return nil, err - } - for ix := range podList.Items { - pod := &podList.Items[ix] - if value, found := pod.Labels[deploymentKey]; !found || value != deploymentValue { - if err := podClient.Pods(namespace).Delete(context.TODO(), pod.Name, nil); err != nil { - return nil, err - } - } - } - - return oldRc, nil -} - -type updateRcFunc func(controller *corev1.ReplicationController) - -// updateRcWithRetries retries updating the given rc on conflict with the following steps: -// 1. Get latest resource -// 2. applyUpdate -// 3. Update the resource -func updateRcWithRetries(rcClient corev1client.ReplicationControllersGetter, namespace string, rc *corev1.ReplicationController, applyUpdate updateRcFunc) (*corev1.ReplicationController, error) { - // Deep copy the rc in case we failed on Get during retry loop - oldRc := rc.DeepCopy() - err := retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) { - // Apply the update, then attempt to push it to the apiserver. - applyUpdate(rc) - if rc, e = rcClient.ReplicationControllers(namespace).Update(context.TODO(), rc, metav1.UpdateOptions{}); e == nil { - // rc contains the latest controller post update - return - } - updateErr := e - // Update the controller with the latest resource version, if the update failed we - // can't trust rc so use oldRc.Name. - if rc, e = rcClient.ReplicationControllers(namespace).Get(context.TODO(), oldRc.Name, metav1.GetOptions{}); e != nil { - // The Get failed: Value in rc cannot be trusted. - rc = oldRc - } - // Only return the error from update - return updateErr - }) - // If the error is non-nil the returned controller cannot be trusted, if it is nil, the returned - // controller contains the applied update. - return rc, err -} - -type updatePodFunc func(controller *corev1.Pod) - -// updatePodWithRetries retries updating the given pod on conflict with the following steps: -// 1. Get latest resource -// 2. applyUpdate -// 3. 
Update the resource -func updatePodWithRetries(podClient corev1client.PodsGetter, namespace string, pod *corev1.Pod, applyUpdate updatePodFunc) (*corev1.Pod, error) { - // Deep copy the pod in case we failed on Get during retry loop - oldPod := pod.DeepCopy() - err := retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) { - // Apply the update, then attempt to push it to the apiserver. - applyUpdate(pod) - if pod, e = podClient.Pods(namespace).Update(context.TODO(), pod, metav1.UpdateOptions{}); e == nil { - return - } - updateErr := e - if pod, e = podClient.Pods(namespace).Get(context.TODO(), oldPod.Name, metav1.GetOptions{}); e != nil { - pod = oldPod - } - // Only return the error from update - return updateErr - }) - // If the error is non-nil the returned pod cannot be trusted, if it is nil, the returned - // controller contains the applied update. - return pod, err -} - -func FindSourceController(r corev1client.ReplicationControllersGetter, namespace, name string) (*corev1.ReplicationController, error) { - list, err := r.ReplicationControllers(namespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return nil, err - } - for ix := range list.Items { - rc := &list.Items[ix] - if rc.Annotations != nil && strings.HasPrefix(rc.Annotations[sourceIDAnnotation], name) { - return rc, nil - } - } - return nil, fmt.Errorf("couldn't find a replication controller with source id == %s/%s", namespace, name) -} - -// controllerHasDesiredReplicas returns a condition that will be true if and only if -// the desired replica count for a controller's ReplicaSelector equals the Replicas count. -func controllerHasDesiredReplicas(rcClient corev1client.ReplicationControllersGetter, controller *corev1.ReplicationController) wait.ConditionFunc { - - // If we're given a controller where the status lags the spec, it either means that the controller is stale, - // or that the rc manager hasn't noticed the update yet. Polling status.Replicas is not safe in the latter case. - desiredGeneration := controller.Generation - - return func() (bool, error) { - ctrl, err := rcClient.ReplicationControllers(controller.Namespace).Get(context.TODO(), controller.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - // There's a chance a concurrent update modifies the Spec.Replicas causing this check to pass, - // or, after this check has passed, a modification causes the rc manager to create more pods. - // This will not be an issue once we've implemented graceful delete for rcs, but till then - // concurrent stop operations on the same rc might have unintended side effects. - return ctrl.Status.ObservedGeneration >= desiredGeneration && ctrl.Status.Replicas == valOrZero(ctrl.Spec.Replicas), nil - } -} diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rolling_updater_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rolling_updater_test.go deleted file mode 100644 index 0dfb04f2e03..00000000000 --- a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rolling_updater_test.go +++ /dev/null @@ -1,1852 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rollingupdate - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "testing" - "time" - - corev1 "k8s.io/api/core/v1" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/diff" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" - restclient "k8s.io/client-go/rest" - manualfake "k8s.io/client-go/rest/fake" - testcore "k8s.io/client-go/testing" - "k8s.io/kubectl/pkg/scale" - "k8s.io/kubectl/pkg/scheme" - "k8s.io/kubectl/pkg/util" -) - -func oldRc(replicas int, original int) *corev1.ReplicationController { - t := replicas - replicasCopy := int32(t) - return &corev1.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo-v1", - UID: "7764ae47-9092-11e4-8393-42010af018ff", - Annotations: map[string]string{ - originalReplicasAnnotation: fmt.Sprintf("%d", original), - }, - }, - Spec: corev1.ReplicationControllerSpec{ - Replicas: &replicasCopy, - Selector: map[string]string{"version": "v1"}, - Template: &corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-v1", - Labels: map[string]string{"version": "v1"}, - }, - }, - }, - Status: corev1.ReplicationControllerStatus{ - Replicas: int32(replicas), - }, - } -} - -func newRc(replicas int, desired int) *corev1.ReplicationController { - rc := oldRc(replicas, replicas) - rc.Spec.Template = &corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-v2", - Labels: map[string]string{"version": "v2"}, - }, - } - rc.Spec.Selector = map[string]string{"version": "v2"} - rc.ObjectMeta = metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo-v2", - Annotations: map[string]string{ - desiredReplicasAnnotation: fmt.Sprintf("%d", desired), - sourceIDAnnotation: "foo-v1:7764ae47-9092-11e4-8393-42010af018ff", - }, - } - return rc -} - -// TestUpdate performs complex scenario testing for rolling updates. It -// provides fine grained control over the states for each update interval to -// allow the expression of as many edge cases as possible. -func TestUpdate(t *testing.T) { - // up represents a simulated scale up event and expectation - type up struct { - // to is the expected replica count for a scale-up - to int - } - // down represents a simulated scale down event and expectation - type down struct { - // oldReady is the number of oldRc replicas which will be seen - // as ready during the scale down attempt - oldReady int - // newReady is the number of newRc replicas which will be seen - // as ready during the scale up attempt - newReady int - // to is the expected replica count for the scale down - to int - // noop and to are mutually exclusive; if noop is true, that means for - // this down event, no scaling attempt should be made (for example, if - // by scaling down, the readiness minimum would be crossed.) 
- noop bool - } - - tests := []struct { - name string - // oldRc is the "from" deployment - oldRc *corev1.ReplicationController - // newRc is the "to" deployment - newRc *corev1.ReplicationController - // whether newRc existed (false means it was created) - newRcExists bool - maxUnavail intstr.IntOrString - maxSurge intstr.IntOrString - // expected is the sequence of up/down events that will be simulated and - // verified - expected []interface{} - // output is the expected textual output written - output string - }{ - { - name: "10->10 30/0 fast readiness", - oldRc: oldRc(10, 10), - newRc: newRc(0, 10), - newRcExists: false, - maxUnavail: intstr.FromString("30%"), - maxSurge: intstr.FromString("0%"), - expected: []interface{}{ - down{oldReady: 10, newReady: 0, to: 7}, - up{3}, - down{oldReady: 7, newReady: 3, to: 4}, - up{6}, - down{oldReady: 4, newReady: 6, to: 1}, - up{9}, - down{oldReady: 1, newReady: 9, to: 0}, - up{10}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 7 pods available, don't exceed 10 pods) -Scaling foo-v1 down to 7 -Scaling foo-v2 up to 3 -Scaling foo-v1 down to 4 -Scaling foo-v2 up to 6 -Scaling foo-v1 down to 1 -Scaling foo-v2 up to 9 -Scaling foo-v1 down to 0 -Scaling foo-v2 up to 10 -`, - }, - { - name: "10->10 30/0 delayed readiness", - oldRc: oldRc(10, 10), - newRc: newRc(0, 10), - newRcExists: false, - maxUnavail: intstr.FromString("30%"), - maxSurge: intstr.FromString("0%"), - expected: []interface{}{ - down{oldReady: 10, newReady: 0, to: 7}, - up{3}, - down{oldReady: 7, newReady: 0, noop: true}, - down{oldReady: 7, newReady: 1, to: 6}, - up{4}, - down{oldReady: 6, newReady: 4, to: 3}, - up{7}, - down{oldReady: 3, newReady: 7, to: 0}, - up{10}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 7 pods available, don't exceed 10 pods) -Scaling foo-v1 down to 7 -Scaling foo-v2 up to 3 -Scaling foo-v1 down to 6 -Scaling foo-v2 up to 4 -Scaling foo-v1 down to 3 -Scaling foo-v2 up to 7 -Scaling foo-v1 down to 0 -Scaling foo-v2 up to 10 -`, - }, { - name: "10->10 30/0 fast readiness, continuation", - oldRc: oldRc(7, 10), - newRc: newRc(3, 10), - newRcExists: false, - maxUnavail: intstr.FromString("30%"), - maxSurge: intstr.FromString("0%"), - expected: []interface{}{ - down{oldReady: 7, newReady: 3, to: 4}, - up{6}, - down{oldReady: 4, newReady: 6, to: 1}, - up{9}, - down{oldReady: 1, newReady: 9, to: 0}, - up{10}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 3 to 10, scaling down foo-v1 from 7 to 0 (keep 7 pods available, don't exceed 10 pods) -Scaling foo-v1 down to 4 -Scaling foo-v2 up to 6 -Scaling foo-v1 down to 1 -Scaling foo-v2 up to 9 -Scaling foo-v1 down to 0 -Scaling foo-v2 up to 10 -`, - }, { - name: "10->10 30/0 fast readiness, continued after restart which prevented first scale-up", - oldRc: oldRc(7, 10), - newRc: newRc(0, 10), - newRcExists: false, - maxUnavail: intstr.FromString("30%"), - maxSurge: intstr.FromString("0%"), - expected: []interface{}{ - down{oldReady: 7, newReady: 0, noop: true}, - up{3}, - down{oldReady: 7, newReady: 3, to: 4}, - up{6}, - down{oldReady: 4, newReady: 6, to: 1}, - up{9}, - down{oldReady: 1, newReady: 9, to: 0}, - up{10}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 7 to 0 (keep 7 pods available, don't exceed 10 pods) -Scaling foo-v2 up to 3 -Scaling foo-v1 down to 4 -Scaling foo-v2 up to 6 -Scaling foo-v1 down to 1 -Scaling foo-v2 up to 9 -Scaling foo-v1 down to 
0 -Scaling foo-v2 up to 10 -`, - }, { - name: "10->10 0/30 fast readiness", - oldRc: oldRc(10, 10), - newRc: newRc(0, 10), - newRcExists: false, - maxUnavail: intstr.FromString("0%"), - maxSurge: intstr.FromString("30%"), - expected: []interface{}{ - up{3}, - down{oldReady: 10, newReady: 3, to: 7}, - up{6}, - down{oldReady: 7, newReady: 6, to: 4}, - up{9}, - down{oldReady: 4, newReady: 9, to: 1}, - up{10}, - down{oldReady: 1, newReady: 10, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 13 pods) -Scaling foo-v2 up to 3 -Scaling foo-v1 down to 7 -Scaling foo-v2 up to 6 -Scaling foo-v1 down to 4 -Scaling foo-v2 up to 9 -Scaling foo-v1 down to 1 -Scaling foo-v2 up to 10 -Scaling foo-v1 down to 0 -`, - }, { - name: "10->10 0/30 delayed readiness", - oldRc: oldRc(10, 10), - newRc: newRc(0, 10), - newRcExists: false, - maxUnavail: intstr.FromString("0%"), - maxSurge: intstr.FromString("30%"), - expected: []interface{}{ - up{3}, - down{oldReady: 10, newReady: 0, noop: true}, - down{oldReady: 10, newReady: 1, to: 9}, - up{4}, - down{oldReady: 9, newReady: 3, to: 7}, - up{6}, - down{oldReady: 7, newReady: 6, to: 4}, - up{9}, - down{oldReady: 4, newReady: 9, to: 1}, - up{10}, - down{oldReady: 1, newReady: 9, noop: true}, - down{oldReady: 1, newReady: 10, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 13 pods) -Scaling foo-v2 up to 3 -Scaling foo-v1 down to 9 -Scaling foo-v2 up to 4 -Scaling foo-v1 down to 7 -Scaling foo-v2 up to 6 -Scaling foo-v1 down to 4 -Scaling foo-v2 up to 9 -Scaling foo-v1 down to 1 -Scaling foo-v2 up to 10 -Scaling foo-v1 down to 0 -`, - }, { - name: "10->10 10/20 fast readiness", - oldRc: oldRc(10, 10), - newRc: newRc(0, 10), - newRcExists: false, - maxUnavail: intstr.FromString("10%"), - maxSurge: intstr.FromString("20%"), - expected: []interface{}{ - up{2}, - down{oldReady: 10, newReady: 2, to: 7}, - up{5}, - down{oldReady: 7, newReady: 5, to: 4}, - up{8}, - down{oldReady: 4, newReady: 8, to: 1}, - up{10}, - down{oldReady: 1, newReady: 10, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 9 pods available, don't exceed 12 pods) -Scaling foo-v2 up to 2 -Scaling foo-v1 down to 7 -Scaling foo-v2 up to 5 -Scaling foo-v1 down to 4 -Scaling foo-v2 up to 8 -Scaling foo-v1 down to 1 -Scaling foo-v2 up to 10 -Scaling foo-v1 down to 0 -`, - }, { - name: "10->10 10/20 delayed readiness", - oldRc: oldRc(10, 10), - newRc: newRc(0, 10), - newRcExists: false, - maxUnavail: intstr.FromString("10%"), - maxSurge: intstr.FromString("20%"), - expected: []interface{}{ - up{2}, - down{oldReady: 10, newReady: 2, to: 7}, - up{5}, - down{oldReady: 7, newReady: 4, to: 5}, - up{7}, - down{oldReady: 5, newReady: 4, noop: true}, - down{oldReady: 5, newReady: 7, to: 2}, - up{10}, - down{oldReady: 2, newReady: 9, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 9 pods available, don't exceed 12 pods) -Scaling foo-v2 up to 2 -Scaling foo-v1 down to 7 -Scaling foo-v2 up to 5 -Scaling foo-v1 down to 5 -Scaling foo-v2 up to 7 -Scaling foo-v1 down to 2 -Scaling foo-v2 up to 10 -Scaling foo-v1 down to 0 -`, - }, { - name: "10->10 10/20 fast readiness continued after restart which prevented first scale-down", - oldRc: oldRc(10, 10), - newRc: newRc(2, 10), - newRcExists: false, - maxUnavail: 
intstr.FromString("10%"), - maxSurge: intstr.FromString("20%"), - expected: []interface{}{ - down{oldReady: 10, newReady: 2, to: 7}, - up{5}, - down{oldReady: 7, newReady: 5, to: 4}, - up{8}, - down{oldReady: 4, newReady: 8, to: 1}, - up{10}, - down{oldReady: 1, newReady: 10, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 2 to 10, scaling down foo-v1 from 10 to 0 (keep 9 pods available, don't exceed 12 pods) -Scaling foo-v1 down to 7 -Scaling foo-v2 up to 5 -Scaling foo-v1 down to 4 -Scaling foo-v2 up to 8 -Scaling foo-v1 down to 1 -Scaling foo-v2 up to 10 -Scaling foo-v1 down to 0 -`, - }, { - name: "10->10 0/100 fast readiness", - oldRc: oldRc(10, 10), - newRc: newRc(0, 10), - newRcExists: false, - maxUnavail: intstr.FromString("0%"), - maxSurge: intstr.FromString("100%"), - expected: []interface{}{ - up{10}, - down{oldReady: 10, newReady: 10, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 20 pods) -Scaling foo-v2 up to 10 -Scaling foo-v1 down to 0 -`, - }, { - name: "10->10 0/100 delayed readiness", - oldRc: oldRc(10, 10), - newRc: newRc(0, 10), - newRcExists: false, - maxUnavail: intstr.FromString("0%"), - maxSurge: intstr.FromString("100%"), - expected: []interface{}{ - up{10}, - down{oldReady: 10, newReady: 0, noop: true}, - down{oldReady: 10, newReady: 2, to: 8}, - down{oldReady: 8, newReady: 7, to: 3}, - down{oldReady: 3, newReady: 10, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 20 pods) -Scaling foo-v2 up to 10 -Scaling foo-v1 down to 8 -Scaling foo-v1 down to 3 -Scaling foo-v1 down to 0 -`, - }, { - name: "10->10 100/0 fast readiness", - oldRc: oldRc(10, 10), - newRc: newRc(0, 10), - newRcExists: false, - maxUnavail: intstr.FromString("100%"), - maxSurge: intstr.FromString("0%"), - expected: []interface{}{ - down{oldReady: 10, newReady: 0, to: 0}, - up{10}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 0 pods available, don't exceed 10 pods) -Scaling foo-v1 down to 0 -Scaling foo-v2 up to 10 -`, - }, { - name: "1->1 25/25 maintain minimum availability", - oldRc: oldRc(1, 1), - newRc: newRc(0, 1), - newRcExists: false, - maxUnavail: intstr.FromString("25%"), - maxSurge: intstr.FromString("25%"), - expected: []interface{}{ - up{1}, - down{oldReady: 1, newReady: 0, noop: true}, - down{oldReady: 1, newReady: 1, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods) -Scaling foo-v2 up to 1 -Scaling foo-v1 down to 0 -`, - }, { - name: "1->1 0/10 delayed readiness", - oldRc: oldRc(1, 1), - newRc: newRc(0, 1), - newRcExists: false, - maxUnavail: intstr.FromString("0%"), - maxSurge: intstr.FromString("10%"), - expected: []interface{}{ - up{1}, - down{oldReady: 1, newReady: 0, noop: true}, - down{oldReady: 1, newReady: 1, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods) -Scaling foo-v2 up to 1 -Scaling foo-v1 down to 0 -`, - }, { - name: "1->1 10/10 delayed readiness", - oldRc: oldRc(1, 1), - newRc: newRc(0, 1), - newRcExists: false, - maxUnavail: intstr.FromString("10%"), - maxSurge: intstr.FromString("10%"), - expected: []interface{}{ - up{1}, - down{oldReady: 1, newReady: 0, noop: true}, - down{oldReady: 1, newReady: 1, to: 0}, - }, - 
output: `Created foo-v2 -Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods) -Scaling foo-v2 up to 1 -Scaling foo-v1 down to 0 -`, - }, { - name: "3->3 1/1 fast readiness (absolute values)", - oldRc: oldRc(3, 3), - newRc: newRc(0, 3), - newRcExists: false, - maxUnavail: intstr.FromInt(0), - maxSurge: intstr.FromInt(1), - expected: []interface{}{ - up{1}, - down{oldReady: 3, newReady: 1, to: 2}, - up{2}, - down{oldReady: 2, newReady: 2, to: 1}, - up{3}, - down{oldReady: 1, newReady: 3, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 3, scaling down foo-v1 from 3 to 0 (keep 3 pods available, don't exceed 4 pods) -Scaling foo-v2 up to 1 -Scaling foo-v1 down to 2 -Scaling foo-v2 up to 2 -Scaling foo-v1 down to 1 -Scaling foo-v2 up to 3 -Scaling foo-v1 down to 0 -`, - }, { - name: "10->10 0/20 fast readiness, continued after restart which resulted in partial first scale-up", - oldRc: oldRc(6, 10), - newRc: newRc(5, 10), - newRcExists: false, - maxUnavail: intstr.FromString("0%"), - maxSurge: intstr.FromString("20%"), - expected: []interface{}{ - up{6}, - down{oldReady: 6, newReady: 6, to: 4}, - up{8}, - down{oldReady: 4, newReady: 8, to: 2}, - up{10}, - down{oldReady: 1, newReady: 10, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 5 to 10, scaling down foo-v1 from 6 to 0 (keep 10 pods available, don't exceed 12 pods) -Scaling foo-v2 up to 6 -Scaling foo-v1 down to 4 -Scaling foo-v2 up to 8 -Scaling foo-v1 down to 2 -Scaling foo-v2 up to 10 -Scaling foo-v1 down to 0 -`, - }, { - name: "10->20 0/300 fast readiness", - oldRc: oldRc(10, 10), - newRc: newRc(0, 20), - newRcExists: false, - maxUnavail: intstr.FromString("0%"), - maxSurge: intstr.FromString("300%"), - expected: []interface{}{ - up{20}, - down{oldReady: 10, newReady: 20, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 20, scaling down foo-v1 from 10 to 0 (keep 20 pods available, don't exceed 80 pods) -Scaling foo-v2 up to 20 -Scaling foo-v1 down to 0 -`, - }, { - name: "1->1 0/1 scale down unavailable rc to a ready rc (rollback)", - oldRc: oldRc(1, 1), - newRc: newRc(1, 1), - newRcExists: true, - maxUnavail: intstr.FromInt(0), - maxSurge: intstr.FromInt(1), - expected: []interface{}{ - up{1}, - down{oldReady: 0, newReady: 1, to: 0}, - }, - output: `Continuing update with existing controller foo-v2. -Scaling up foo-v2 from 1 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods) -Scaling foo-v1 down to 0 -`, - }, - { - name: "3->0 1/1 desired 0 (absolute values)", - oldRc: oldRc(3, 3), - newRc: newRc(0, 0), - newRcExists: true, - maxUnavail: intstr.FromInt(1), - maxSurge: intstr.FromInt(1), - expected: []interface{}{ - down{oldReady: 3, newReady: 0, to: 0}, - }, - output: `Continuing update with existing controller foo-v2. -Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 1 pods) -Scaling foo-v1 down to 0 -`, - }, - { - name: "3->0 10/10 desired 0 (percentages)", - oldRc: oldRc(3, 3), - newRc: newRc(0, 0), - newRcExists: true, - maxUnavail: intstr.FromString("10%"), - maxSurge: intstr.FromString("10%"), - expected: []interface{}{ - down{oldReady: 3, newReady: 0, to: 0}, - }, - output: `Continuing update with existing controller foo-v2. 
-Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 0 pods) -Scaling foo-v1 down to 0 -`, - }, - { - name: "3->0 10/10 desired 0 (create new RC)", - oldRc: oldRc(3, 3), - newRc: newRc(0, 0), - newRcExists: false, - maxUnavail: intstr.FromString("10%"), - maxSurge: intstr.FromString("10%"), - expected: []interface{}{ - down{oldReady: 3, newReady: 0, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 0 pods) -Scaling foo-v1 down to 0 -`, - }, - { - name: "0->0 1/1 desired 0 (absolute values)", - oldRc: oldRc(0, 0), - newRc: newRc(0, 0), - newRcExists: true, - maxUnavail: intstr.FromInt(1), - maxSurge: intstr.FromInt(1), - expected: []interface{}{ - down{oldReady: 0, newReady: 0, to: 0}, - }, - output: `Continuing update with existing controller foo-v2. -Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 0 to 0 (keep 0 pods available, don't exceed 1 pods) -`, - }, { - name: "30->2 50%/0", - oldRc: oldRc(30, 30), - newRc: newRc(0, 2), - newRcExists: false, - maxUnavail: intstr.FromString("50%"), - maxSurge: intstr.FromInt(0), - expected: []interface{}{ - down{oldReady: 30, newReady: 0, to: 1}, - up{1}, - down{oldReady: 1, newReady: 2, to: 0}, - up{2}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 30 to 0 (keep 1 pods available, don't exceed 2 pods) -Scaling foo-v1 down to 1 -Scaling foo-v2 up to 1 -Scaling foo-v1 down to 0 -Scaling foo-v2 up to 2 -`, - }, - { - name: "2->2 1/0 blocked oldRc", - oldRc: oldRc(2, 2), - newRc: newRc(0, 2), - newRcExists: false, - maxUnavail: intstr.FromInt(1), - maxSurge: intstr.FromInt(0), - expected: []interface{}{ - down{oldReady: 1, newReady: 0, to: 1}, - up{1}, - down{oldReady: 1, newReady: 1, to: 0}, - up{2}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 1 pods available, don't exceed 2 pods) -Scaling foo-v1 down to 1 -Scaling foo-v2 up to 1 -Scaling foo-v1 down to 0 -Scaling foo-v2 up to 2 -`, - }, - { - name: "1->1 1/0 allow maxUnavailability", - oldRc: oldRc(1, 1), - newRc: newRc(0, 1), - newRcExists: false, - maxUnavail: intstr.FromString("1%"), - maxSurge: intstr.FromInt(0), - expected: []interface{}{ - down{oldReady: 1, newReady: 0, to: 0}, - up{1}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 0 pods available, don't exceed 1 pods) -Scaling foo-v1 down to 0 -Scaling foo-v2 up to 1 -`, - }, - { - name: "1->2 25/25 complex asymmetric deployment", - oldRc: oldRc(1, 1), - newRc: newRc(0, 2), - newRcExists: false, - maxUnavail: intstr.FromString("25%"), - maxSurge: intstr.FromString("25%"), - expected: []interface{}{ - up{2}, - down{oldReady: 1, newReady: 2, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 1 to 0 (keep 2 pods available, don't exceed 3 pods) -Scaling foo-v2 up to 2 -Scaling foo-v1 down to 0 -`, - }, - { - name: "2->2 25/1 maxSurge trumps maxUnavailable", - oldRc: oldRc(2, 2), - newRc: newRc(0, 2), - newRcExists: false, - maxUnavail: intstr.FromString("25%"), - maxSurge: intstr.FromString("1%"), - expected: []interface{}{ - up{1}, - down{oldReady: 2, newReady: 1, to: 1}, - up{2}, - down{oldReady: 1, newReady: 2, to: 0}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 2 pods available, don't exceed 3 pods) -Scaling foo-v2 up to 1 -Scaling foo-v1 down to 1 
-Scaling foo-v2 up to 2 -Scaling foo-v1 down to 0 -`, - }, - { - name: "2->2 25/0 maxUnavailable resolves to zero, then one", - oldRc: oldRc(2, 2), - newRc: newRc(0, 2), - newRcExists: false, - maxUnavail: intstr.FromString("25%"), - maxSurge: intstr.FromString("0%"), - expected: []interface{}{ - down{oldReady: 2, newReady: 0, to: 1}, - up{1}, - down{oldReady: 1, newReady: 1, to: 0}, - up{2}, - }, - output: `Created foo-v2 -Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 1 pods available, don't exceed 2 pods) -Scaling foo-v1 down to 1 -Scaling foo-v2 up to 1 -Scaling foo-v1 down to 0 -Scaling foo-v2 up to 2 -`, - }, - } - - for i, tt := range tests { - // Extract expectations into some makeshift FIFOs so they can be returned - // in the correct order from the right places. This lets scale downs be - // expressed a single event even though the data is used from multiple - // interface calls. - t.Run(tt.name, func(t *testing.T) { - oldReady := []int{} - newReady := []int{} - upTo := []int{} - downTo := []int{} - for _, event := range tt.expected { - switch e := event.(type) { - case down: - oldReady = append(oldReady, e.oldReady) - newReady = append(newReady, e.newReady) - if !e.noop { - downTo = append(downTo, e.to) - } - case up: - upTo = append(upTo, e.to) - } - } - - // Make a way to get the next item from our FIFOs. Returns -1 if the array - // is empty. - next := func(s *[]int) int { - slice := *s - v := -1 - if len(slice) > 0 { - v = slice[0] - if len(slice) > 1 { - *s = slice[1:] - } else { - *s = []int{} - } - } - return v - } - t.Logf("running test %d (%s) (up: %v, down: %v, oldReady: %v, newReady: %v)", i, tt.name, upTo, downTo, oldReady, newReady) - updater := &RollingUpdater{ - ns: "default", - scaleAndWait: func(rc *corev1.ReplicationController, retry *scale.RetryParams, wait *scale.RetryParams) (*corev1.ReplicationController, error) { - // Return a scale up or scale down expectation depending on the rc, - // and throw errors if there is no expectation expressed for this - // call. - expected := -1 - switch { - case rc == tt.newRc: - t.Logf("scaling up %s to %d", rc.Name, rc.Spec.Replicas) - expected = next(&upTo) - case rc == tt.oldRc: - t.Logf("scaling down %s to %d", rc.Name, rc.Spec.Replicas) - expected = next(&downTo) - } - if expected == -1 { - t.Fatalf("unexpected scale of %s to %d", rc.Name, rc.Spec.Replicas) - } else if e, a := expected, int(*rc.Spec.Replicas); e != a { - t.Fatalf("expected scale of %s to %d, got %d", rc.Name, e, a) - } - // Simulate the scale. - rc.Status.Replicas = *rc.Spec.Replicas - return rc, nil - }, - getOrCreateTargetController: func(controller *corev1.ReplicationController, sourceID string) (*corev1.ReplicationController, bool, error) { - // Simulate a create vs. update of an existing controller. - return tt.newRc, tt.newRcExists, nil - }, - cleanup: func(oldRc, newRc *corev1.ReplicationController, config *RollingUpdaterConfig) error { - return nil - }, - } - // Set up a mock readiness check which handles the test assertions. - updater.getReadyPods = func(oldRc, newRc *corev1.ReplicationController, minReadySecondsDeadline int32) (int32, int32, error) { - // Return simulated readiness, and throw an error if this call has no - // expectations defined. 
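// The "keep N pods available, don't exceed M pods" figures in the expected
// outputs above come from resolving the percentages against the desired
// replica count: maxSurge rounds up, maxUnavailable rounds down, and when both
// resolve to zero one unavailable pod is allowed so the rollout can progress.
// A minimal standalone sketch of that arithmetic (resolveBounds is an
// illustrative name; percentages only, while the updater itself also accepts
// absolute values via intstr.IntOrString):
package sketch

import "math"

func resolveBounds(desired int, maxUnavailPct, maxSurgePct float64) (minAvailable, maxTotal int) {
	surge := int(math.Ceil(float64(desired) * maxSurgePct / 100))
	unavailable := int(math.Floor(float64(desired) * maxUnavailPct / 100))
	if surge == 0 && unavailable == 0 {
		// Guarantee progress even when a small replica count truncates both
		// bounds to zero, as in the "2->2 25/0" case above.
		unavailable = 1
	}
	return desired - unavailable, desired + surge
}

// resolveBounds(10, 30, 0) yields (7, 10)  - matches "10->10 30/0".
// resolveBounds(10, 0, 30) yields (10, 13) - matches "10->10 0/30".
// resolveBounds(2, 25, 0)  yields (1, 2)   - matches "2->2 25/0".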
- oldReady := next(&oldReady) - newReady := next(&newReady) - if oldReady == -1 || newReady == -1 { - t.Fatalf("unexpected getReadyPods call for:\noldRc: %#v\nnewRc: %#v", oldRc, newRc) - } - return int32(oldReady), int32(newReady), nil - } - var buffer bytes.Buffer - config := &RollingUpdaterConfig{ - Out: &buffer, - OldRc: tt.oldRc, - NewRc: tt.newRc, - UpdatePeriod: 0, - Interval: time.Millisecond, - Timeout: time.Millisecond, - CleanupPolicy: DeleteRollingUpdateCleanupPolicy, - MaxUnavailable: tt.maxUnavail, - MaxSurge: tt.maxSurge, - } - err := updater.Update(config) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if buffer.String() != tt.output { - t.Errorf("Bad output. expected:\n%s\ngot:\n%s", tt.output, buffer.String()) - } - }) - } -} - -// TestUpdate_progressTimeout ensures that an update which isn't making any -// progress will eventually time out with a specified error. -func TestUpdate_progressTimeout(t *testing.T) { - oldRc := oldRc(2, 2) - newRc := newRc(0, 2) - updater := &RollingUpdater{ - ns: "default", - scaleAndWait: func(rc *corev1.ReplicationController, retry *scale.RetryParams, wait *scale.RetryParams) (*corev1.ReplicationController, error) { - // Do nothing. - return rc, nil - }, - getOrCreateTargetController: func(controller *corev1.ReplicationController, sourceID string) (*corev1.ReplicationController, bool, error) { - return newRc, false, nil - }, - cleanup: func(oldRc, newRc *corev1.ReplicationController, config *RollingUpdaterConfig) error { - return nil - }, - } - updater.getReadyPods = func(oldRc, newRc *corev1.ReplicationController, minReadySeconds int32) (int32, int32, error) { - // Coerce a timeout by pods never becoming ready. - return 0, 0, nil - } - var buffer bytes.Buffer - config := &RollingUpdaterConfig{ - Out: &buffer, - OldRc: oldRc, - NewRc: newRc, - UpdatePeriod: 0, - Interval: time.Millisecond, - Timeout: time.Millisecond, - CleanupPolicy: DeleteRollingUpdateCleanupPolicy, - MaxUnavailable: intstr.FromInt(0), - MaxSurge: intstr.FromInt(1), - } - err := updater.Update(config) - if err == nil { - t.Fatalf("expected an error") - } - if e, a := "timed out waiting for any update progress to be made", err.Error(); e != a { - t.Fatalf("expected error message: %s, got: %s", e, a) - } -} - -func TestUpdate_assignOriginalAnnotation(t *testing.T) { - oldRc := oldRc(1, 1) - delete(oldRc.Annotations, originalReplicasAnnotation) - newRc := newRc(1, 1) - fake := fake.NewSimpleClientset(oldRc) - updater := &RollingUpdater{ - rcClient: fake.CoreV1(), - podClient: fake.CoreV1(), - ns: "default", - scaleAndWait: func(rc *corev1.ReplicationController, retry *scale.RetryParams, wait *scale.RetryParams) (*corev1.ReplicationController, error) { - return rc, nil - }, - getOrCreateTargetController: func(controller *corev1.ReplicationController, sourceID string) (*corev1.ReplicationController, bool, error) { - return newRc, false, nil - }, - cleanup: func(oldRc, newRc *corev1.ReplicationController, config *RollingUpdaterConfig) error { - return nil - }, - getReadyPods: func(oldRc, newRc *corev1.ReplicationController, minReadySeconds int32) (int32, int32, error) { - return 1, 1, nil - }, - } - var buffer bytes.Buffer - config := &RollingUpdaterConfig{ - Out: &buffer, - OldRc: oldRc, - NewRc: newRc, - UpdatePeriod: 0, - Interval: time.Millisecond, - Timeout: time.Millisecond, - CleanupPolicy: DeleteRollingUpdateCleanupPolicy, - MaxUnavailable: intstr.FromString("100%"), - } - err := updater.Update(config) - if err != nil { - t.Fatalf("unexpected error: 
%v", err) - } - updateAction := fake.Actions()[1].(testcore.UpdateAction) - if updateAction.GetResource().GroupResource() != corev1.Resource("replicationcontrollers") { - t.Fatalf("expected rc to be updated: %#v", updateAction) - } - if e, a := "1", updateAction.GetObject().(*corev1.ReplicationController).Annotations[originalReplicasAnnotation]; e != a { - t.Fatalf("expected annotation value %s, got %s", e, a) - } -} - -func TestRollingUpdater_multipleContainersInPod(t *testing.T) { - tests := []struct { - name string - oldRc *corev1.ReplicationController - newRc *corev1.ReplicationController - container string - image string - deploymentKey string - }{ - { - name: "test1", - oldRc: &corev1.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo", - }, - Spec: corev1.ReplicationControllerSpec{ - Selector: map[string]string{ - "dk": "old", - }, - Template: &corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "dk": "old", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "container1", - Image: "image1", - }, - { - Name: "container2", - Image: "image2", - }, - }, - }, - }, - }, - }, - newRc: &corev1.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo", - }, - Spec: corev1.ReplicationControllerSpec{ - Selector: map[string]string{ - "dk": "old", - }, - Template: &corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "dk": "old", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "container1", - Image: "newimage", - }, - { - Name: "container2", - Image: "image2", - }, - }, - }, - }, - }, - }, - container: "container1", - image: "newimage", - deploymentKey: "dk", - }, - { - name: "test2", - oldRc: &corev1.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "bar", - }, - Spec: corev1.ReplicationControllerSpec{ - Selector: map[string]string{ - "dk": "old", - }, - Template: &corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "dk": "old", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "container1", - Image: "image1", - }, - }, - }, - }, - }, - }, - newRc: &corev1.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "bar", - }, - Spec: corev1.ReplicationControllerSpec{ - Selector: map[string]string{ - "dk": "old", - }, - Template: &corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "dk": "old", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "container1", - Image: "newimage", - }, - }, - }, - }, - }, - }, - container: "container1", - image: "newimage", - deploymentKey: "dk", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fake := fake.NewSimpleClientset(tt.oldRc) - - codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
- - deploymentHash, err := util.HashObject(tt.newRc, codec) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - tt.newRc.Spec.Selector[tt.deploymentKey] = deploymentHash - tt.newRc.Spec.Template.Labels[tt.deploymentKey] = deploymentHash - tt.newRc.Name = fmt.Sprintf("%s-%s", tt.newRc.Name, deploymentHash) - - config := &NewControllerConfig{ - Namespace: metav1.NamespaceDefault, - OldName: tt.oldRc.ObjectMeta.Name, - NewName: tt.newRc.ObjectMeta.Name, - Image: tt.image, - Container: tt.container, - DeploymentKey: tt.deploymentKey, - } - updatedRc, err := CreateNewControllerFromCurrentController(fake.CoreV1(), codec, config) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(updatedRc, tt.newRc) { - t.Errorf("expected:\n%#v\ngot:\n%#v\n", tt.newRc, updatedRc) - } - }) - } -} - -// TestRollingUpdater_cleanupWithClients ensures that the cleanup policy is -// correctly implemented. -func TestRollingUpdater_cleanupWithClients(t *testing.T) { - rc := oldRc(2, 2) - rcExisting := newRc(1, 3) - - tests := []struct { - name string - policy RollingUpdaterCleanupPolicy - responses []runtime.Object - expected []string - }{ - { - name: "preserve", - policy: PreserveRollingUpdateCleanupPolicy, - responses: []runtime.Object{rcExisting}, - expected: []string{ - "get", - "update", - "get", - "get", - }, - }, - { - name: "delete", - policy: DeleteRollingUpdateCleanupPolicy, - responses: []runtime.Object{rcExisting}, - expected: []string{ - "get", - "update", - "get", - "get", - "delete", - }, - }, - //{ - // This cases is separated to a standalone - // TestRollingUpdater_cleanupWithClients_Rename. We have to do this - // because the unversioned fake client is unable to delete objects. - // TODO: uncomment this case when the unversioned fake client uses - // pkg/client/testing/core. - // { - // name: "rename", - // policy: RenameRollingUpdateCleanupPolicy, - // responses: []runtime.Object{rcExisting}, - // expected: []string{ - // "get", - // "update", - // "get", - // "get", - // "delete", - // "create", - // "delete", - // }, - // }, - //}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - objs := []runtime.Object{rc} - objs = append(objs, tt.responses...) - fake := fake.NewSimpleClientset(objs...) - updater := &RollingUpdater{ - ns: "default", - rcClient: fake.CoreV1(), - podClient: fake.CoreV1(), - } - config := &RollingUpdaterConfig{ - Out: ioutil.Discard, - OldRc: rc, - NewRc: rcExisting, - UpdatePeriod: 0, - Interval: time.Millisecond, - Timeout: time.Millisecond, - CleanupPolicy: tt.policy, - } - err := updater.cleanupWithClients(rc, rcExisting, config) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if len(fake.Actions()) != len(tt.expected) { - t.Fatalf("%s: unexpected actions: %v, expected %v", tt.name, fake.Actions(), tt.expected) - } - for j, action := range fake.Actions() { - if e, a := tt.expected[j], action.GetVerb(); e != a { - t.Errorf("%s: unexpected action: expected %s, got %s", tt.name, e, a) - } - } - }) - } -} - -// TestRollingUpdater_cleanupWithClients_Rename tests the rename cleanup policy. It's separated to -// a standalone test because the unversioned fake client is unable to delete -// objects. -// TODO: move this test back to TestRollingUpdater_cleanupWithClients -// when the fake client uses pkg/client/testing/core in the future. 
-func TestRollingUpdater_cleanupWithClients_Rename(t *testing.T) { - rc := oldRc(2, 2) - rcExisting := newRc(1, 3) - expectedActions := []string{"delete", "get", "create"} - fake := fake.NewSimpleClientset() - fake.AddReactor("*", "*", func(action testcore.Action) (handled bool, ret runtime.Object, err error) { - switch action.(type) { - case testcore.CreateAction: - return true, nil, nil - case testcore.GetAction: - return true, nil, errors.NewNotFound(schema.GroupResource{}, "") - case testcore.DeleteAction: - return true, nil, nil - } - return false, nil, nil - }) - - err := Rename(fake.CoreV1(), rcExisting, rc.Name) - if err != nil { - t.Fatal(err) - } - for j, action := range fake.Actions() { - if e, a := expectedActions[j], action.GetVerb(); e != a { - t.Errorf("unexpected action: expected %s, got %s", e, a) - } - } -} - -func TestFindSourceController(t *testing.T) { - ctrl1 := corev1.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo", - Annotations: map[string]string{ - sourceIDAnnotation: "bar:1234", - }, - }, - } - ctrl2 := corev1.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "bar", - Annotations: map[string]string{ - sourceIDAnnotation: "foo:12345", - }, - }, - } - ctrl3 := corev1.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "baz", - Annotations: map[string]string{ - sourceIDAnnotation: "baz:45667", - }, - }, - } - tests := []struct { - list *corev1.ReplicationControllerList - expectedController *corev1.ReplicationController - name string - expectError bool - }{ - { - list: &corev1.ReplicationControllerList{}, - expectError: true, - }, - { - list: &corev1.ReplicationControllerList{ - Items: []corev1.ReplicationController{ctrl1}, - }, - name: "foo", - expectError: true, - }, - { - list: &corev1.ReplicationControllerList{ - Items: []corev1.ReplicationController{ctrl1}, - }, - name: "bar", - expectedController: &ctrl1, - }, - { - list: &corev1.ReplicationControllerList{ - Items: []corev1.ReplicationController{ctrl1, ctrl2}, - }, - name: "bar", - expectedController: &ctrl1, - }, - { - list: &corev1.ReplicationControllerList{ - Items: []corev1.ReplicationController{ctrl1, ctrl2}, - }, - name: "foo", - expectedController: &ctrl2, - }, - { - list: &corev1.ReplicationControllerList{ - Items: []corev1.ReplicationController{ctrl1, ctrl2, ctrl3}, - }, - name: "baz", - expectedController: &ctrl3, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fakeClient := fake.NewSimpleClientset(tt.list) - ctrl, err := FindSourceController(fakeClient.CoreV1(), "default", tt.name) - if tt.expectError && err == nil { - t.Errorf("unexpected non-error") - } - if !tt.expectError && err != nil { - t.Errorf("unexpected error") - } - if !reflect.DeepEqual(ctrl, tt.expectedController) { - t.Errorf("expected:\n%v\ngot:\n%v\n", tt.expectedController, ctrl) - } - }) - } -} - -func TestUpdateExistingReplicationController(t *testing.T) { - tests := []struct { - rc *corev1.ReplicationController - name string - deploymentKey string - deploymentValue string - - expectedRc *corev1.ReplicationController - expectErr bool - }{ - { - rc: &corev1.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo", - }, - Spec: corev1.ReplicationControllerSpec{ - Template: &corev1.PodTemplateSpec{}, - }, - }, - name: "foo", - deploymentKey: "dk", - deploymentValue: "some-hash", - - expectedRc: 
&corev1.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo", - Annotations: map[string]string{ - "kubectl.kubernetes.io/next-controller-id": "foo", - }, - }, - Spec: corev1.ReplicationControllerSpec{ - Selector: map[string]string{ - "dk": "some-hash", - }, - Template: &corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "dk": "some-hash", - }, - }, - }, - }, - }, - }, - { - rc: &corev1.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo", - }, - Spec: corev1.ReplicationControllerSpec{ - Template: &corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "dk": "some-other-hash", - }, - }, - }, - Selector: map[string]string{ - "dk": "some-other-hash", - }, - }, - }, - name: "foo", - deploymentKey: "dk", - deploymentValue: "some-hash", - - expectedRc: &corev1.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: metav1.NamespaceDefault, - Name: "foo", - Annotations: map[string]string{ - "kubectl.kubernetes.io/next-controller-id": "foo", - }, - }, - Spec: corev1.ReplicationControllerSpec{ - Selector: map[string]string{ - "dk": "some-other-hash", - }, - Template: &corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "dk": "some-other-hash", - }, - }, - }, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - buffer := &bytes.Buffer{} - fakeClient := fake.NewSimpleClientset(tt.expectedRc) - rc, err := UpdateExistingReplicationController(fakeClient.CoreV1(), fakeClient.CoreV1(), tt.rc, "default", tt.name, tt.deploymentKey, tt.deploymentValue, buffer) - if !reflect.DeepEqual(rc, tt.expectedRc) { - t.Errorf("expected:\n%#v\ngot:\n%#v\n", tt.expectedRc, rc) - } - if tt.expectErr && err == nil { - t.Errorf("unexpected non-error") - } - if !tt.expectErr && err != nil { - t.Errorf("unexpected error: %v", err) - } - }) - } -} - -func TestUpdateRcWithRetries(t *testing.T) { - codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) - one := int32(1) - grace := int64(30) - enableServiceLinks := corev1.DefaultEnableServiceLinks - rc := &corev1.ReplicationController{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ReplicationController", - }, - ObjectMeta: metav1.ObjectMeta{Name: "rc", - Labels: map[string]string{ - "foo": "bar", - }, - }, - Spec: corev1.ReplicationControllerSpec{ - Replicas: &one, - Selector: map[string]string{ - "foo": "bar", - }, - Template: &corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "foo": "bar", - }, - }, - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyAlways, - DNSPolicy: corev1.DNSClusterFirst, - TerminationGracePeriodSeconds: &grace, - SecurityContext: &corev1.PodSecurityContext{}, - EnableServiceLinks: &enableServiceLinks, - }, - }, - }, - } - rc.Spec.Template.Spec.SchedulerName = "default-scheduler" - - // Test end to end updating of the rc with retries. Essentially make sure the update handler - // sees the right updates, failures in update/get are handled properly, and that the updated - // rc with new resource version is returned to the caller. Without any of these rollingupdate - // will fail cryptically. 
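// The retry behaviour exercised below (two 409 Conflicts, a failed GET that is
// tolerated, then success carrying the new resourceVersion) is the usual
// get/mutate/update loop. A hedged sketch of such a loop using client-go's
// retry helper; updateWithRetries and apply are illustrative names, not the
// package's own API:
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/util/retry"
)

func updateWithRetries(c corev1client.ReplicationControllersGetter, namespace string, rc *corev1.ReplicationController, apply func(*corev1.ReplicationController)) (*corev1.ReplicationController, error) {
	var updated *corev1.ReplicationController
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		// Re-apply the caller's mutation to whatever copy we currently hold.
		apply(rc)
		var updateErr error
		updated, updateErr = c.ReplicationControllers(namespace).Update(context.TODO(), rc, metav1.UpdateOptions{})
		if updateErr == nil {
			return nil
		}
		// Try to refresh the object before the next attempt; if the GET itself
		// fails, keep mutating the copy we already have.
		if latest, getErr := c.ReplicationControllers(namespace).Get(context.TODO(), rc.Name, metav1.GetOptions{}); getErr == nil {
			rc = latest
		}
		return updateErr
	})
	return updated, err
}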
- newRc := *rc - newRc.ResourceVersion = "2" - newRc.Spec.Selector["baz"] = "foobar" - header := http.Header{} - header.Set("Content-Type", runtime.ContentTypeJSON) - updates := []*http.Response{ - {StatusCode: http.StatusConflict, Header: header, Body: objBody(codec, &corev1.ReplicationController{})}, // conflict - {StatusCode: http.StatusConflict, Header: header, Body: objBody(codec, &corev1.ReplicationController{})}, // conflict - {StatusCode: http.StatusOK, Header: header, Body: objBody(codec, &newRc)}, - } - gets := []*http.Response{ - {StatusCode: http.StatusInternalServerError, Header: header, Body: objBody(codec, &corev1.ReplicationController{})}, - {StatusCode: http.StatusOK, Header: header, Body: objBody(codec, rc)}, - } - fakeClient := &manualfake.RESTClient{ - GroupVersion: corev1.SchemeGroupVersion, - NegotiatedSerializer: scheme.Codecs, - Client: manualfake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { - switch p, m := req.URL.Path, req.Method; { - case p == "/api/v1/namespaces/default/replicationcontrollers/rc" && m == "PUT": - update := updates[0] - updates = updates[1:] - // We should always get an update with a valid rc even when the get fails. The rc should always - // contain the update. - if c, ok := readOrDie(t, req, codec).(*corev1.ReplicationController); !ok || !apiequality.Semantic.DeepEqual(rc, c) { - t.Errorf("Unexpected update body, got %+v expected %+v", c, rc) - t.Error(diff.ObjectDiff(rc, c)) - } else if sel, ok := c.Spec.Selector["baz"]; !ok || sel != "foobar" { - t.Errorf("Expected selector label update, got %+v", c.Spec.Selector) - } else { - delete(c.Spec.Selector, "baz") - } - return update, nil - case p == "/api/v1/namespaces/default/replicationcontrollers/rc" && m == "GET": - get := gets[0] - gets = gets[1:] - return get, nil - default: - t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) - return nil, nil - } - }), - } - clientConfig := &restclient.Config{ - APIPath: "/api", - ContentConfig: restclient.ContentConfig{ - NegotiatedSerializer: scheme.Codecs, - GroupVersion: &corev1.SchemeGroupVersion, - }, - } - restClient, _ := restclient.RESTClientFor(clientConfig) - restClient.Client = fakeClient.Client - clientset := kubernetes.New(restClient) - - if rc, err := updateRcWithRetries( - clientset.CoreV1(), "default", rc, func(c *corev1.ReplicationController) { - c.Spec.Selector["baz"] = "foobar" - }); err != nil { - t.Errorf("unexpected error: %v", err) - } else if sel, ok := rc.Spec.Selector["baz"]; !ok || sel != "foobar" || rc.ResourceVersion != "2" { - t.Errorf("Expected updated rc, got %+v", rc) - } - if len(updates) != 0 || len(gets) != 0 { - t.Errorf("Remaining updates %#v gets %#v", updates, gets) - } -} - -func readOrDie(t *testing.T, req *http.Request, codec runtime.Codec) runtime.Object { - data, err := ioutil.ReadAll(req.Body) - if err != nil { - t.Errorf("Error reading: %v", err) - t.FailNow() - } - codec2 := scheme.Codecs.UniversalDecoder(scheme.Scheme.PrioritizedVersionsAllGroups()...) - obj, err := runtime.Decode(codec2, data) - if err != nil { - t.Log(string(data)) - t.Errorf("error decoding: %v", err) - t.FailNow() - } - return obj -} - -func objBody(codec runtime.Codec, obj runtime.Object) io.ReadCloser { - return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj)))) -} - -func TestAddDeploymentHash(t *testing.T) { - buf := &bytes.Buffer{} - codec := scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...) 
- rc := &corev1.ReplicationController{ - ObjectMeta: metav1.ObjectMeta{Name: "rc"}, - Spec: corev1.ReplicationControllerSpec{ - Selector: map[string]string{ - "foo": "bar", - }, - Template: &corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "foo": "bar", - }, - }, - }, - }, - } - - podList := &corev1.PodList{ - Items: []corev1.Pod{ - {ObjectMeta: metav1.ObjectMeta{Name: "foo"}}, - {ObjectMeta: metav1.ObjectMeta{Name: "bar"}}, - {ObjectMeta: metav1.ObjectMeta{Name: "baz"}}, - }, - } - - seen := sets.String{} - updatedRc := false - fakeClient := &manualfake.RESTClient{ - GroupVersion: corev1.SchemeGroupVersion, - NegotiatedSerializer: scheme.Codecs, - Client: manualfake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { - header := http.Header{} - header.Set("Content-Type", runtime.ContentTypeJSON) - switch p, m := req.URL.Path, req.Method; { - case p == "/api/v1/namespaces/default/pods" && m == "GET": - if req.URL.RawQuery != "labelSelector=foo%3Dbar" { - t.Errorf("Unexpected query string: %s", req.URL.RawQuery) - } - return &http.Response{StatusCode: http.StatusOK, Header: header, Body: objBody(codec, podList)}, nil - case p == "/api/v1/namespaces/default/pods/foo" && m == "PUT": - seen.Insert("foo") - obj := readOrDie(t, req, codec) - podList.Items[0] = *(obj.(*corev1.Pod)) - return &http.Response{StatusCode: http.StatusOK, Header: header, Body: objBody(codec, &podList.Items[0])}, nil - case p == "/api/v1/namespaces/default/pods/bar" && m == "PUT": - seen.Insert("bar") - obj := readOrDie(t, req, codec) - podList.Items[1] = *(obj.(*corev1.Pod)) - return &http.Response{StatusCode: http.StatusOK, Header: header, Body: objBody(codec, &podList.Items[1])}, nil - case p == "/api/v1/namespaces/default/pods/baz" && m == "PUT": - seen.Insert("baz") - obj := readOrDie(t, req, codec) - podList.Items[2] = *(obj.(*corev1.Pod)) - return &http.Response{StatusCode: http.StatusOK, Header: header, Body: objBody(codec, &podList.Items[2])}, nil - case p == "/api/v1/namespaces/default/replicationcontrollers/rc" && m == "PUT": - updatedRc = true - return &http.Response{StatusCode: http.StatusOK, Header: header, Body: objBody(codec, rc)}, nil - default: - t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) - return nil, nil - } - }), - } - clientConfig := &restclient.Config{ - APIPath: "/api", - ContentConfig: restclient.ContentConfig{ - NegotiatedSerializer: scheme.Codecs, - GroupVersion: &corev1.SchemeGroupVersion, - }, - } - restClient, _ := restclient.RESTClientFor(clientConfig) - restClient.Client = fakeClient.Client - clientset := kubernetes.New(restClient) - - if _, err := AddDeploymentKeyToReplicationController(rc, clientset.CoreV1(), clientset.CoreV1(), "dk", "hash", metav1.NamespaceDefault, buf); err != nil { - t.Errorf("unexpected error: %v", err) - } - for _, pod := range podList.Items { - if !seen.Has(pod.Name) { - t.Errorf("Missing update for pod: %s", pod.Name) - } - } - if !updatedRc { - t.Errorf("Failed to update replication controller with new labels") - } -} - -func TestRollingUpdater_readyPods(t *testing.T) { - count := 0 - now := metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC) - mkpod := func(owner *corev1.ReplicationController, ready bool, readyTime metav1.Time) *corev1.Pod { - count = count + 1 - labels := map[string]string{} - for k, v := range owner.Spec.Selector { - labels[k] = v - } - status := corev1.ConditionTrue - if !ready { - status = corev1.ConditionFalse - } - return &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - 
Namespace: metav1.NamespaceDefault, - Name: fmt.Sprintf("pod-%d", count), - Labels: labels, - }, - Status: corev1.PodStatus{ - Conditions: []corev1.PodCondition{ - { - Type: corev1.PodReady, - Status: status, - LastTransitionTime: readyTime, - }, - }, - }, - } - } - - tests := []struct { - name string - oldRc *corev1.ReplicationController - newRc *corev1.ReplicationController - // expectated old/new ready counts - oldReady int32 - newReady int32 - // pods owned by the rcs; indicate whether they're ready - oldPods []bool - newPods []bool - // deletions - should be less then the size of the respective slice above - // e.g. len(oldPods) > oldPodDeletions && len(newPods) > newPodDeletions - oldPodDeletions int - newPodDeletions int - // specify additional time to wait for deployment to wait on top of the - // pod ready time - minReadySeconds int32 - podReadyTimeFn func() metav1.Time - nowFn func() metav1.Time - }{ - { - name: "test1", - oldRc: oldRc(4, 4), - newRc: newRc(4, 4), - oldReady: 4, - newReady: 2, - oldPods: []bool{ - true, - true, - true, - true, - }, - newPods: []bool{ - true, - false, - true, - false, - }, - }, - { - name: "test2", - oldRc: oldRc(4, 4), - newRc: newRc(4, 4), - oldReady: 0, - newReady: 1, - oldPods: []bool{ - false, - }, - newPods: []bool{ - true, - }, - }, - { - name: "test3", - oldRc: oldRc(4, 4), - newRc: newRc(4, 4), - oldReady: 1, - newReady: 0, - oldPods: []bool{ - true, - }, - newPods: []bool{ - false, - }, - }, - { - name: "test4", - oldRc: oldRc(4, 4), - newRc: newRc(4, 4), - oldReady: 0, - newReady: 0, - oldPods: []bool{ - true, - }, - newPods: []bool{ - true, - }, - minReadySeconds: 5, - nowFn: func() metav1.Time { return now }, - }, - { - name: "test5", - oldRc: oldRc(4, 4), - newRc: newRc(4, 4), - oldReady: 1, - newReady: 1, - oldPods: []bool{ - true, - }, - newPods: []bool{ - true, - }, - minReadySeconds: 5, - nowFn: func() metav1.Time { return metav1.Time{Time: now.Add(time.Duration(6 * time.Second))} }, - podReadyTimeFn: func() metav1.Time { return now }, - }, - { - name: "test6", - oldRc: oldRc(4, 4), - newRc: newRc(4, 4), - oldReady: 2, - newReady: 0, - oldPods: []bool{ - // All old pods are ready - true, true, true, true, - }, - // Two of them have been marked for deletion though - oldPodDeletions: 2, - }, - } - - for i, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Logf("evaluating test %d", i) - if tt.nowFn == nil { - tt.nowFn = func() metav1.Time { return now } - } - if tt.podReadyTimeFn == nil { - tt.podReadyTimeFn = tt.nowFn - } - // Populate the fake client with pods associated with their owners. - pods := []runtime.Object{} - for _, ready := range tt.oldPods { - pod := mkpod(tt.oldRc, ready, tt.podReadyTimeFn()) - if tt.oldPodDeletions > 0 { - now := metav1.Now() - pod.DeletionTimestamp = &now - tt.oldPodDeletions-- - } - pods = append(pods, pod) - } - for _, ready := range tt.newPods { - pod := mkpod(tt.newRc, ready, tt.podReadyTimeFn()) - if tt.newPodDeletions > 0 { - now := metav1.Now() - pod.DeletionTimestamp = &now - tt.newPodDeletions-- - } - pods = append(pods, pod) - } - client := fake.NewSimpleClientset(pods...) 
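// The cases above encode the availability rule applied when counting ready
// pods: a pod counts only after its Ready condition has been true for at
// least minReadySeconds, and a pod already marked for deletion never counts.
// A small sketch of that predicate (podIsAvailable is an illustrative name,
// not the updater's own helper):
package sketch

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func podIsAvailable(pod *corev1.Pod, minReadySeconds int32, now metav1.Time) bool {
	if pod.DeletionTimestamp != nil {
		// Matches the case where all old pods are ready but two are being deleted.
		return false
	}
	for _, c := range pod.Status.Conditions {
		if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
			// Ready, but only counted once it has stayed ready long enough.
			return now.Time.Sub(c.LastTransitionTime.Time) >= time.Duration(minReadySeconds)*time.Second
		}
	}
	return false
}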
- - updater := &RollingUpdater{ - ns: "default", - rcClient: client.CoreV1(), - podClient: client.CoreV1(), - nowFn: tt.nowFn, - } - oldReady, newReady, err := updater.readyPods(tt.oldRc, tt.newRc, tt.minReadySeconds) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if e, a := tt.oldReady, oldReady; e != a { - t.Errorf("expected old ready %d, got %d", e, a) - } - if e, a := tt.newReady, newReady; e != a { - t.Errorf("expected new ready %d, got %d", e, a) - } - }) - } -} diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go deleted file mode 100644 index eae7a1ef037..00000000000 --- a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go +++ /dev/null @@ -1,480 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rollingupdate - -import ( - "bytes" - "context" - "fmt" - "time" - - "github.com/spf13/cobra" - "k8s.io/klog" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/cli-runtime/pkg/printers" - "k8s.io/cli-runtime/pkg/resource" - "k8s.io/client-go/kubernetes" - scaleclient "k8s.io/client-go/scale" - cmdutil "k8s.io/kubectl/pkg/cmd/util" - "k8s.io/kubectl/pkg/scheme" - "k8s.io/kubectl/pkg/util" - "k8s.io/kubectl/pkg/util/i18n" - "k8s.io/kubectl/pkg/util/templates" - "k8s.io/kubectl/pkg/validation" -) - -var ( - rollingUpdateLong = templates.LongDesc(i18n.T(` - Perform a rolling update of the given ReplicationController. - - Replaces the specified replication controller with a new replication controller by updating one pod at a time to use the - new PodTemplate. The new-controller.json must specify the same namespace as the - existing replication controller and overwrite at least one (common) label in its replicaSelector. - - ![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate.svg)`)) - - rollingUpdateExample = templates.Examples(i18n.T(` - # Update pods of frontend-v1 using new replication controller data in frontend-v2.json. - kubectl rolling-update frontend-v1 -f frontend-v2.json - - # Update pods of frontend-v1 using JSON data passed into stdin. - cat frontend-v2.json | kubectl rolling-update frontend-v1 -f - - - # Update the pods of frontend-v1 to frontend-v2 by just changing the image, and switching the - # name of the replication controller. - kubectl rolling-update frontend-v1 frontend-v2 --image=image:v2 - - # Update the pods of frontend by just changing the image, and keeping the old name. - kubectl rolling-update frontend --image=image:v2 - - # Abort and reverse an existing rollout in progress (from frontend-v1 to frontend-v2). 
- kubectl rolling-update frontend-v1 frontend-v2 --rollback`)) -) - -const ( - updatePeriod = 1 * time.Minute - timeout = 5 * time.Minute - pollInterval = 3 * time.Second -) - -type RollingUpdateOptions struct { - FilenameOptions *resource.FilenameOptions - - OldName string - KeepOldName bool - - DeploymentKey string - Image string - Container string - PullPolicy string - Rollback bool - Period time.Duration - Timeout time.Duration - Interval time.Duration - DryRun bool - OutputFormat string - Namespace string - EnforceNamespace bool - - ScaleClient scaleclient.ScalesGetter - ClientSet kubernetes.Interface - Builder *resource.Builder - - ShouldValidate bool - Validator func(bool) (validation.Schema, error) - - FindNewName func(*corev1.ReplicationController) string - - PrintFlags *genericclioptions.PrintFlags - ToPrinter func(string) (printers.ResourcePrinter, error) - - genericclioptions.IOStreams -} - -func NewRollingUpdateOptions(streams genericclioptions.IOStreams) *RollingUpdateOptions { - return &RollingUpdateOptions{ - PrintFlags: genericclioptions.NewPrintFlags("rolling updated").WithTypeSetter(scheme.Scheme), - FilenameOptions: &resource.FilenameOptions{}, - DeploymentKey: "deployment", - Timeout: timeout, - Interval: pollInterval, - Period: updatePeriod, - - IOStreams: streams, - } -} - -func NewCmdRollingUpdate(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) *cobra.Command { - o := NewRollingUpdateOptions(ioStreams) - - cmd := &cobra.Command{ - Use: "rolling-update OLD_CONTROLLER_NAME ([NEW_CONTROLLER_NAME] --image=NEW_CONTAINER_IMAGE | -f NEW_CONTROLLER_SPEC)", - DisableFlagsInUseLine: true, - Short: "Perform a rolling update. This command is deprecated, use rollout instead.", - Long: rollingUpdateLong, - Example: rollingUpdateExample, - Deprecated: `use "rollout" instead`, - Hidden: true, - Run: func(cmd *cobra.Command, args []string) { - cmdutil.CheckErr(o.Complete(f, cmd, args)) - cmdutil.CheckErr(o.Validate(cmd, args)) - cmdutil.CheckErr(o.Run()) - }, - } - - o.PrintFlags.AddFlags(cmd) - - cmd.Flags().DurationVar(&o.Period, "update-period", o.Period, `Time to wait between updating pods. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".`) - cmd.Flags().DurationVar(&o.Interval, "poll-interval", o.Interval, `Time delay between polling for replication controller status after the update. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".`) - cmd.Flags().DurationVar(&o.Timeout, "timeout", o.Timeout, `Max time to wait for a replication controller to update before giving up. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".`) - usage := "Filename or URL to file to use to create the new replication controller." - cmdutil.AddJsonFilenameFlag(cmd.Flags(), &o.FilenameOptions.Filenames, usage) - cmd.Flags().StringVar(&o.Image, "image", o.Image, i18n.T("Image to use for upgrading the replication controller. Must be distinct from the existing image (either new image or new image tag). Can not be used with --filename/-f")) - cmd.Flags().StringVar(&o.DeploymentKey, "deployment-label-key", o.DeploymentKey, i18n.T("The key to use to differentiate between two different controllers, default 'deployment'. Only relevant when --image is specified, ignored otherwise")) - cmd.Flags().StringVar(&o.Container, "container", o.Container, i18n.T("Container name which will have its image upgraded. Only relevant when --image is specified, ignored otherwise. 
Required when using --image on a multi-container pod")) - cmd.Flags().StringVar(&o.PullPolicy, "image-pull-policy", o.PullPolicy, i18n.T("Explicit policy for when to pull container images. Required when --image is same as existing image, ignored otherwise.")) - cmd.Flags().BoolVar(&o.Rollback, "rollback", o.Rollback, "If true, this is a request to abort an existing rollout that is partially rolled out. It effectively reverses current and next and runs a rollout") - cmdutil.AddDryRunFlag(cmd) - cmdutil.AddValidateFlags(cmd) - - return cmd -} - -func validateArguments(cmd *cobra.Command, filenames, args []string) error { - deploymentKey := cmdutil.GetFlagString(cmd, "deployment-label-key") - image := cmdutil.GetFlagString(cmd, "image") - rollback := cmdutil.GetFlagBool(cmd, "rollback") - - errors := []error{} - if len(deploymentKey) == 0 { - errors = append(errors, cmdutil.UsageErrorf(cmd, "--deployment-label-key can not be empty")) - } - if len(filenames) > 1 { - errors = append(errors, cmdutil.UsageErrorf(cmd, "May only specify a single filename for new controller")) - } - - if !rollback { - if len(filenames) == 0 && len(image) == 0 { - errors = append(errors, cmdutil.UsageErrorf(cmd, "Must specify --filename or --image for new controller")) - } else if len(filenames) != 0 && len(image) != 0 { - errors = append(errors, cmdutil.UsageErrorf(cmd, "--filename and --image can not both be specified")) - } - } else { - if len(filenames) != 0 || len(image) != 0 { - errors = append(errors, cmdutil.UsageErrorf(cmd, "Don't specify --filename or --image on rollback")) - } - } - - if len(args) < 1 { - errors = append(errors, cmdutil.UsageErrorf(cmd, "Must specify the controller to update")) - } - - return utilerrors.NewAggregate(errors) -} - -func (o *RollingUpdateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { - if len(args) > 0 { - o.OldName = args[0] - } - o.DryRun = getClientSideDryRun(cmd) - o.OutputFormat = cmdutil.GetFlagString(cmd, "output") - o.KeepOldName = len(args) == 1 - o.ShouldValidate = cmdutil.GetFlagBool(cmd, "validate") - - o.Validator = f.Validator - o.FindNewName = func(obj *corev1.ReplicationController) string { - return findNewName(args, obj) - } - - var err error - o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() - if err != nil { - return err - } - - o.ScaleClient, err = cmdutil.ScaleClientFn(f) - if err != nil { - return err - } - - o.ClientSet, err = f.KubernetesClientSet() - if err != nil { - return err - } - - o.Builder = f.NewBuilder() - - o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { - o.PrintFlags.NamePrintFlags.Operation = operation - if o.DryRun { - o.PrintFlags.Complete("%s (dry run)") - } - - return o.PrintFlags.ToPrinter() - } - return nil -} - -func (o *RollingUpdateOptions) Validate(cmd *cobra.Command, args []string) error { - return validateArguments(cmd, o.FilenameOptions.Filenames, args) -} - -func (o *RollingUpdateOptions) Run() error { - filename := "" - if len(o.FilenameOptions.Filenames) > 0 { - filename = o.FilenameOptions.Filenames[0] - } - - coreClient := o.ClientSet.CoreV1() - - var newRc *corev1.ReplicationController - // fetch rc - oldRc, err := coreClient.ReplicationControllers(o.Namespace).Get(context.TODO(), o.OldName, metav1.GetOptions{}) - if err != nil { - if !errors.IsNotFound(err) || len(o.Image) == 0 || !o.KeepOldName { - return err - } - // We're in the middle of a rename, look for an RC with a source annotation of oldName - newRc, err := 
FindSourceController(coreClient, o.Namespace, o.OldName) - if err != nil { - return err - } - return Rename(coreClient, newRc, o.OldName) - } - - var replicasDefaulted bool - - if len(filename) != 0 { - schema, err := o.Validator(o.ShouldValidate) - if err != nil { - return err - } - - request := o.Builder. - Unstructured(). - Schema(schema). - NamespaceParam(o.Namespace).DefaultNamespace(). - FilenameParam(o.EnforceNamespace, &resource.FilenameOptions{Recursive: false, Filenames: []string{filename}}). - Flatten(). - Do() - infos, err := request.Infos() - if err != nil { - return err - } - // Handle filename input from stdin. - if len(infos) > 1 { - return fmt.Errorf("%s specifies multiple items", filename) - } - if len(infos) == 0 { - return fmt.Errorf("please make sure %s exists and is not empty", filename) - } - - uncastVersionedObj, err := scheme.Scheme.ConvertToVersion(infos[0].Object, corev1.SchemeGroupVersion) - if err != nil { - klog.V(4).Infof("Object %T is not a ReplicationController", infos[0].Object) - return fmt.Errorf("%s contains a %v not a ReplicationController", filename, infos[0].Object.GetObjectKind().GroupVersionKind()) - } - switch t := uncastVersionedObj.(type) { - case *corev1.ReplicationController: - replicasDefaulted = t.Spec.Replicas == nil - newRc = t - } - if newRc == nil { - klog.V(4).Infof("Object %T is not a ReplicationController", infos[0].Object) - return fmt.Errorf("%s contains a %v not a ReplicationController", filename, infos[0].Object.GetObjectKind().GroupVersionKind()) - } - } - - // If the --image option is specified, we need to create a new rc with at least one different selector - // than the old rc. This selector is the hash of the rc, with a suffix to provide uniqueness for - // same-image updates. - if len(o.Image) != 0 { - codec := scheme.Codecs.LegacyCodec(corev1.SchemeGroupVersion) - newName := o.FindNewName(oldRc) - if newRc, err = LoadExistingNextReplicationController(coreClient, o.Namespace, newName); err != nil { - return err - } - if newRc != nil { - if inProgressImage := newRc.Spec.Template.Spec.Containers[0].Image; inProgressImage != o.Image { - return fmt.Errorf("Found existing in-progress update to image (%s).\nEither continue in-progress update with --image=%s or rollback with --rollback", inProgressImage, inProgressImage) - } - fmt.Fprintf(o.Out, "Found existing update in progress (%s), resuming.\n", newRc.Name) - } else { - config := &NewControllerConfig{ - Namespace: o.Namespace, - OldName: o.OldName, - NewName: newName, - Image: o.Image, - Container: o.Container, - DeploymentKey: o.DeploymentKey, - } - if oldRc.Spec.Template.Spec.Containers[0].Image == o.Image { - if len(o.PullPolicy) == 0 { - return fmt.Errorf("--image-pull-policy (Always|Never|IfNotPresent) must be provided when --image is the same as existing container image") - } - config.PullPolicy = corev1.PullPolicy(o.PullPolicy) - } - newRc, err = CreateNewControllerFromCurrentController(coreClient, codec, config) - if err != nil { - return err - } - } - // Update the existing replication controller with pointers to the 'next' controller - // and adding the label if necessary to distinguish it from the 'next' controller. - oldHash, err := util.HashObject(oldRc, codec) - if err != nil { - return err - } - // If new image is same as old, the hash may not be distinct, so add a suffix. 
- oldHash += "-orig" - oldRc, err = UpdateExistingReplicationController(coreClient, coreClient, oldRc, o.Namespace, newRc.Name, o.DeploymentKey, oldHash, o.Out) - if err != nil { - return err - } - } - - if o.Rollback { - newName := o.FindNewName(oldRc) - if newRc, err = LoadExistingNextReplicationController(coreClient, o.Namespace, newName); err != nil { - return err - } - - if newRc == nil { - return fmt.Errorf("Could not find %s to rollback.\n", newName) - } - } - - if o.OldName == newRc.Name { - return fmt.Errorf("%s cannot have the same name as the existing ReplicationController %s", - filename, o.OldName) - } - - updater := NewRollingUpdater(newRc.Namespace, coreClient, coreClient, o.ScaleClient) - - // To successfully pull off a rolling update the new and old rc have to differ - // by at least one selector. Every new pod should have the selector and every - // old pod should not have the selector. - var hasLabel bool - for key, oldValue := range oldRc.Spec.Selector { - if newValue, ok := newRc.Spec.Selector[key]; ok && newValue != oldValue { - hasLabel = true - break - } - } - if !hasLabel { - return fmt.Errorf("%s must specify a matching key with non-equal value in Selector for %s", - filename, o.OldName) - } - // TODO: handle scales during rolling update - if replicasDefaulted { - t := *oldRc.Spec.Replicas - newRc.Spec.Replicas = &t - } - - if o.DryRun { - oldRcData := &bytes.Buffer{} - newRcData := &bytes.Buffer{} - if o.OutputFormat == "" { - oldRcData.WriteString(oldRc.Name) - newRcData.WriteString(newRc.Name) - } else { - printer, err := o.ToPrinter("rolling updated") - if err != nil { - return err - } - if err := printer.PrintObj(oldRc, oldRcData); err != nil { - return err - } - if err := printer.PrintObj(newRc, newRcData); err != nil { - return err - } - } - fmt.Fprintf(o.Out, "Rolling from:\n%s\nTo:\n%s\n", string(oldRcData.Bytes()), string(newRcData.Bytes())) - return nil - } - updateCleanupPolicy := DeleteRollingUpdateCleanupPolicy - if o.KeepOldName { - updateCleanupPolicy = RenameRollingUpdateCleanupPolicy - } - config := &RollingUpdaterConfig{ - Out: o.Out, - OldRc: oldRc, - NewRc: newRc, - UpdatePeriod: o.Period, - Interval: o.Interval, - Timeout: timeout, - CleanupPolicy: updateCleanupPolicy, - MaxUnavailable: intstr.FromInt(0), - MaxSurge: intstr.FromInt(1), - } - if o.Rollback { - err = AbortRollingUpdate(config) - if err != nil { - return err - } - coreClient.ReplicationControllers(config.NewRc.Namespace).Update(context.TODO(), config.NewRc, metav1.UpdateOptions{}) - } - err = updater.Update(config) - if err != nil { - return err - } - - message := "rolling updated" - if o.KeepOldName { - newRc.Name = o.OldName - } else { - message = fmt.Sprintf("rolling updated to %q", newRc.Name) - } - newRc, err = coreClient.ReplicationControllers(o.Namespace).Get(context.TODO(), newRc.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - printer, err := o.ToPrinter(message) - if err != nil { - return err - } - return printer.PrintObj(newRc, o.Out) -} - -func findNewName(args []string, oldRc *corev1.ReplicationController) string { - if len(args) >= 2 { - return args[1] - } - if oldRc != nil { - newName, _ := GetNextControllerAnnotation(oldRc) - return newName - } - return "" -} - -func getClientSideDryRun(cmd *cobra.Command) bool { - dryRunStrategy, err := cmdutil.GetDryRunStrategy(cmd) - if err != nil { - klog.Fatalf("error accessing --dry-run flag for command %s: %v", cmd.Name(), err) - } - if dryRunStrategy == cmdutil.DryRunServer { - 
klog.Fatalf("--dry-run=server for command %s is not supported yet", cmd.Name()) - } - return dryRunStrategy == cmdutil.DryRunClient -} diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate_test.go deleted file mode 100644 index a3aa07ee1fb..00000000000 --- a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate_test.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package rollingupdate - -import ( - "testing" - - "k8s.io/cli-runtime/pkg/genericclioptions" - cmdtesting "k8s.io/kubectl/pkg/cmd/testing" -) - -func TestValidateArgs(t *testing.T) { - f := cmdtesting.NewTestFactory() - defer f.Cleanup() - - tests := []struct { - testName string - flags map[string]string - filenames []string - args []string - expectErr bool - }{ - { - testName: "nothing", - expectErr: true, - }, - { - testName: "no file, no image", - flags: map[string]string{}, - args: []string{"foo"}, - expectErr: true, - }, - { - testName: "valid file example", - filenames: []string{"bar.yaml"}, - args: []string{"foo"}, - }, - { - testName: "missing second image name", - flags: map[string]string{ - "image": "foo:v2", - }, - args: []string{"foo"}, - }, - { - testName: "valid image example", - flags: map[string]string{ - "image": "foo:v2", - }, - args: []string{"foo", "foo-v2"}, - }, - { - testName: "both filename and image example", - flags: map[string]string{ - "image": "foo:v2", - }, - filenames: []string{"bar.yaml"}, - args: []string{"foo", "foo-v2"}, - expectErr: true, - }, - } - for _, test := range tests { - cmd := NewCmdRollingUpdate(f, genericclioptions.NewTestIOStreamsDiscard()) - - if test.flags != nil { - for key, val := range test.flags { - cmd.Flags().Set(key, val) - } - } - err := validateArguments(cmd, test.filenames, test.args) - if err != nil && !test.expectErr { - t.Errorf("%s: unexpected error: %v", test.testName, err) - } - if err == nil && test.expectErr { - t.Errorf("%s: unexpected non-error", test.testName) - } - } -} diff --git a/test/conformance/testdata/conformance.txt b/test/conformance/testdata/conformance.txt index a55fd580e6d..6519e1426da 100644 --- a/test/conformance/testdata/conformance.txt +++ b/test/conformance/testdata/conformance.txt @@ -223,7 +223,6 @@ test/e2e/common/security_context.go: "should run the container as unprivileged w test/e2e/common/security_context.go: "should not allow privilege escalation when false" test/e2e/kubectl/kubectl.go: "should create and stop a replication controller" test/e2e/kubectl/kubectl.go: "should scale a replication controller" -test/e2e/kubectl/kubectl.go: "should do a rolling update of a replication controller" test/e2e/kubectl/kubectl.go: "should create and stop a working application" test/e2e/kubectl/kubectl.go: "should check if v1 is in available api versions" test/e2e/kubectl/kubectl.go: "should check if Kubernetes master services is included in cluster-info" @@ -233,7 +232,6 @@ 
test/e2e/kubectl/kubectl.go: "should update the label on a resource" test/e2e/kubectl/kubectl.go: "should be able to retrieve and filter logs" test/e2e/kubectl/kubectl.go: "should add annotations for pods in rc" test/e2e/kubectl/kubectl.go: "should check is all data is printed" -test/e2e/kubectl/kubectl.go: "should support rolling-update to same image" test/e2e/kubectl/kubectl.go: "should create a pod from an image when restart is Never" test/e2e/kubectl/kubectl.go: "should update a single-container pod's image" test/e2e/kubectl/kubectl.go: "should support proxy with --port 0" diff --git a/test/e2e/kubectl/BUILD b/test/e2e/kubectl/BUILD index 04c53764bb5..e2f589f0cdd 100644 --- a/test/e2e/kubectl/BUILD +++ b/test/e2e/kubectl/BUILD @@ -23,20 +23,17 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", "//staging/src/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library", "//staging/src/k8s.io/client-go/discovery:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//staging/src/k8s.io/client-go/tools/watch:go_default_library", "//staging/src/k8s.io/kubectl/pkg/polymorphichelpers:go_default_library", "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index d6addaa177a..d9bff2464af 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -52,20 +52,17 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/authentication/serviceaccount" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" clientset "k8s.io/client-go/kubernetes" - watchtools "k8s.io/client-go/tools/watch" "k8s.io/kubectl/pkg/polymorphichelpers" "k8s.io/kubernetes/pkg/controller" commonutils "k8s.io/kubernetes/test/e2e/common" @@ -113,7 +110,6 @@ const ( var ( nautilusImage = imageutils.GetE2EImage(imageutils.Nautilus) - kittenImage = imageutils.GetE2EImage(imageutils.Kitten) httpdImage = imageutils.GetE2EImage(imageutils.Httpd) busyboxImage = imageutils.GetE2EImage(imageutils.BusyBox) agnhostImage = imageutils.GetE2EImage(imageutils.Agnhost) @@ -277,11 +273,10 @@ var _ = SIGDescribe("Kubectl client", func() { 
} ginkgo.Describe("Update Demo", func() { - var nautilus, kitten string + var nautilus string ginkgo.BeforeEach(func() { updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo" nautilus = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in")))) - kitten = commonutils.SubstituteImageName(string(testfiles.ReadOrDie(filepath.Join(updateDemoRoot, "kitten-rc.yaml.in")))) }) /* Release : v1.9 @@ -316,22 +311,6 @@ var _ = SIGDescribe("Kubectl client", func() { framework.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) }) - - /* - Release : v1.9 - Testname: Kubectl, rolling update replication controller - Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2. Run a rolling update to run a different version of the container. All running instances SHOULD now be running the newer version of the container as part of the rolling update. - */ - framework.ConformanceIt("should do a rolling update of a replication controller ", func() { - ginkgo.By("creating the initial replication controller") - framework.RunKubectlOrDieInput(ns, string(nautilus[:]), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) - validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) - ginkgo.By("rolling-update to new replication controller") - debugDiscovery() - framework.RunKubectlOrDieInput(ns, string(kitten[:]), "rolling-update", "update-demo-nautilus", "--update-period=1s", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) - validateController(c, kittenImage, 2, "update-demo", updateDemoSelector, getUDData("kitten.jpg", ns), ns) - // Everything will hopefully be cleaned up when the namespace is deleted. - }) }) ginkgo.Describe("Guestbook application", func() { @@ -1539,42 +1518,6 @@ metadata: }) }) - ginkgo.Describe("Kubectl rolling-update", func() { - var nsFlag string - var rcName string - var httpdRC string - var c clientset.Interface - - ginkgo.BeforeEach(func() { - c = f.ClientSet - nsFlag = fmt.Sprintf("--namespace=%v", ns) - rcName = "httpd-rc" - httpdRC = commonutils.SubstituteImageName(string(readTestFileOrDie(httpdRCFilename))) - - }) - - ginkgo.AfterEach(func() { - framework.RunKubectlOrDie(ns, "delete", "rc", rcName, nsFlag) - }) - - /* - Release : v1.9 - Testname: Kubectl, rolling update - Description: Command 'kubectl rolling-update' MUST replace the specified replication controller with a new replication controller by updating one pod at a time to use the new Pod spec. 
- */ - framework.ConformanceIt("should support rolling-update to same image ", func() { - ginkgo.By("running the image " + httpdImage) - framework.RunKubectlOrDieInput(ns, httpdRC, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) - waitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout) - - ginkgo.By("rolling-update to same image controller") - - debugDiscovery() - runKubectlRetryOrDie(ns, "rolling-update", rcName, "--update-period=1s", "--image="+httpdImage, "--image-pull-policy="+string(v1.PullIfNotPresent), nsFlag) - validateController(c, httpdImage, 1, rcName, "run="+rcName, noOpValidatorFn, ns) - }) - }) - ginkgo.Describe("Kubectl run pod", func() { var nsFlag string var podName string @@ -2167,8 +2110,6 @@ func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) } } -func noOpValidatorFn(c clientset.Interface, podID string) error { return nil } - // newBlockingReader returns a reader that allows reading the given string, // then blocks until Close() is called on the returned closer. // @@ -2332,35 +2273,3 @@ func createObjValidateOutputAndCleanup(namespace string, client dynamic.Resource framework.ExpectNotEqual(fields, defaults, fmt.Sprintf("expected non-default fields for resource: %s", resource.Name)) } } - -// waitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status. -func waitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error { - options := metav1.ListOptions{FieldSelector: fields.Set{ - "metadata.name": name, - "metadata.namespace": ns, - }.AsSelector().String()} - w, err := c.CoreV1().ReplicationControllers(ns).Watch(context.TODO(), options) - if err != nil { - return err - } - ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) - defer cancel() - _, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) { - switch event.Type { - case watch.Deleted: - return false, apierrors.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "") - } - switch rc := event.Object.(type) { - case *v1.ReplicationController: - if rc.Name == name && rc.Namespace == ns && - rc.Generation <= rc.Status.ObservedGeneration && - *(rc.Spec.Replicas) == rc.Status.Replicas { - return true, nil - } - framework.Logf("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d", - name, rc.Generation, rc.Status.ObservedGeneration, *(rc.Spec.Replicas), rc.Status.Replicas) - } - return false, nil - }) - return err -} diff --git a/vendor/modules.txt b/vendor/modules.txt index d4f90b3b651..e0bd3ba77b4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1806,7 +1806,6 @@ k8s.io/kubectl/pkg/cmd/plugin k8s.io/kubectl/pkg/cmd/portforward k8s.io/kubectl/pkg/cmd/proxy k8s.io/kubectl/pkg/cmd/replace -k8s.io/kubectl/pkg/cmd/rollingupdate k8s.io/kubectl/pkg/cmd/rollout k8s.io/kubectl/pkg/cmd/run k8s.io/kubectl/pkg/cmd/scale
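Aside: the removed Run path rests on one invariant that its own comments spell out — the old and new replication controllers must differ by at least one selector value, so every new pod is distinguishable from every old one; for same-image updates the code derived that value from a hash of the controller and gave the old side an "-orig" suffix to keep the two hashes distinct. Below is a minimal standalone sketch of that selector check, assuming hypothetical label values; the real code performs the same loop over corev1.ReplicationController selectors.

    package main

    import "fmt"

    // selectorsDiffer mirrors the check the removed rolling-update code performed:
    // the new controller must carry at least one selector key whose value differs
    // from the old controller's, otherwise the update is rejected.
    func selectorsDiffer(oldSel, newSel map[string]string) bool {
        for key, oldValue := range oldSel {
            if newValue, ok := newSel[key]; ok && newValue != oldValue {
                return true
            }
        }
        return false
    }

    func main() {
        // Hypothetical selectors: the "deployment" key carries a hash of the
        // controller, with an "-orig" suffix on the old side so the values stay
        // distinct even when the pod template (and therefore the hash) is unchanged.
        oldSel := map[string]string{"run": "httpd-rc", "deployment": "5f9b8c-orig"}
        newSel := map[string]string{"run": "httpd-rc", "deployment": "7d2e4a"}
        fmt.Println(selectorsDiffer(oldSel, newSel)) // true: the rolling update may proceed
    }

The "-orig" suffix is the design choice that made same-image rolling updates possible at all: with an identical pod template the two hashes would otherwise collide, leaving no selector to tell old pods from new ones.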