diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go
index ac9cb0b81ec..74743de98bc 100644
--- a/test/e2e/apps/deployment.go
+++ b/test/e2e/apps/deployment.go
@@ -92,6 +92,9 @@ var _ = SIGDescribe("Deployment", func() {
 	It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
 		testDeploymentsControllerRef(f)
 	})
+	It("deployment should support proportional scaling", func() {
+		testProportionalScalingDeployment(f)
+	})
 	// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
 	// See https://github.com/kubernetes/kubernetes/issues/29229
 })
@@ -799,6 +802,126 @@ func testDeploymentsControllerRef(f *framework.Framework) {
 	Expect(rsList.Items[0].UID).Should(Equal(orphanedRSUID))
 }
 
+// testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle
+// of a rollout (either in progress or paused), then the Deployment will balance additional replicas
+// across existing active ReplicaSets (ReplicaSets with more than 0 replicas) in order to mitigate risk.
+func testProportionalScalingDeployment(f *framework.Framework) {
+	ns := f.Namespace.Name
+	c := f.ClientSet
+
+	podLabels := map[string]string{"name": NginxImageName}
+	replicas := int32(10)
+
+	// Create an nginx deployment.
+	deploymentName := "nginx-deployment"
+	d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
+	d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment)
+	d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
+	d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
+
+	framework.Logf("Creating deployment %q", deploymentName)
+	deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
+	Expect(err).NotTo(HaveOccurred())
+
+	framework.Logf("Waiting for observed generation %d", deployment.Generation)
+	Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())
+
+	// Verify that the required pods have come up.
+	framework.Logf("Waiting for all required pods to come up")
+	err = framework.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas))
+	Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)
+
+	framework.Logf("Waiting for deployment %q to complete", deployment.Name)
+	Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
+
+	firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
+	Expect(err).NotTo(HaveOccurred())
+
+	// Update the deployment with a non-existent image so that the new replica set
+	// will be blocked, simulating a partial rollout.
+	framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
+	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
+		update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
+	})
+	Expect(err).NotTo(HaveOccurred())
+
+	framework.Logf("Waiting for observed generation %d", deployment.Generation)
+	Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())
+
+	// Checking state of first rollout's replicaset.
+	maxUnavailable, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, int(*(deployment.Spec.Replicas)), false)
+	Expect(err).NotTo(HaveOccurred())
+
+	// First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
+	minAvailableReplicas := replicas - int32(maxUnavailable)
+	framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
+	Expect(framework.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)).NotTo(HaveOccurred())
+
+	// First rollout's replicaset should have .spec.replicas = 8 too.
+	framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
+	Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)).NotTo(HaveOccurred())
+
+	// The desired replicas wait makes sure that the RS controller has created expected number of pods.
+	framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
+	firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred())
+	err = framework.WaitForReplicaSetDesiredReplicas(c.ExtensionsV1beta1(), firstRS)
+	Expect(err).NotTo(HaveOccurred())
+
+	// Checking state of second rollout's replicaset.
+	secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
+	Expect(err).NotTo(HaveOccurred())
+
+	maxSurge, err := intstr.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false)
+	Expect(err).NotTo(HaveOccurred())
+
+	// Second rollout's replicaset should have 0 available replicas.
+	framework.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
+	Expect(secondRS.Status.AvailableReplicas).Should(Equal(int32(0)))
+
+	// Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
+	newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
+	framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
+	Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)).NotTo(HaveOccurred())
+
+	// The desired replicas wait makes sure that the RS controller has created expected number of pods.
+	framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
+	secondRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred())
+	err = framework.WaitForReplicaSetDesiredReplicas(c.ExtensionsV1beta1(), secondRS)
+	Expect(err).NotTo(HaveOccurred())
+
+	// Check the deployment's minimum availability.
+	framework.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName)
+	if deployment.Status.AvailableReplicas < minAvailableReplicas {
+		Expect(fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)).NotTo(HaveOccurred())
+	}
+
+	// Scale the deployment to 30 replicas.
+	newReplicas = int32(30)
+	framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
+	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
+		update.Spec.Replicas = &newReplicas
+	})
+	Expect(err).NotTo(HaveOccurred())
+
+	framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
+	firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred())
+	secondRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
+	Expect(err).NotTo(HaveOccurred())
+
+	// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
+	// Note that 12 comes from rounding (30-10)*(8/13) to nearest integer.
+	framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
+	Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)).NotTo(HaveOccurred())
+
+	// Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas.
+	// Note that 8 comes from rounding (30-10)*(5/13) to nearest integer.
+	framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
+	Expect(framework.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)).NotTo(HaveOccurred())
+}
+
 func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
 	rsList := listDeploymentReplicaSets(c, ns, label)
 	for _, rs := range rsList.Items {
diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD
index 22bc5bc9ba1..3ad6f17222c 100644
--- a/test/e2e/framework/BUILD
+++ b/test/e2e/framework/BUILD
@@ -144,6 +144,7 @@ go_library(
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+        "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
         "//vendor/k8s.io/client-go/scale:go_default_library",
diff --git a/test/e2e/framework/rs_util.go b/test/e2e/framework/rs_util.go
index ce1a573f8e7..e0708b35d4d 100644
--- a/test/e2e/framework/rs_util.go
+++ b/test/e2e/framework/rs_util.go
@@ -26,6 +26,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
+	extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	testutils "k8s.io/kubernetes/test/utils"
 )
@@ -70,6 +71,54 @@ func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
 	return err
 }
 
+// WaitForReplicaSetDesiredReplicas waits until the replicaset has the desired number of replicas.
+func WaitForReplicaSetDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) error {
+	desiredGeneration := replicaSet.Generation
+	err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
+		rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == *(replicaSet.Spec.Replicas) && rs.Status.Replicas == *(rs.Spec.Replicas), nil
+	})
+	if err == wait.ErrWaitTimeout {
+		err = fmt.Errorf("replicaset %q never had desired number of replicas", replicaSet.Name)
+	}
+	return err
+}
+
+// WaitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a RS to equal targetReplicaNum
+func WaitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *extensions.ReplicaSet, targetReplicaNum int32) error {
+	desiredGeneration := replicaSet.Generation
+	err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
+		rs, err := c.ExtensionsV1beta1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		return rs.Status.ObservedGeneration >= desiredGeneration && *rs.Spec.Replicas == targetReplicaNum, nil
+	})
+	if err == wait.ErrWaitTimeout {
+		err = fmt.Errorf("replicaset %q never had desired number of .spec.replicas", replicaSet.Name)
+	}
+	return err
+}
+
+// WaitForReplicaSetTargetAvailableReplicas waits for .status.availableReplicas of a RS to equal targetReplicaNum
+func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet *extensions.ReplicaSet, targetReplicaNum int32) error {
+	desiredGeneration := replicaSet.Generation
+	err := wait.PollImmediate(Poll, pollShortTimeout, func() (bool, error) {
+		rs, err := c.ExtensionsV1beta1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.AvailableReplicas == targetReplicaNum, nil
+	})
+	if err == wait.ErrWaitTimeout {
+		err = fmt.Errorf("replicaset %q never had desired number of .status.availableReplicas", replicaSet.Name)
+	}
+	return err
+}
+
 func RunReplicaSet(config testutils.ReplicaSetConfig) error {
 	By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
 	config.NodeDumpFunc = DumpNodeDebugInfo
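Reviewer note (not part of the patch): a minimal standalone sketch of the arithmetic the test comments rely on, to make the expected 20/13 split easy to verify. The proportionalShare helper is hypothetical and only reproduces the "share proportional to current size, rounded to nearest integer" description from the test comments; it is not the deployment controller's actual code, which lives in pkg/controller/deployment/util.

package main

import "fmt"

// proportionalShare gives one ReplicaSet's share of the extra replicas,
// proportional to its current size and rounded to the nearest integer
// (integer add-half-then-divide rounding).
func proportionalShare(rsReplicas, totalReplicas, extraReplicas int32) int32 {
	return (extraReplicas*rsReplicas + totalReplicas/2) / totalReplicas
}

func main() {
	// Mid-rollout state from the test: first RS has 8 replicas, second RS has 5
	// (13 in total), and the Deployment is scaled from 10 to 30, i.e. 20 extra replicas.
	first, second, extra := int32(8), int32(5), int32(20)
	total := first + second

	// (30-10)*(8/13) rounds to 12, so the first RS should end at 8 + 12 = 20.
	fmt.Println(first + proportionalShare(first, total, extra)) // 20
	// (30-10)*(5/13) rounds to 8, so the second RS should end at 5 + 8 = 13.
	fmt.Println(second + proportionalShare(second, total, extra)) // 13
}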