diff --git a/test/e2e/deployment.go b/test/e2e/deployment.go
index f1d7677e30e..7c5cece3428 100644
--- a/test/e2e/deployment.go
+++ b/test/e2e/deployment.go
@@ -18,6 +18,7 @@ package e2e
 
 import (
 	"fmt"
+	"math/rand"
 	"strings"
 	"time"
 
@@ -95,6 +96,12 @@ var _ = framework.KubeDescribe("Deployment", func() {
 	It("overlapping deployment should not fight with each other", func() {
 		testOverlappingDeployment(f)
 	})
+	It("lack of progress should be reported in the deployment status", func() {
+		testFailedDeployment(f)
+	})
+	It("iterative rollouts should eventually progress", func() {
+		testIterativeDeployments(f)
+	})
 	// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
 	// See https://github.com/kubernetes/kubernetes/issues/29229
 })
@@ -418,8 +425,8 @@ func testRollingUpdateDeploymentEvents(f *framework.Framework) {
 	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
 	Expect(err).NotTo(HaveOccurred())
 	Expect(newRS).NotTo(Equal(nil))
-	Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled up replica set %s to 1", newRS.Name)))
-	Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %s to 0", rsName)))
+	Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Created new replica set %q and scaled up to 1", newRS.Name)))
+	Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %q to 0", rsName)))
 }
 
 func testRecreateDeployment(f *framework.Framework) {
@@ -470,8 +477,8 @@ func testRecreateDeployment(f *framework.Framework) {
 	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
 	Expect(err).NotTo(HaveOccurred())
 	Expect(newRS).NotTo(Equal(nil))
-	Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %s to 0", rsName)))
-	Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled up replica set %s to 3", newRS.Name)))
+	Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %q to 0", rsName)))
+	Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Created new replica set %q and scaled up to 3", newRS.Name)))
 }
 
 // testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
@@ -1316,3 +1323,173 @@ func testOverlappingDeployment(f *framework.Framework) {
 	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deployOverlapping.Name, "2", redisImage)
 	Expect(err).NotTo(HaveOccurred(), "The second deployment failed to update to revision 2")
 }
+
+func testFailedDeployment(f *framework.Framework) {
+	ns := f.Namespace.Name
+	c := f.ClientSet
+
+	podLabels := map[string]string{"name": nginxImageName}
+	replicas := int32(1)
+
+	// Create a nginx deployment.
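+	// The image tag below intentionally does not exist, so the rollout can never
+	// progress; with progressDeadlineSeconds set to 30, the Progressing condition
+	// should eventually report the timed-out reason checked further down.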
+	deploymentName := "nginx"
+	nonExistentImage := "nginx:not-there"
+	thirty := int32(30)
+	d := newDeployment(deploymentName, replicas, podLabels, nginxImageName, nonExistentImage, extensions.RecreateDeploymentStrategyType, nil)
+	d.Spec.ProgressDeadlineSeconds = &thirty
+
+	framework.Logf("Creating deployment %q with progressDeadlineSeconds set to %ds and a non-existent image", deploymentName, thirty)
+	deployment, err := c.Extensions().Deployments(ns).Create(d)
+	Expect(err).NotTo(HaveOccurred())
+
+	framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
+	Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())
+
+	framework.Logf("Waiting for deployment %q status", deploymentName)
+	Expect(framework.WaitForDeploymentStatus(c, deployment)).NotTo(HaveOccurred())
+
+	framework.Logf("Checking deployment %q for a timeout condition", deploymentName)
+	Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.TimedOutReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
+
+	framework.Logf("Updating deployment %q with a good image", deploymentName)
+	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
+		update.Spec.Template.Spec.Containers[0].Image = nginxImage
+	})
+	Expect(err).NotTo(HaveOccurred())
+
+	framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
+	Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())
+
+	framework.Logf("Waiting for deployment %q status", deploymentName)
+	Expect(framework.WaitForDeploymentStatus(c, deployment)).NotTo(HaveOccurred())
+
+	framework.Logf("Checking deployment %q for a complete condition", deploymentName)
+	Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
+}
+
+func randomScale(d *extensions.Deployment, i int) {
+	switch r := rand.Float32(); {
+	case r < 0.3:
+		framework.Logf("%02d: scaling up", i)
+		d.Spec.Replicas++
+	case r < 0.6:
+		if d.Spec.Replicas > 1 {
+			framework.Logf("%02d: scaling down", i)
+			d.Spec.Replicas--
+		}
+	}
+}
+
+func testIterativeDeployments(f *framework.Framework) {
+	ns := f.Namespace.Name
+	c := f.ClientSet
+
+	podLabels := map[string]string{"name": nginxImageName}
+	replicas := int32(6)
+	zero := int64(0)
+	two := int32(2)
+
+	// Create a nginx deployment.
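+	// Each loop iteration below applies one random mutation (new rollout,
+	// rollback, scale, pause/resume, or pod deletion); regardless of the
+	// sequence, the deployment is expected to end up with a complete,
+	// valid status once the loop finishes.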
+	deploymentName := "nginx"
+	thirty := int32(30)
+	d := newDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil)
+	d.Spec.ProgressDeadlineSeconds = &thirty
+	d.Spec.RevisionHistoryLimit = &two
+	d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
+	framework.Logf("Creating deployment %q", deploymentName)
+	deployment, err := c.Extensions().Deployments(ns).Create(d)
+	Expect(err).NotTo(HaveOccurred())
+
+	iterations := 20
+	for i := 0; i < iterations; i++ {
+		if r := rand.Float32(); r < 0.6 {
+			time.Sleep(time.Duration(float32(i) * r * float32(time.Second)))
+		}
+
+		switch n := rand.Float32(); {
+		case n < 0.2:
+			// trigger a new rollout
+			framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
+			deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
+				newEnv := api.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
+				update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
+				randomScale(update, i)
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+		case n < 0.4:
+			// roll back to the previous revision
+			framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
+			deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
+				rollbackTo := &extensions.RollbackConfig{Revision: 0}
+				update.Spec.RollbackTo = rollbackTo
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+		case n < 0.6:
+			// just scaling
+			framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
+			deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
+				randomScale(update, i)
+			})
+			Expect(err).NotTo(HaveOccurred())
+
+		case n < 0.8:
+			// toggle the paused state of the deployment
+			if deployment.Spec.Paused {
+				framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
+				deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
+					update.Spec.Paused = false
+					randomScale(update, i)
+				})
+				Expect(err).NotTo(HaveOccurred())
+			} else {
+				framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
+				deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
+					update.Spec.Paused = true
+					randomScale(update, i)
+				})
+				Expect(err).NotTo(HaveOccurred())
+			}
+
+		default:
+			// arbitrarily delete deployment pods
+			framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
+			selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
+			Expect(err).NotTo(HaveOccurred())
+			opts := api.ListOptions{LabelSelector: selector}
+			podList, err := c.Core().Pods(ns).List(opts)
+			Expect(err).NotTo(HaveOccurred())
+			if len(podList.Items) == 0 {
+				framework.Logf("%02d: no deployment pods to delete", i)
+				continue
+			}
+			for p := range podList.Items {
+				if rand.Float32() < 0.5 {
+					continue
+				}
+				name := podList.Items[p].Name
+				framework.Logf("%02d: deleting deployment pod %q", i, name)
+				Expect(c.Core().Pods(ns).Delete(name, nil)).NotTo(HaveOccurred())
+			}
+		}
+	}
+
+	// unpause the deployment if we ended up pausing it
+	deployment, err = c.Extensions().Deployments(ns).Get(deployment.Name)
+	Expect(err).NotTo(HaveOccurred())
+	if deployment.Spec.Paused {
+		deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
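+			// resume the rollout so the deployment can complete before the final status checks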
+			update.Spec.Paused = false
+		})
+		Expect(err).NotTo(HaveOccurred())
+	}
+
+	framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
+	Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())
+
+	framework.Logf("Waiting for deployment %q status", deploymentName)
+	Expect(framework.WaitForDeploymentStatusValid(c, deployment)).NotTo(HaveOccurred())
+
+	framework.Logf("Checking deployment %q for a complete condition", deploymentName)
+	Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
+}
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 2c98f78d617..2eea6f42f37 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -3173,6 +3173,23 @@ func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string,
 	return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { return c.Extensions().Deployments(ns).Get(deploymentName) }, desiredGeneration, Poll, 1*time.Minute)
 }
 
+// WaitForDeploymentWithCondition polls the deployment until the given condition type is reported with the given reason.
+func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error {
+	var conditions []extensions.DeploymentCondition
+	pollErr := wait.PollImmediate(time.Second, 1*time.Minute, func() (bool, error) {
+		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
+		if err != nil {
+			return false, err
+		}
+		conditions = deployment.Status.Conditions
+		cond := deploymentutil.GetDeploymentCondition(deployment.Status, condType)
+		return cond != nil && cond.Reason == reason, nil
+	})
+	if pollErr == wait.ErrWaitTimeout {
+		pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason: %v", deploymentName, conditions)
+	}
+	return pollErr
+}
+
 func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment) {
 	minReadySeconds := deployment.Spec.MinReadySeconds
 	podList, err := deploymentutil.ListPods(deployment,
diff --git a/test/test_owners.csv b/test/test_owners.csv
index 1abf7a569b2..6b77a458d95 100644
--- a/test/test_owners.csv
+++ b/test/test_owners.csv
@@ -63,6 +63,8 @@ Deployment deployment should label adopted RSs and pods,pwittrock,0
 Deployment deployment should support rollback,pwittrock,0
 Deployment deployment should support rollback when there's replica set with no revision,pwittrock,0
 Deployment deployment should support rollover,pwittrock,0
+Deployment iterative rollouts should eventually progress,kargakis,0
+Deployment lack of progress should be reported in the deployment status,kargakis,0
 Deployment overlapping deployment should not fight with each other,kargakis,1
 Deployment paused deployment should be able to scale,kargakis,1
 Deployment paused deployment should be ignored by the controller,kargakis,0