diff --git a/test/integration/deployment/BUILD b/test/integration/deployment/BUILD
index bd904438e4d..ee29e8243d0 100644
--- a/test/integration/deployment/BUILD
+++ b/test/integration/deployment/BUILD
@@ -22,6 +22,7 @@ go_test(
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
     ],
 )
diff --git a/test/integration/deployment/deployment_test.go b/test/integration/deployment/deployment_test.go
index 828c4cb7733..deb4807a052 100644
--- a/test/integration/deployment/deployment_test.go
+++ b/test/integration/deployment/deployment_test.go
@@ -25,6 +25,7 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/api/extensions/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 	"k8s.io/kubernetes/test/integration/framework"
@@ -79,6 +80,120 @@ func TestNewDeployment(t *testing.T) {
 	}
 }
 
+// Deployments should support roll out, roll back, and roll over
+func TestDeploymentRollingUpdate(t *testing.T) {
+	s, closeFn, rm, dc, informers, c := dcSetup(t)
+	defer closeFn()
+	name := "test-rolling-update-deployment"
+	ns := framework.CreateTestingNamespace(name, s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	// Start informer and controllers
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	informers.Start(stopCh)
+	go rm.Run(5, stopCh)
+	go dc.Run(5, stopCh)
+
+	replicas := int32(20)
+	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
+	tester.deployment.Spec.MinReadySeconds = 4
+	quarter := intstr.FromString("25%")
+	tester.deployment.Spec.Strategy.RollingUpdate = &v1beta1.RollingUpdateDeployment{
+		MaxUnavailable: &quarter,
+		MaxSurge:       &quarter,
+	}
+
+	// Create a deployment.
+	var err error
+	tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
+	if err != nil {
+		t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
+	}
+	oriImage := tester.deployment.Spec.Template.Spec.Containers[0].Image
+	if err := tester.waitForDeploymentRevisionAndImage("1", oriImage); err != nil {
+		t.Fatal(err)
+	}
+	if err := tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
+		t.Fatal(err)
+	}
+
+	// 1. Roll out a new image.
+	image := "new-image"
+	if oriImage == image {
+		t.Fatalf("bad test setup, deployment %s roll out with the same image", tester.deployment.Name)
+	}
+	imageFn := func(update *v1beta1.Deployment) {
+		update.Spec.Template.Spec.Containers[0].Image = image
+	}
+	tester.deployment, err = tester.updateDeployment(imageFn)
+	if err != nil {
+		t.Fatalf("failed to update deployment %s: %v", tester.deployment.Name, err)
+	}
+	if err := tester.waitForDeploymentRevisionAndImage("2", image); err != nil {
+		t.Fatal(err)
+	}
+	if err := tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
+		t.Fatal(err)
+	}
+
+	// 2. Roll back to the last revision.
+	revision := int64(0)
+	rollback := newDeploymentRollback(tester.deployment.Name, nil, revision)
+	if err = c.ExtensionsV1beta1().Deployments(ns.Name).Rollback(rollback); err != nil {
+		t.Fatalf("failed to roll back deployment %s to last revision: %v", tester.deployment.Name, err)
+	}
+	// Wait for the deployment to start rolling back
+	if err = tester.waitForDeploymentRollbackCleared(); err != nil {
+		t.Fatalf("failed to roll back deployment %s to last revision: %v", tester.deployment.Name, err)
+	}
+	// Wait for the deployment to be rolled back to the template stored in revision 1 and rolled forward to revision 3.
+	if err := tester.waitForDeploymentRevisionAndImage("3", oriImage); err != nil {
+		t.Fatal(err)
+	}
+	if err := tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
+		t.Fatal(err)
+	}
+
+	// 3. Roll over a deployment before the previous rolling update finishes.
+	image = "dont-finish"
+	imageFn = func(update *v1beta1.Deployment) {
+		update.Spec.Template.Spec.Containers[0].Image = image
+	}
+	tester.deployment, err = tester.updateDeployment(imageFn)
+	if err != nil {
+		t.Fatalf("failed to update deployment %s: %v", tester.deployment.Name, err)
+	}
+	if err := tester.waitForDeploymentRevisionAndImage("4", image); err != nil {
+		t.Fatal(err)
+	}
+	// We don't mark pods as ready so that rollout won't finish.
+	// Before the rollout finishes, trigger another rollout.
+	image = "rollover"
+	imageFn = func(update *v1beta1.Deployment) {
+		update.Spec.Template.Spec.Containers[0].Image = image
+	}
+	tester.deployment, err = tester.updateDeployment(imageFn)
+	if err != nil {
+		t.Fatalf("failed to update deployment %s: %v", tester.deployment.Name, err)
+	}
+	if err := tester.waitForDeploymentRevisionAndImage("5", image); err != nil {
+		t.Fatal(err)
+	}
+	if err := tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
+		t.Fatal(err)
+	}
+	_, allOldRSs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.ExtensionsV1beta1())
+	if err != nil {
+		t.Fatalf("failed retrieving old replicasets of deployment %s: %v", tester.deployment.Name, err)
+	}
+	for _, oldRS := range allOldRSs {
+		if *oldRS.Spec.Replicas != 0 {
+			t.Errorf("expected old replicaset %s of deployment %s to have 0 replica, got %d", oldRS.Name, tester.deployment.Name, *oldRS.Spec.Replicas)
+		}
+	}
+}
+
 // selectors are IMMUTABLE for all API versions except apps/v1beta1 and extensions/v1beta1
 func TestDeploymentSelectorImmutability(t *testing.T) {
 	s, closeFn, c := dcSimpleSetup(t)
diff --git a/test/integration/deployment/util.go b/test/integration/deployment/util.go
index 4faca412cb3..cfc33fa6ab4 100644
--- a/test/integration/deployment/util.go
+++ b/test/integration/deployment/util.go
@@ -19,6 +19,7 @@ package deployment
 import (
 	"fmt"
 	"net/http/httptest"
+	"sync"
 	"testing"
 	"time"
 
@@ -211,40 +212,50 @@ func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
 	return err
 }
 
-// markUpdatedPodsReady manually marks updated Deployment pods status to ready
-func (d *deploymentTester) markUpdatedPodsReady() {
+// markUpdatedPodsReady manually marks updated Deployment pods status to ready,
+// until the deployment is complete
+func (d *deploymentTester) markUpdatedPodsReady(wg *sync.WaitGroup) {
+	defer wg.Done()
+
 	ns := d.deployment.Namespace
-	var readyPods int32
 	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
-		readyPods = 0
+		// We're done when the deployment is complete
+		if completed, err := d.deploymentComplete(); err != nil {
+			return false, err
+		} else if completed {
+			return true, nil
+		}
+		// Otherwise, mark remaining pods as ready
 		pods, err := d.listUpdatedPods()
 		if err != nil {
 			d.t.Log(err)
 			return false, nil
 		}
-		if len(pods) != int(*d.deployment.Spec.Replicas) {
-			d.t.Logf("%d/%d of deployment pods are created", len(pods), *d.deployment.Spec.Replicas)
-			return false, nil
-		}
+		d.t.Logf("%d/%d of deployment pods are created", len(pods), *d.deployment.Spec.Replicas)
 		for i := range pods {
 			pod := pods[i]
 			if podutil.IsPodReady(&pod) {
-				readyPods++
 				continue
 			}
			if err = markPodReady(d.c, ns, &pod); err != nil {
 				d.t.Logf("failed to update Deployment pod %s, will retry later: %v", pod.Name, err)
-			} else {
-				readyPods++
 			}
 		}
-		return readyPods >= *d.deployment.Spec.Replicas, nil
+		return false, nil
 	})
 	if err != nil {
 		d.t.Fatalf("failed to mark updated Deployment pods to ready: %v", err)
 	}
 }
 
+func (d *deploymentTester) deploymentComplete() (bool, error) {
+	latest, err := d.c.ExtensionsV1beta1().Deployments(d.deployment.Namespace).Get(d.deployment.Name, metav1.GetOptions{})
+	if err != nil {
+		return false, err
+	}
+	return deploymentutil.DeploymentComplete(d.deployment, &latest.Status), nil
+}
+
 // Waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
 // Rolling update strategy should not be broken during a rolling update.
 func (d *deploymentTester) waitForDeploymentCompleteAndCheckRolling() error {
@@ -262,28 +273,42 @@ func (d *deploymentTester) waitForDeploymentComplete() error {
 // while marking updated Deployment pods as ready at the same time.
 // Uses hard check to make sure rolling update strategy is not violated at any times.
 func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady() error {
+	var wg sync.WaitGroup
+
 	// Manually mark updated Deployment pods as ready in a separate goroutine
-	go d.markUpdatedPodsReady()
+	wg.Add(1)
+	go d.markUpdatedPodsReady(&wg)
 
 	// Wait for the Deployment status to complete while Deployment pods are becoming ready
 	err := d.waitForDeploymentCompleteAndCheckRolling()
 	if err != nil {
 		return fmt.Errorf("failed to wait for Deployment %s to complete: %v", d.deployment.Name, err)
 	}
+
+	// Wait for goroutine to finish
+	wg.Wait()
+
 	return nil
 }
 
 // waitForDeploymentCompleteAndMarkPodsReady waits for the Deployment to complete
 // while marking updated Deployment pods as ready at the same time.
 func (d *deploymentTester) waitForDeploymentCompleteAndMarkPodsReady() error {
+	var wg sync.WaitGroup
+
 	// Manually mark updated Deployment pods as ready in a separate goroutine
-	go d.markUpdatedPodsReady()
+	wg.Add(1)
+	go d.markUpdatedPodsReady(&wg)
 
 	// Wait for the Deployment status to complete using soft check, while Deployment pods are becoming ready
 	err := d.waitForDeploymentComplete()
 	if err != nil {
 		return fmt.Errorf("failed to wait for Deployment status %s: %v", d.deployment.Name, err)
 	}
+
+	// Wait for goroutine to finish
+	wg.Wait()
+
 	return nil
 }
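
Note on step 2 of TestDeploymentRollingUpdate: with the extensions/v1beta1 rollback subresource, a RollbackConfig.Revision of 0 means "roll back to the previous revision", which is why revision := int64(0) lands the deployment back on the revision-1 pod template, recorded as revision 3. The newDeploymentRollback helper is not part of this patch; a minimal sketch of what such a helper presumably builds, using the extensions/v1beta1 types already imported above (the sketch itself is illustrative, not the helper from util.go):

package deployment

import (
	"k8s.io/api/extensions/v1beta1"
)

// newDeploymentRollback sketches the helper the test calls; the real helper
// lives elsewhere in this package and may differ in detail.
func newDeploymentRollback(name string, annotations map[string]string, revision int64) *v1beta1.DeploymentRollback {
	return &v1beta1.DeploymentRollback{
		Name:               name,
		UpdatedAnnotations: annotations,
		// Revision 0 is interpreted as "roll back to the last revision".
		RollbackTo: v1beta1.RollbackConfig{Revision: revision},
	}
}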
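
Note on the util.go changes: markUpdatedPodsReady no longer counts ready pods against Spec.Replicas; it keeps marking updated pods ready until the deployment itself reports complete, and every caller now blocks on a sync.WaitGroup so it returns only after that goroutine has finished. Reduced to a standalone sketch with illustrative names (markUntilComplete, isComplete, and work are not from the patch; time.Sleep stands in for wait.PollImmediate's poll interval):

package main

import (
	"fmt"
	"sync"
	"time"
)

// markUntilComplete does one unit of work per tick until isComplete reports
// true, then signals the WaitGroup so the caller can safely return.
func markUntilComplete(wg *sync.WaitGroup, isComplete func() bool, work func()) {
	defer wg.Done()
	for !isComplete() {
		work()
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	const replicas = 5
	var (
		mu    sync.Mutex
		ready int
	)

	isComplete := func() bool { // stands in for deploymentTester.deploymentComplete
		mu.Lock()
		defer mu.Unlock()
		return ready >= replicas
	}
	work := func() { // stands in for marking one updated pod ready
		mu.Lock()
		defer mu.Unlock()
		if ready < replicas {
			ready++
		}
	}

	var wg sync.WaitGroup
	wg.Add(1)
	go markUntilComplete(&wg, isComplete, work)

	// The integration test waits on deployment status here; the sketch just
	// waits for the helper goroutine, as the waitForDeploymentComplete* helpers now do.
	wg.Wait()
	fmt.Printf("all %d replicas marked ready\n", ready)
}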