diff --git a/test/e2e/apps/BUILD b/test/e2e/apps/BUILD
index 6b0cca1d2d4..b6bb7d4fbe4 100644
--- a/test/e2e/apps/BUILD
+++ b/test/e2e/apps/BUILD
@@ -69,7 +69,6 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/client-go/tools/cache:go_default_library",
     ],
 )
diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go
index 77845b695df..b8e845a5a96 100644
--- a/test/e2e/apps/deployment.go
+++ b/test/e2e/apps/deployment.go
@@ -35,7 +35,6 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/apimachinery/pkg/watch"
     clientset "k8s.io/client-go/kubernetes"
-    extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
     extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
@@ -87,10 +86,6 @@ var _ = SIGDescribe("Deployment", func() {
     It("deployment should support rollback", func() {
         testRollbackDeployment(f)
     })
-    It("scaled rollout deployment should not block on annotation check", func() {
-        testScaledRolloutDeployment(f)
-    })
     It("iterative rollouts should eventually progress", func() {
         testIterativeDeployments(f)
     })
@@ -621,159 +616,6 @@ func testRollbackDeployment(f *framework.Framework) {
     Expect(err).NotTo(HaveOccurred())
 }
 
-func testScaledRolloutDeployment(f *framework.Framework) {
-    ns := f.Namespace.Name
-    c := f.ClientSet
-
-    podLabels := map[string]string{"name": NginxImageName}
-    replicas := int32(10)
-
-    // Create a nginx deployment.
-    deploymentName := "nginx"
-    d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
-    d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment)
-    d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
-    d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
-
-    framework.Logf("Creating deployment %q", deploymentName)
-    deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
-    Expect(err).NotTo(HaveOccurred())
-
-    framework.Logf("Waiting for observed generation %d", deployment.Generation)
-    Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())
-
-    // Verify that the required pods have come up.
-    framework.Logf("Waiting for all required pods to come up")
-    err = framework.VerifyPodsRunning(f.ClientSet, ns, NginxImageName, false, *(deployment.Spec.Replicas))
-    Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)
-
-    framework.Logf("Waiting for deployment %q to complete", deployment.Name)
-    Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
-
-    first, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
-    Expect(err).NotTo(HaveOccurred())
-
-    // Update the deployment with a non-existent image so that the new replica set will be blocked.
- framework.Logf("Updating deployment %q with a non-existent image", deploymentName) - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { - update.Spec.Template.Spec.Containers[0].Image = "nginx:404" - }) - Expect(err).NotTo(HaveOccurred()) - - framework.Logf("Waiting for observed generation %d", deployment.Generation) - err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) - Expect(err).NotTo(HaveOccurred()) - - deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - if deployment.Status.AvailableReplicas < deploymentutil.MinAvailable(deployment) { - Expect(fmt.Errorf("Observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))).NotTo(HaveOccurred()) - } - - framework.Logf("Checking that the replica sets for %q are synced", deploymentName) - second, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1()) - Expect(err).NotTo(HaveOccurred()) - - first, err = c.ExtensionsV1beta1().ReplicaSets(first.Namespace).Get(first.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - firstCond := replicaSetHasDesiredReplicas(c.ExtensionsV1beta1(), first) - err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, firstCond) - Expect(err).NotTo(HaveOccurred()) - - secondCond := replicaSetHasDesiredReplicas(c.ExtensionsV1beta1(), second) - err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, secondCond) - Expect(err).NotTo(HaveOccurred()) - - framework.Logf("Updating the size (up) and template at the same time for deployment %q", deploymentName) - newReplicas := int32(20) - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) { - update.Spec.Replicas = &newReplicas - update.Spec.Template.Spec.Containers[0].Image = NautilusImage - }) - Expect(err).NotTo(HaveOccurred()) - - err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) - Expect(err).NotTo(HaveOccurred()) - - framework.Logf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment)) - Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred()) - - oldRSs, _, rs, err := deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1()) - Expect(err).NotTo(HaveOccurred()) - - for _, rs := range append(oldRSs, rs) { - framework.Logf("Ensuring replica set %q has the correct desiredReplicas annotation", rs.Name) - desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs) - if !ok || desired == *(deployment.Spec.Replicas) { - continue - } - err = fmt.Errorf("unexpected desiredReplicas annotation %d for replica set %q", desired, rs.Name) - Expect(err).NotTo(HaveOccurred()) - } - - // Update the deployment with a non-existent image so that the new replica set will be blocked. 
- framework.Logf("Updating deployment %q with a non-existent image", deploymentName) - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) { - update.Spec.Template.Spec.Containers[0].Image = "nginx:404" - }) - Expect(err).NotTo(HaveOccurred()) - - framework.Logf("Waiting for observed generation %d", deployment.Generation) - err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) - Expect(err).NotTo(HaveOccurred()) - - deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - if deployment.Status.AvailableReplicas < deploymentutil.MinAvailable(deployment) { - Expect(fmt.Errorf("Observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment))).NotTo(HaveOccurred()) - } - - framework.Logf("Checking that the replica sets for %q are synced", deploymentName) - oldRs, err := c.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - newRs, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1()) - Expect(err).NotTo(HaveOccurred()) - - oldCond := replicaSetHasDesiredReplicas(c.ExtensionsV1beta1(), oldRs) - err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, oldCond) - Expect(err).NotTo(HaveOccurred()) - - newCond := replicaSetHasDesiredReplicas(c.ExtensionsV1beta1(), newRs) - err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, newCond) - Expect(err).NotTo(HaveOccurred()) - - framework.Logf("Updating the size (down) and template at the same time for deployment %q", deploymentName) - newReplicas = int32(5) - deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) { - update.Spec.Replicas = &newReplicas - update.Spec.Template.Spec.Containers[0].Image = KittenImage - }) - Expect(err).NotTo(HaveOccurred()) - - err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation) - Expect(err).NotTo(HaveOccurred()) - - framework.Logf("Waiting for deployment status to sync (current available: %d, minimum available: %d)", deployment.Status.AvailableReplicas, deploymentutil.MinAvailable(deployment)) - Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred()) - - oldRSs, _, rs, err = deploymentutil.GetAllReplicaSets(deployment, c.ExtensionsV1beta1()) - Expect(err).NotTo(HaveOccurred()) - - for _, rs := range append(oldRSs, rs) { - framework.Logf("Ensuring replica set %q has the correct desiredReplicas annotation", rs.Name) - desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs) - if !ok || desired == *(deployment.Spec.Replicas) { - continue - } - err = fmt.Errorf("unexpected desiredReplicas annotation %d for replica set %q", desired, rs.Name) - Expect(err).NotTo(HaveOccurred()) - } -} - func randomScale(d *extensions.Deployment, i int) { switch r := rand.Float32(); { case r < 0.3: @@ -904,17 +746,6 @@ func testIterativeDeployments(f *framework.Framework) { Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred()) } -func replicaSetHasDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) wait.ConditionFunc { - desiredGeneration := replicaSet.Generation - return func() (bool, error) { - rs, err := 
rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == *(rs.Spec.Replicas), nil - } -} - func testDeploymentsControllerRef(f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet @@ -954,16 +785,6 @@ func testDeploymentsControllerRef(f *framework.Framework) { Expect(err).NotTo(HaveOccurred()) } -func waitDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) func() (bool, error) { - return func() (bool, error) { - err := checkDeploymentReplicaSetsControllerRef(c, ns, uid, label) - if err != nil { - return false, nil - } - return true, nil - } -} - func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error { rsList := listDeploymentReplicaSets(c, ns, label) for _, rs := range rsList.Items { diff --git a/test/integration/deployment/BUILD b/test/integration/deployment/BUILD index ee29e8243d0..4e88a855b86 100644 --- a/test/integration/deployment/BUILD +++ b/test/integration/deployment/BUILD @@ -42,6 +42,7 @@ go_library( "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/client-go/kubernetes:go_default_library", diff --git a/test/integration/deployment/deployment_test.go b/test/integration/deployment/deployment_test.go index deb4807a052..2745b3f0d61 100644 --- a/test/integration/deployment/deployment_test.go +++ b/test/integration/deployment/deployment_test.go @@ -876,3 +876,195 @@ func TestOverlappingDeployments(t *testing.T) { } } } + +// Deployment should not block rollout when updating spec replica number and template at the same time. 
+func TestScaledRolloutDeployment(t *testing.T) {
+    s, closeFn, rm, dc, informers, c := dcSetup(t)
+    defer closeFn()
+    name := "test-scaled-rollout-deployment"
+    ns := framework.CreateTestingNamespace(name, s, t)
+    defer framework.DeleteTestingNamespace(ns, s, t)
+
+    stopCh := make(chan struct{})
+    defer close(stopCh)
+    informers.Start(stopCh)
+    go rm.Run(5, stopCh)
+    go dc.Run(5, stopCh)
+
+    // Create a deployment with rolling update strategy, max surge = 3, and max unavailable = 2
+    var err error
+    replicas := int32(10)
+    tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
+    tester.deployment.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
+    tester.deployment.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
+    tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Create(tester.deployment)
+    if err != nil {
+        t.Fatalf("failed to create deployment %q: %v", name, err)
+    }
+    if err = tester.waitForDeploymentRevisionAndImage("1", fakeImage); err != nil {
+        t.Fatal(err)
+    }
+    if err = tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
+        t.Fatalf("deployment %q failed to complete: %v", name, err)
+    }
+
+    // Record current replicaset before starting new rollout
+    firstRS, err := tester.expectNewReplicaSet()
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // Update the deployment with another new image but do not mark the pods as ready to block new replicaset
+    fakeImage2 := "fakeimage2"
+    tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
+        update.Spec.Template.Spec.Containers[0].Image = fakeImage2
+    })
+    if err != nil {
+        t.Fatalf("failed updating deployment %q: %v", name, err)
+    }
+    if err = tester.waitForDeploymentRevisionAndImage("2", fakeImage2); err != nil {
+        t.Fatal(err)
+    }
+
+    // Verify the deployment has minimum available replicas after 2nd rollout
+    tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
+    if err != nil {
+        t.Fatalf("failed to get deployment %q: %v", name, err)
+    }
+    minAvailableReplicas := deploymentutil.MinAvailable(tester.deployment)
+    if tester.deployment.Status.AvailableReplicas < minAvailableReplicas {
+        t.Fatalf("deployment %q does not have minimum number of available replicas after 2nd rollout", name)
+    }
+
+    // Wait for old replicaset of 1st rollout to have desired replicas
+    firstRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(firstRS.Name, metav1.GetOptions{})
+    if err != nil {
+        t.Fatalf("failed to get replicaset %q: %v", firstRS.Name, err)
+    }
+    if err = tester.waitRSStable(firstRS); err != nil {
+        t.Fatal(err)
+    }
+
+    // Wait for new replicaset of 2nd rollout to have desired replicas
+    secondRS, err := tester.expectNewReplicaSet()
+    if err != nil {
+        t.Fatal(err)
+    }
+    if err = tester.waitRSStable(secondRS); err != nil {
+        t.Fatal(err)
+    }
+
+    // Scale up the deployment and update its image to another new image simultaneously (this time marks all pods as ready)
+    newReplicas := int32(20)
+    fakeImage3 := "fakeimage3"
+    tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
+        update.Spec.Replicas = &newReplicas
+        update.Spec.Template.Spec.Containers[0].Image = fakeImage3
+    })
+    if err != nil {
+        t.Fatalf("failed updating deployment %q: %v", name, err)
+    }
+    if err = tester.waitForDeploymentRevisionAndImage("3", fakeImage3); err != nil {
+        t.Fatal(err)
+    }
+    if err = tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
+        t.Fatalf("deployment %q failed to complete: %v", name, err)
+    }
+
+    // Verify every replicaset has correct desiredReplicas annotation after 3rd rollout
+    thirdRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
+    if err != nil {
+        t.Fatalf("failed getting new revision 3 replicaset for deployment %q: %v", name, err)
+    }
+    rss := []*v1beta1.ReplicaSet{firstRS, secondRS, thirdRS}
+    for _, curRS := range rss {
+        curRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{})
+        if err != nil {
+            t.Fatalf("failed to get replicaset when checking desired replicas annotation: %v", err)
+        }
+        desired, ok := deploymentutil.GetDesiredReplicasAnnotation(curRS)
+        if !ok {
+            t.Fatalf("failed to retrieve desiredReplicas annotation for replicaset %q", curRS.Name)
+        }
+        if desired != *(tester.deployment.Spec.Replicas) {
+            t.Fatalf("unexpected desiredReplicas annotation for replicaset %q: expected %d, got %d", curRS.Name, *(tester.deployment.Spec.Replicas), desired)
+        }
+    }
+
+    // Update the deployment with another new image but do not mark the pods as ready to block new replicaset
+    fakeImage4 := "fakeimage4"
+    tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
+        update.Spec.Template.Spec.Containers[0].Image = fakeImage4
+    })
+    if err != nil {
+        t.Fatalf("failed updating deployment %q: %v", name, err)
+    }
+    if err = tester.waitForDeploymentRevisionAndImage("4", fakeImage4); err != nil {
+        t.Fatal(err)
+    }
+
+    // Verify the deployment has minimum available replicas after 4th rollout
+    tester.deployment, err = c.ExtensionsV1beta1().Deployments(ns.Name).Get(name, metav1.GetOptions{})
+    if err != nil {
+        t.Fatalf("failed to get deployment %q: %v", name, err)
+    }
+    minAvailableReplicas = deploymentutil.MinAvailable(tester.deployment)
+    if tester.deployment.Status.AvailableReplicas < minAvailableReplicas {
+        t.Fatalf("deployment %q does not have minimum number of available replicas after 4th rollout", name)
+    }
+
+    // Wait for old replicaset of 3rd rollout to have desired replicas
+    thirdRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(thirdRS.Name, metav1.GetOptions{})
+    if err != nil {
+        t.Fatalf("failed to get replicaset %q: %v", thirdRS.Name, err)
+    }
+    if err = tester.waitRSStable(thirdRS); err != nil {
+        t.Fatal(err)
+    }
+
+    // Wait for new replicaset of 4th rollout to have desired replicas
+    fourthRS, err := tester.expectNewReplicaSet()
+    if err != nil {
+        t.Fatal(err)
+    }
+    if err = tester.waitRSStable(fourthRS); err != nil {
+        t.Fatal(err)
+    }
+
+    // Scale down the deployment and update its image to another new image simultaneously (this time marks all pods as ready)
+    newReplicas = int32(5)
+    fakeImage5 := "fakeimage5"
+    tester.deployment, err = tester.updateDeployment(func(update *v1beta1.Deployment) {
+        update.Spec.Replicas = &newReplicas
+        update.Spec.Template.Spec.Containers[0].Image = fakeImage5
+    })
+    if err != nil {
+        t.Fatalf("failed updating deployment %q: %v", name, err)
+    }
+    if err = tester.waitForDeploymentRevisionAndImage("5", fakeImage5); err != nil {
+        t.Fatal(err)
+    }
+    if err = tester.waitForDeploymentCompleteAndMarkPodsReady(); err != nil {
+        t.Fatalf("deployment %q failed to complete: %v", name, err)
+    }
+
+    // Verify every replicaset has correct desiredReplicas annotation after 5th rollout
+    fifthRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.ExtensionsV1beta1())
+    if err != nil {
+        t.Fatalf("failed getting new revision 5 replicaset for deployment %q: %v", name, err)
+    }
+    rss = []*v1beta1.ReplicaSet{thirdRS, fourthRS, fifthRS}
+    for _, curRS := range rss {
+        curRS, err = c.ExtensionsV1beta1().ReplicaSets(ns.Name).Get(curRS.Name, metav1.GetOptions{})
+        if err != nil {
+            t.Fatalf("failed to get replicaset when checking desired replicas annotation: %v", err)
+        }
+        desired, ok := deploymentutil.GetDesiredReplicasAnnotation(curRS)
+        if !ok {
+            t.Fatalf("failed to retrieve desiredReplicas annotation for replicaset %q", curRS.Name)
+        }
+        if desired != *(tester.deployment.Spec.Replicas) {
+            t.Fatalf("unexpected desiredReplicas annotation for replicaset %q: expected %d, got %d", curRS.Name, *(tester.deployment.Spec.Replicas), desired)
+        }
+    }
+}
diff --git a/test/integration/deployment/util.go b/test/integration/deployment/util.go
index cfc33fa6ab4..dc196c92a33 100644
--- a/test/integration/deployment/util.go
+++ b/test/integration/deployment/util.go
@@ -26,6 +26,7 @@ import (
     "k8s.io/api/core/v1"
     "k8s.io/api/extensions/v1beta1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/informers"
     clientset "k8s.io/client-go/kubernetes"
@@ -80,7 +81,8 @@ func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment {
             Replicas: &replicas,
             Selector: &metav1.LabelSelector{MatchLabels: testLabels()},
             Strategy: v1beta1.DeploymentStrategy{
-                Type: v1beta1.RollingUpdateDeploymentStrategyType,
+                Type:          v1beta1.RollingUpdateDeploymentStrategyType,
+                RollingUpdate: new(v1beta1.RollingUpdateDeployment),
             },
             Template: v1.PodTemplateSpec{
                 ObjectMeta: metav1.ObjectMeta{
@@ -212,6 +214,11 @@ func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
     return err
 }
 
+func intOrStrP(num int) *intstr.IntOrString {
+    intstr := intstr.FromInt(num)
+    return &intstr
+}
+
 // markUpdatedPodsReady manually marks updated Deployment pods status to ready,
 // until the deployment is complete
 func (d *deploymentTester) markUpdatedPodsReady(wg *sync.WaitGroup) {
@@ -405,3 +412,7 @@ func (d *deploymentTester) listUpdatedPods() ([]v1.Pod, error) {
     }
     return ownedPods, nil
 }
+
+func (d *deploymentTester) waitRSStable(replicaset *v1beta1.ReplicaSet) error {
+    return testutil.WaitRSStable(d.t, d.c, replicaset, pollInterval, pollTimeout)
+}
diff --git a/test/integration/replicaset/BUILD b/test/integration/replicaset/BUILD
index ea194022e26..58575e50ea0 100644
--- a/test/integration/replicaset/BUILD
+++ b/test/integration/replicaset/BUILD
@@ -18,6 +18,7 @@ go_test(
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/controller/replicaset:go_default_library",
         "//test/integration/framework:go_default_library",
+        "//test/utils:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
diff --git a/test/integration/replicaset/replicaset_test.go b/test/integration/replicaset/replicaset_test.go
index 7ad3489c7c9..91ae5ce7ffc 100644
--- a/test/integration/replicaset/replicaset_test.go
+++ b/test/integration/replicaset/replicaset_test.go
@@ -41,6 +41,7 @@ import (
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     "k8s.io/kubernetes/pkg/controller/replicaset"
     "k8s.io/kubernetes/test/integration/framework"
+    testutil "k8s.io/kubernetes/test/utils"
 )
 
 const (
@@ -217,15 +218,8 @@ func createRSsPods(t *testing.T, clientSet clientset.Interface, rss []*v1beta1.R
 // Verify .Status.Replicas is equal to .Spec.Replicas
 func waitRSStable(t *testing.T, clientSet clientset.Interface, rs *v1beta1.ReplicaSet) {
-    rsClient := clientSet.Extensions().ReplicaSets(rs.Namespace)
-    if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
-        newRS, err := rsClient.Get(rs.Name, metav1.GetOptions{})
-        if err != nil {
-            return false, err
-        }
-        return newRS.Status.Replicas == *rs.Spec.Replicas, nil
-    }); err != nil {
-        t.Fatalf("Failed to verify .Status.Replicas is equal to .Spec.Replicas for rs %s: %v", rs.Name, err)
+    if err := testutil.WaitRSStable(t, clientSet, rs, interval, timeout); err != nil {
+        t.Fatal(err)
     }
 }
diff --git a/test/utils/replicaset.go b/test/utils/replicaset.go
index 9a4b6d05b90..adf30c35d06 100644
--- a/test/utils/replicaset.go
+++ b/test/utils/replicaset.go
@@ -18,6 +18,7 @@ package utils
 
 import (
     "fmt"
+    "testing"
     "time"
 
     extensions "k8s.io/api/extensions/v1beta1"
@@ -50,3 +51,18 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string,
     }
     return rs, pollErr
 }
+
+// WaitRSStable waits until the replicaset's .Status.ObservedGeneration is current and .Status.Replicas is equal to .Spec.Replicas
+func WaitRSStable(t *testing.T, clientSet clientset.Interface, rs *extensions.ReplicaSet, pollInterval, pollTimeout time.Duration) error {
+    desiredGeneration := rs.Generation
+    if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
+        newRS, err := clientSet.ExtensionsV1beta1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
+        if err != nil {
+            return false, err
+        }
+        return newRS.Status.ObservedGeneration >= desiredGeneration && newRS.Status.Replicas == *rs.Spec.Replicas, nil
+    }); err != nil {
+        return fmt.Errorf("failed to verify .Status.Replicas is equal to .Spec.Replicas for replicaset %q: %v", rs.Name, err)
+    }
+    return nil
+}