From 1ced5ae22cdb772cd743059a5ca270b59946998e Mon Sep 17 00:00:00 2001 From: Janet Kuo Date: Fri, 12 May 2017 12:44:31 -0700 Subject: [PATCH 1/3] Add integration test for deployment --- hack/.linted_packages | 1 + test/integration/BUILD | 1 + test/integration/deployment/BUILD | 56 +++ test/integration/deployment/OWNERS | 6 + .../integration/deployment/deployment_test.go | 71 ++++ test/integration/deployment/util.go | 353 ++++++++++++++++++ 6 files changed, 488 insertions(+) create mode 100644 test/integration/deployment/BUILD create mode 100644 test/integration/deployment/OWNERS create mode 100644 test/integration/deployment/deployment_test.go create mode 100644 test/integration/deployment/util.go diff --git a/hack/.linted_packages b/hack/.linted_packages index 2127a2dccf6..f3ef847cdf6 100644 --- a/hack/.linted_packages +++ b/hack/.linted_packages @@ -438,6 +438,7 @@ test/integration/apiserver test/integration/client test/integration/configmap test/integration/defaulttolerationseconds +test/integration/deployment test/integration/etcd test/integration/examples test/integration/federation diff --git a/test/integration/BUILD b/test/integration/BUILD index 3e12a4a5089..d465c6510c0 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -39,6 +39,7 @@ filegroup( "//test/integration/client:all-srcs", "//test/integration/configmap:all-srcs", "//test/integration/defaulttolerationseconds:all-srcs", + "//test/integration/deployment:all-srcs", "//test/integration/etcd:all-srcs", "//test/integration/evictions:all-srcs", "//test/integration/examples:all-srcs", diff --git a/test/integration/deployment/BUILD b/test/integration/deployment/BUILD new file mode 100644 index 00000000000..8f2558fff64 --- /dev/null +++ b/test/integration/deployment/BUILD @@ -0,0 +1,56 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", + "go_test", +) + +go_test( + name = "go_default_test", + srcs = ["deployment_test.go"], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/api/v1:go_default_library", + "//pkg/controller/deployment/util:go_default_library", + "//test/integration/framework:go_default_library", + ], +) + +go_library( + name = "go_default_library", + srcs = ["util.go"], + tags = ["automanaged"], + deps = [ + "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", + "//pkg/apis/extensions/v1beta1:go_default_library", + "//pkg/client/clientset_generated/clientset:go_default_library", + "//pkg/client/informers/informers_generated/externalversions:go_default_library", + "//pkg/controller/deployment:go_default_library", + "//pkg/controller/deployment/util:go_default_library", + "//pkg/controller/replicaset:go_default_library", + "//pkg/util/labels:go_default_library", + "//test/integration/framework:go_default_library", + "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/rest:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/test/integration/deployment/OWNERS b/test/integration/deployment/OWNERS new file mode 100644 index 00000000000..2135e78771e --- /dev/null +++ 
b/test/integration/deployment/OWNERS
@@ -0,0 +1,6 @@
+reviewers:
+ - janetkuo
+ - kargakis
+approvers:
+ - janetkuo
+ - kargakis
diff --git a/test/integration/deployment/deployment_test.go b/test/integration/deployment/deployment_test.go
new file mode 100644
index 00000000000..4d603751407
--- /dev/null
+++ b/test/integration/deployment/deployment_test.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package deployment
+
+import (
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api/v1"
+	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
+	"k8s.io/kubernetes/test/integration/framework"
+)
+
+func TestNewDeployment(t *testing.T) {
+	s, closeFn, rm, dc, informers, c := dcSetup(t)
+	defer closeFn()
+	name := "test-new-deployment"
+	ns := framework.CreateTestingNamespace(name, s, t)
+	defer framework.DeleteTestingNamespace(ns, s, t)
+
+	replicas := int32(20)
+	tester := &deploymentTester{t: t, c: c, deployment: newDeployment(name, ns.Name, replicas)}
+	tester.deployment.Spec.MinReadySeconds = 4
+
+	tester.deployment.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
+	deploy, err := c.Extensions().Deployments(ns.Name).Create(tester.deployment)
+	if err != nil {
+		t.Fatalf("failed to create deployment %s: %v", tester.deployment.Name, err)
+	}
+
+	// Start informers and controllers
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	informers.Start(stopCh)
+	go rm.Run(5, stopCh)
+	go dc.Run(5, stopCh)
+
+	// Wait for the Deployment to be updated to revision 1
+	err = tester.waitForDeploymentRevisionAndImage("1", fakeImage)
+	if err != nil {
+		t.Fatalf("failed to wait for Deployment %s to be updated to revision 1: %v", deploy.Name, err)
+	}
+
+	// Make sure the Deployment status becomes valid while manually marking its pods as ready
+	tester.waitForDeploymentStatusValidAndMarkPodsReady()
+
+	// Check new RS annotations
+	newRS, err := deploymentutil.GetNewReplicaSet(deploy, c)
+	if err != nil {
+		t.Fatalf("failed to get new ReplicaSet of Deployment %s: %v", deploy.Name, err)
+	}
+	if newRS.Annotations["test"] != "should-copy-to-replica-set" {
+		t.Errorf("expected new ReplicaSet annotations copied from Deployment %s, got: %v", deploy.Name, newRS.Annotations)
+	}
+	if newRS.Annotations[v1.LastAppliedConfigAnnotation] != "" {
+		t.Errorf("expected new ReplicaSet last-applied annotation not copied from Deployment %s", deploy.Name)
+	}
+}
diff --git a/test/integration/deployment/util.go b/test/integration/deployment/util.go
new file mode 100644
index 00000000000..d1045b0f199
--- /dev/null
+++ b/test/integration/deployment/util.go
@@ -0,0 +1,353 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package deployment
+
+import (
+	"fmt"
+	"net/http/httptest"
+	"testing"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/kubernetes/pkg/api/v1"
+	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+	informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
+	"k8s.io/kubernetes/pkg/controller/deployment"
+	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
+	"k8s.io/kubernetes/pkg/controller/replicaset"
+	labelsutil "k8s.io/kubernetes/pkg/util/labels"
+	"k8s.io/kubernetes/test/integration/framework"
+)
+
+const (
+	pollInterval = 1 * time.Second
+	pollTimeout  = 60 * time.Second
+
+	fakeImageName = "fake-name"
+	fakeImage     = "fakeimage"
+)
+
+type deploymentTester struct {
+	t          *testing.T
+	c          clientset.Interface
+	deployment *v1beta1.Deployment
+}
+
+func testLabels() map[string]string {
+	return map[string]string{"name": "test"}
+}
+
+// newDeployment returns a RollingUpdate Deployment with a fake container image
+func newDeployment(name, ns string, replicas int32) *v1beta1.Deployment {
+	return &v1beta1.Deployment{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Deployment",
+			APIVersion: "extensions/v1beta1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: ns,
+			Name:      name,
+		},
+		Spec: v1beta1.DeploymentSpec{
+			Replicas: &replicas,
+			Selector: &metav1.LabelSelector{MatchLabels: testLabels()},
+			Strategy: v1beta1.DeploymentStrategy{
+				Type: v1beta1.RollingUpdateDeploymentStrategyType,
+			},
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: testLabels(),
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:  fakeImageName,
+							Image: fakeImage,
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+// dcSetup sets up the necessities for a Deployment integration test, including the master, apiserver, informers, and clientset
+func dcSetup(t *testing.T) (*httptest.Server, framework.CloseFunc, *replicaset.ReplicaSetController, *deployment.DeploymentController, informers.SharedInformerFactory, clientset.Interface) {
+	masterConfig := framework.NewIntegrationTestMasterConfig()
+	_, s, closeFn := framework.RunAMaster(masterConfig)
+
+	config := restclient.Config{Host: s.URL}
+	clientSet, err := clientset.NewForConfig(&config)
+	if err != nil {
+		t.Fatalf("failed to create clientset: %v", err)
+	}
+	resyncPeriod := 12 * time.Hour
+	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-informers")), resyncPeriod)
+
+	dc := deployment.NewDeploymentController(
+		informers.Extensions().V1beta1().Deployments(),
+		informers.Extensions().V1beta1().ReplicaSets(),
+		informers.Core().V1().Pods(),
+		clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "deployment-controller")),
+	)
+	rm := replicaset.NewReplicaSetController(
+		informers.Extensions().V1beta1().ReplicaSets(),
+		informers.Core().V1().Pods(),
+
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")), + replicaset.BurstReplicas, + ) + return s, closeFn, rm, dc, informers, clientSet +} + +// addPodConditionReady sets given pod status to ready at given time +func addPodConditionReady(pod *v1.Pod, time metav1.Time) { + pod.Status = v1.PodStatus{ + Phase: v1.PodRunning, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionTrue, + LastTransitionTime: time, + }, + }, + } +} + +func (d *deploymentTester) logReplicaSetsOfDeployment(allOldRSs []*v1beta1.ReplicaSet, newRS *v1beta1.ReplicaSet) { + if newRS != nil { + d.t.Logf("New ReplicaSet of Deployment %s:\n%+v", d.deployment.Name, *newRS) + } else { + d.t.Logf("New ReplicaSet of Deployment %s is nil.", d.deployment.Name) + } + if len(allOldRSs) > 0 { + d.t.Logf("All old ReplicaSets of Deployment %s:", d.deployment.Name) + } + for i := range allOldRSs { + d.t.Logf(spew.Sprintf("%#v", *allOldRSs[i])) + } +} + +func (d *deploymentTester) logPodsOfDeployment(rsList []*v1beta1.ReplicaSet) { + minReadySeconds := d.deployment.Spec.MinReadySeconds + podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { + return d.c.Core().Pods(namespace).List(options) + } + + podList, err := deploymentutil.ListPods(d.deployment, rsList, podListFunc) + + if err != nil { + d.t.Logf("Failed to list Pods of Deployment %s: %v", d.deployment.Name, err) + return + } + for _, pod := range podList.Items { + availability := "not available" + if podutil.IsPodAvailable(&pod, minReadySeconds, metav1.Now()) { + availability = "available" + } + d.t.Logf("Pod %s is %s:\n%s", pod.Name, availability, spew.Sprintf("%#v", pod)) + } +} + +// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image. +// Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early. 
+func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image string) error { + var deployment *v1beta1.Deployment + var newRS *v1beta1.ReplicaSet + var reason string + deploymentName, ns := d.deployment.Name, d.deployment.Namespace + err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) { + var err error + deployment, err = d.c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + if err != nil { + return false, err + } + // The new ReplicaSet needs to be non-nil and contain the pod-template-hash label + newRS, err = deploymentutil.GetNewReplicaSet(deployment, d.c) + if err != nil { + return false, err + } + if newRS == nil { + reason = fmt.Sprintf("New replica set for deployment %q is yet to be created", deployment.Name) + d.t.Logf(reason) + return false, nil + } + if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, v1beta1.DefaultDeploymentUniqueLabelKey) { + reason = fmt.Sprintf("New replica set %q doesn't have DefaultDeploymentUniqueLabelKey", newRS.Name) + d.t.Logf(reason) + return false, nil + } + // Check revision of this deployment, and of the new replica set of this deployment + if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision { + reason = fmt.Sprintf("Deployment %q doesn't have the required revision set", deployment.Name) + d.t.Logf(reason) + return false, nil + } + if deployment.Spec.Template.Spec.Containers[0].Image != image { + reason = fmt.Sprintf("Deployment %q doesn't have the required image set", deployment.Name) + d.t.Logf(reason) + return false, nil + } + if newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision { + reason = fmt.Sprintf("New replica set %q doesn't have the required revision set", newRS.Name) + d.t.Logf(reason) + return false, nil + } + if newRS.Spec.Template.Spec.Containers[0].Image != image { + reason = fmt.Sprintf("New replica set %q doesn't have the required image set", newRS.Name) + d.t.Logf(reason) + return false, nil + } + return true, nil + }) + if err == wait.ErrWaitTimeout { + d.logReplicaSetsOfDeployment(nil, newRS) + err = fmt.Errorf(reason) + } + if newRS == nil { + return fmt.Errorf("deployment %q failed to create new replica set", deploymentName) + } + if err != nil { + return fmt.Errorf("error waiting for deployment %q (got %s / %s) and new replica set %q (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deployment.Spec.Template.Spec.Containers[0].Image, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRS.Spec.Template.Spec.Containers[0].Image, revision, image, err) + } + return nil +} + +// markAllPodsReady manually updates all Deployment pods status to ready +func (d *deploymentTester) markAllPodsReady() { + ns := d.deployment.Namespace + selector, err := metav1.LabelSelectorAsSelector(d.deployment.Spec.Selector) + if err != nil { + d.t.Fatalf("failed to parse Deployment selector: %v", err) + } + var readyPods int32 + err = wait.Poll(100*time.Millisecond, pollTimeout, func() (bool, error) { + readyPods = 0 + pods, err := d.c.Core().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + d.t.Logf("failed to list Deployment pods, will retry later: %v", err) + return false, nil + } + for i := range pods.Items { + pod := pods.Items[i] + if podutil.IsPodReady(&pod) { + readyPods++ + continue + } + addPodConditionReady(&pod, metav1.Now()) + if _, err 
= d.c.Core().Pods(ns).UpdateStatus(&pod); err != nil { + d.t.Logf("failed to update Deployment pod %s, will retry later: %v", pod.Name, err) + } else { + readyPods++ + } + } + if readyPods >= *d.deployment.Spec.Replicas { + return true, nil + } + return false, nil + }) + if err != nil { + d.t.Fatalf("failed to mark all Deployment pods to ready: %v", err) + } +} + +func (d *deploymentTester) waitForDeploymentStatusValid() error { + var ( + oldRSs, allOldRSs, allRSs []*v1beta1.ReplicaSet + newRS *v1beta1.ReplicaSet + deployment *v1beta1.Deployment + reason string + ) + + name := d.deployment.Name + err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) { + var err error + deployment, err = d.c.Extensions().Deployments(d.deployment.Namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return false, err + } + oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, d.c) + if err != nil { + return false, err + } + if newRS == nil { + // New RC hasn't been created yet. + reason = "new replica set hasn't been created yet" + d.t.Logf(reason) + return false, nil + } + allRSs = append(oldRSs, newRS) + // The old/new ReplicaSets need to contain the pod-template-hash label + for i := range allRSs { + if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, v1beta1.DefaultDeploymentUniqueLabelKey) { + reason = "all replica sets need to contain the pod-template-hash label" + d.t.Logf(reason) + return false, nil + } + } + totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs) + maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment) + if totalCreated > maxCreated { + reason = fmt.Sprintf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated) + d.t.Logf(reason) + return false, nil + } + minAvailable := deploymentutil.MinAvailable(deployment) + if deployment.Status.AvailableReplicas < minAvailable { + reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable) + d.t.Logf(reason) + return false, nil + } + + // When the deployment status and its underlying resources reach the desired state, we're done + if deploymentutil.DeploymentComplete(deployment, &deployment.Status) { + return true, nil + } + + reason = fmt.Sprintf("deployment status: %#v", deployment.Status) + d.t.Logf(reason) + + return false, nil + }) + + if err == wait.ErrWaitTimeout { + d.logReplicaSetsOfDeployment(allOldRSs, newRS) + d.logPodsOfDeployment(allRSs) + err = fmt.Errorf("%s", reason) + } + if err != nil { + return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.deployment.Name, err) + } + return nil +} + +// waitForDeploymentStatusValidAndMarkPodsReady waits for the Deployment status to become valid +// while marking all Deployment pods as ready at the same time. 
+func (d *deploymentTester) waitForDeploymentStatusValidAndMarkPodsReady() { + // Manually mark all Deployment pods as ready in a separate goroutine + go d.markAllPodsReady() + + // Make sure the Deployment status is valid while Deployment pods are becoming ready + err := d.waitForDeploymentStatusValid() + if err != nil { + d.t.Fatalf("failed to wait for Deployment status %s: %v", d.deployment.Name, err) + } +} From 282c90bc1ad85ef573639d88791582efd9bb8c2f Mon Sep 17 00:00:00 2001 From: Janet Kuo Date: Fri, 12 May 2017 12:45:31 -0700 Subject: [PATCH 2/3] Remove e2e test for creating a new deployment --- test/e2e/deployment.go | 34 ---------------------------------- 1 file changed, 34 deletions(-) diff --git a/test/e2e/deployment.go b/test/e2e/deployment.go index b6d85d034cd..4d2bd3fd779 100644 --- a/test/e2e/deployment.go +++ b/test/e2e/deployment.go @@ -63,9 +63,6 @@ var _ = framework.KubeDescribe("Deployment", func() { // TODO: Add failure traps once we have JustAfterEach // See https://github.com/onsi/ginkgo/issues/303 - It("deployment should create new pods", func() { - testNewDeployment(f) - }) It("deployment reaping should cascade to its replica sets and pods", func() { testDeleteDeployment(f) }) @@ -192,37 +189,6 @@ func stopDeployment(c clientset.Interface, internalClient internalclientset.Inte } } -func testNewDeployment(f *framework.Framework) { - ns := f.Namespace.Name - c := f.ClientSet - - deploymentName := "test-new-deployment" - podLabels := map[string]string{"name": nginxImageName} - replicas := int32(1) - framework.Logf("Creating simple deployment %s", deploymentName) - d := framework.NewDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType) - d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"} - deploy, err := c.Extensions().Deployments(ns).Create(d) - Expect(err).NotTo(HaveOccurred()) - - // Wait for it to be updated to revision 1 - err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", nginxImage) - Expect(err).NotTo(HaveOccurred()) - - err = framework.WaitForDeploymentStatusValid(c, deploy) - Expect(err).NotTo(HaveOccurred()) - - deployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - newRS, err := deploymentutil.GetNewReplicaSet(deployment, c) - Expect(err).NotTo(HaveOccurred()) - // Check new RS annotations - Expect(newRS.Annotations["test"]).Should(Equal("should-copy-to-replica-set")) - Expect(newRS.Annotations[v1.LastAppliedConfigAnnotation]).Should(Equal("")) - Expect(deployment.Annotations["test"]).Should(Equal("should-copy-to-replica-set")) - Expect(deployment.Annotations[v1.LastAppliedConfigAnnotation]).Should(Equal("should-not-copy-to-replica-set")) -} - func testDeleteDeployment(f *framework.Framework) { ns := f.Namespace.Name c := f.ClientSet From 3f2d8ae68276629d2e7902610f328616571f0fba Mon Sep 17 00:00:00 2001 From: Janet Kuo Date: Mon, 15 May 2017 16:01:37 -0700 Subject: [PATCH 3/3] Extract common code in deployment e2e and integration test --- test/e2e/framework/util.go | 165 ++-------------------- test/integration/deployment/BUILD | 4 +- test/integration/deployment/util.go | 170 +---------------------- test/utils/BUILD | 4 + test/utils/deployment.go | 205 ++++++++++++++++++++++++++++ 5 files changed, 222 insertions(+), 326 deletions(-) create mode 100644 test/utils/deployment.go diff --git 
a/test/e2e/framework/util.go b/test/e2e/framework/util.go index e4d373e405f..2a7f9a72bb4 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -63,6 +63,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" + testutil "k8s.io/kubernetes/test/utils" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" @@ -122,6 +123,9 @@ const ( // How often to Poll pods, nodes and claims. Poll = 2 * time.Second + pollShortTimeout = 1 * time.Minute + pollLongTimeout = 5 * time.Minute + // service accounts are provisioned after namespace creation // a service account is required to support pod creation in a namespace as part of admission control ServiceAccountProvisionTimeout = 2 * time.Minute @@ -3174,7 +3178,7 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s Name: deploymentName, }, Spec: extensions.DeploymentSpec{ - Replicas: func(i int32) *int32 { return &i }(replicas), + Replicas: &replicas, Selector: &metav1.LabelSelector{MatchLabels: podLabels}, Strategy: extensions.DeploymentStrategy{ Type: strategyType, @@ -3201,72 +3205,7 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s // Note that the status should stay valid at all times unless shortly after a scaling event or the deployment is just created. // To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead. func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment) error { - var ( - oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet - newRS *extensions.ReplicaSet - deployment *extensions.Deployment - reason string - ) - - err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) { - var err error - deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c) - if err != nil { - return false, err - } - if newRS == nil { - // New RC hasn't been created yet. 
- reason = "new replica set hasn't been created yet" - Logf(reason) - return false, nil - } - allRSs = append(oldRSs, newRS) - // The old/new ReplicaSets need to contain the pod-template-hash label - for i := range allRSs { - if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) { - reason = "all replica sets need to contain the pod-template-hash label" - Logf(reason) - return false, nil - } - } - totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs) - maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment) - if totalCreated > maxCreated { - reason = fmt.Sprintf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated) - Logf(reason) - return false, nil - } - minAvailable := deploymentutil.MinAvailable(deployment) - if deployment.Status.AvailableReplicas < minAvailable { - reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable) - Logf(reason) - return false, nil - } - - // When the deployment status and its underlying resources reach the desired state, we're done - if deploymentutil.DeploymentComplete(deployment, &deployment.Status) { - return true, nil - } - - reason = fmt.Sprintf("deployment status: %#v", deployment.Status) - Logf(reason) - - return false, nil - }) - - if err == wait.ErrWaitTimeout { - logReplicaSetsOfDeployment(deployment, allOldRSs, newRS) - logPodsOfDeployment(c, deployment, allRSs) - err = fmt.Errorf("%s", reason) - } - if err != nil { - return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err) - } - return nil + return testutil.WaitForDeploymentStatusValid(c, d, Logf, Poll, pollLongTimeout) } // Waits for the deployment to reach desired state. @@ -3409,66 +3348,7 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er // WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image. // Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early. 
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error { - var deployment *extensions.Deployment - var newRS *extensions.ReplicaSet - var reason string - err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) { - var err error - deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) - if err != nil { - return false, err - } - // The new ReplicaSet needs to be non-nil and contain the pod-template-hash label - - newRS, err = deploymentutil.GetNewReplicaSet(deployment, c) - - if err != nil { - return false, err - } - if newRS == nil { - reason = fmt.Sprintf("New replica set for deployment %q is yet to be created", deployment.Name) - Logf(reason) - return false, nil - } - if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) { - reason = fmt.Sprintf("New replica set %q doesn't have DefaultDeploymentUniqueLabelKey", newRS.Name) - Logf(reason) - return false, nil - } - // Check revision of this deployment, and of the new replica set of this deployment - if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision { - reason = fmt.Sprintf("Deployment %q doesn't have the required revision set", deployment.Name) - Logf(reason) - return false, nil - } - if deployment.Spec.Template.Spec.Containers[0].Image != image { - reason = fmt.Sprintf("Deployment %q doesn't have the required image set", deployment.Name) - Logf(reason) - return false, nil - } - if newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision { - reason = fmt.Sprintf("New replica set %q doesn't have the required revision set", newRS.Name) - Logf(reason) - return false, nil - } - if newRS.Spec.Template.Spec.Containers[0].Image != image { - reason = fmt.Sprintf("New replica set %q doesn't have the required image set", newRS.Name) - Logf(reason) - return false, nil - } - return true, nil - }) - if err == wait.ErrWaitTimeout { - logReplicaSetsOfDeployment(deployment, nil, newRS) - err = fmt.Errorf(reason) - } - if newRS == nil { - return fmt.Errorf("deployment %q failed to create new replica set", deploymentName) - } - if err != nil { - return fmt.Errorf("error waiting for deployment %q (got %s / %s) and new replica set %q (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deployment.Spec.Template.Spec.Containers[0].Image, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRS.Spec.Template.Spec.Containers[0].Image, revision, image, err) - } - return nil + return testutil.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, pollShortTimeout) } // CheckNewRSAnnotations check if the new RS's annotation is as expected @@ -3533,17 +3413,7 @@ func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string } func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) { - if newRS != nil { - Logf("New ReplicaSet of Deployment %s:\n%+v", deployment.Name, *newRS) - } else { - Logf("New ReplicaSet of Deployment %s is nil.", deployment.Name) - } - if len(allOldRSs) > 0 { - Logf("All old ReplicaSets of Deployment %s:", deployment.Name) - } - for i := range allOldRSs { - Logf("%+v", *allOldRSs[i]) - } + testutil.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf) } func 
WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error { @@ -3575,24 +3445,7 @@ func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, r } func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) { - minReadySeconds := deployment.Spec.MinReadySeconds - podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { - return c.Core().Pods(namespace).List(options) - } - - podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc) - - if err != nil { - Logf("Failed to list Pods of Deployment %s: %v", deployment.Name, err) - return - } - for _, pod := range podList.Items { - availability := "not available" - if podutil.IsPodAvailable(&pod, minReadySeconds, metav1.Now()) { - availability = "available" - } - Logf("Pod %s is %s:\n%+v", pod.Name, availability, pod) - } + testutil.LogPodsOfDeployment(c, deployment, rsList, Logf) } // Waits for the number of events on the given object to reach a desired count. diff --git a/test/integration/deployment/BUILD b/test/integration/deployment/BUILD index 8f2558fff64..eef16ca6cc4 100644 --- a/test/integration/deployment/BUILD +++ b/test/integration/deployment/BUILD @@ -31,11 +31,9 @@ go_library( "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/informers/informers_generated/externalversions:go_default_library", "//pkg/controller/deployment:go_default_library", - "//pkg/controller/deployment/util:go_default_library", "//pkg/controller/replicaset:go_default_library", - "//pkg/util/labels:go_default_library", "//test/integration/framework:go_default_library", - "//vendor/github.com/davecgh/go-spew/spew:go_default_library", + "//test/utils:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//vendor/k8s.io/client-go/rest:go_default_library", diff --git a/test/integration/deployment/util.go b/test/integration/deployment/util.go index d1045b0f199..b23140742f0 100644 --- a/test/integration/deployment/util.go +++ b/test/integration/deployment/util.go @@ -17,12 +17,10 @@ limitations under the License. 
package deployment import ( - "fmt" "net/http/httptest" "testing" "time" - "github.com/davecgh/go-spew/spew" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" restclient "k8s.io/client-go/rest" @@ -32,10 +30,9 @@ import ( "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions" "k8s.io/kubernetes/pkg/controller/deployment" - deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/pkg/controller/replicaset" - labelsutil "k8s.io/kubernetes/pkg/util/labels" "k8s.io/kubernetes/test/integration/framework" + testutil "k8s.io/kubernetes/test/utils" ) const ( @@ -132,103 +129,8 @@ func addPodConditionReady(pod *v1.Pod, time metav1.Time) { } } -func (d *deploymentTester) logReplicaSetsOfDeployment(allOldRSs []*v1beta1.ReplicaSet, newRS *v1beta1.ReplicaSet) { - if newRS != nil { - d.t.Logf("New ReplicaSet of Deployment %s:\n%+v", d.deployment.Name, *newRS) - } else { - d.t.Logf("New ReplicaSet of Deployment %s is nil.", d.deployment.Name) - } - if len(allOldRSs) > 0 { - d.t.Logf("All old ReplicaSets of Deployment %s:", d.deployment.Name) - } - for i := range allOldRSs { - d.t.Logf(spew.Sprintf("%#v", *allOldRSs[i])) - } -} - -func (d *deploymentTester) logPodsOfDeployment(rsList []*v1beta1.ReplicaSet) { - minReadySeconds := d.deployment.Spec.MinReadySeconds - podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { - return d.c.Core().Pods(namespace).List(options) - } - - podList, err := deploymentutil.ListPods(d.deployment, rsList, podListFunc) - - if err != nil { - d.t.Logf("Failed to list Pods of Deployment %s: %v", d.deployment.Name, err) - return - } - for _, pod := range podList.Items { - availability := "not available" - if podutil.IsPodAvailable(&pod, minReadySeconds, metav1.Now()) { - availability = "available" - } - d.t.Logf("Pod %s is %s:\n%s", pod.Name, availability, spew.Sprintf("%#v", pod)) - } -} - -// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image. -// Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early. 
func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image string) error { - var deployment *v1beta1.Deployment - var newRS *v1beta1.ReplicaSet - var reason string - deploymentName, ns := d.deployment.Name, d.deployment.Namespace - err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) { - var err error - deployment, err = d.c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) - if err != nil { - return false, err - } - // The new ReplicaSet needs to be non-nil and contain the pod-template-hash label - newRS, err = deploymentutil.GetNewReplicaSet(deployment, d.c) - if err != nil { - return false, err - } - if newRS == nil { - reason = fmt.Sprintf("New replica set for deployment %q is yet to be created", deployment.Name) - d.t.Logf(reason) - return false, nil - } - if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, v1beta1.DefaultDeploymentUniqueLabelKey) { - reason = fmt.Sprintf("New replica set %q doesn't have DefaultDeploymentUniqueLabelKey", newRS.Name) - d.t.Logf(reason) - return false, nil - } - // Check revision of this deployment, and of the new replica set of this deployment - if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision { - reason = fmt.Sprintf("Deployment %q doesn't have the required revision set", deployment.Name) - d.t.Logf(reason) - return false, nil - } - if deployment.Spec.Template.Spec.Containers[0].Image != image { - reason = fmt.Sprintf("Deployment %q doesn't have the required image set", deployment.Name) - d.t.Logf(reason) - return false, nil - } - if newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision { - reason = fmt.Sprintf("New replica set %q doesn't have the required revision set", newRS.Name) - d.t.Logf(reason) - return false, nil - } - if newRS.Spec.Template.Spec.Containers[0].Image != image { - reason = fmt.Sprintf("New replica set %q doesn't have the required image set", newRS.Name) - d.t.Logf(reason) - return false, nil - } - return true, nil - }) - if err == wait.ErrWaitTimeout { - d.logReplicaSetsOfDeployment(nil, newRS) - err = fmt.Errorf(reason) - } - if newRS == nil { - return fmt.Errorf("deployment %q failed to create new replica set", deploymentName) - } - if err != nil { - return fmt.Errorf("error waiting for deployment %q (got %s / %s) and new replica set %q (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deployment.Spec.Template.Spec.Containers[0].Image, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRS.Spec.Template.Spec.Containers[0].Image, revision, image, err) - } - return nil + return testutil.WaitForDeploymentRevisionAndImage(d.c, d.deployment.Namespace, d.deployment.Name, revision, image, d.t.Logf, pollInterval, pollTimeout) } // markAllPodsReady manually updates all Deployment pods status to ready @@ -270,73 +172,7 @@ func (d *deploymentTester) markAllPodsReady() { } func (d *deploymentTester) waitForDeploymentStatusValid() error { - var ( - oldRSs, allOldRSs, allRSs []*v1beta1.ReplicaSet - newRS *v1beta1.ReplicaSet - deployment *v1beta1.Deployment - reason string - ) - - name := d.deployment.Name - err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) { - var err error - deployment, err = d.c.Extensions().Deployments(d.deployment.Namespace).Get(name, metav1.GetOptions{}) - if err != nil { - return false, err - } - oldRSs, allOldRSs, newRS, err = 
deploymentutil.GetAllReplicaSets(deployment, d.c) - if err != nil { - return false, err - } - if newRS == nil { - // New RC hasn't been created yet. - reason = "new replica set hasn't been created yet" - d.t.Logf(reason) - return false, nil - } - allRSs = append(oldRSs, newRS) - // The old/new ReplicaSets need to contain the pod-template-hash label - for i := range allRSs { - if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, v1beta1.DefaultDeploymentUniqueLabelKey) { - reason = "all replica sets need to contain the pod-template-hash label" - d.t.Logf(reason) - return false, nil - } - } - totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs) - maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment) - if totalCreated > maxCreated { - reason = fmt.Sprintf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated) - d.t.Logf(reason) - return false, nil - } - minAvailable := deploymentutil.MinAvailable(deployment) - if deployment.Status.AvailableReplicas < minAvailable { - reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable) - d.t.Logf(reason) - return false, nil - } - - // When the deployment status and its underlying resources reach the desired state, we're done - if deploymentutil.DeploymentComplete(deployment, &deployment.Status) { - return true, nil - } - - reason = fmt.Sprintf("deployment status: %#v", deployment.Status) - d.t.Logf(reason) - - return false, nil - }) - - if err == wait.ErrWaitTimeout { - d.logReplicaSetsOfDeployment(allOldRSs, newRS) - d.logPodsOfDeployment(allRSs) - err = fmt.Errorf("%s", reason) - } - if err != nil { - return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.deployment.Name, err) - } - return nil + return testutil.WaitForDeploymentStatusValid(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout) } // waitForDeploymentStatusValidAndMarkPodsReady waits for the Deployment status to become valid diff --git a/test/utils/BUILD b/test/utils/BUILD index 27208ef4e33..3c94c471887 100644 --- a/test/utils/BUILD +++ b/test/utils/BUILD @@ -12,6 +12,7 @@ go_library( srcs = [ "conditions.go", "density_utils.go", + "deployment.go", "pod_store.go", "runners.go", "tmpdir.go", @@ -20,12 +21,15 @@ go_library( deps = [ "//pkg/api:go_default_library", "//pkg/api/v1:go_default_library", + "//pkg/api/v1/pod:go_default_library", "//pkg/apis/batch:go_default_library", "//pkg/apis/batch/v1:go_default_library", "//pkg/apis/extensions:go_default_library", "//pkg/apis/extensions/v1beta1:go_default_library", "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/client/clientset_generated/internalclientset:go_default_library", + "//pkg/controller/deployment/util:go_default_library", + "//pkg/util/labels:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", diff --git a/test/utils/deployment.go b/test/utils/deployment.go new file mode 100644 index 00000000000..152685ad4cc --- /dev/null +++ b/test/utils/deployment.go @@ -0,0 +1,205 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/pkg/api/v1" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" + extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" + labelsutil "k8s.io/kubernetes/pkg/util/labels" +) + +type LogfFn func(format string, args ...interface{}) + +func LogReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, logf LogfFn) { + if newRS != nil { + logf("New ReplicaSet of Deployment %s:\n%+v", deployment.Name, *newRS) + } else { + logf("New ReplicaSet of Deployment %s is nil.", deployment.Name) + } + if len(allOldRSs) > 0 { + logf("All old ReplicaSets of Deployment %s:", deployment.Name) + } + for i := range allOldRSs { + logf("%+v", *allOldRSs[i]) + } +} + +func LogPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, logf LogfFn) { + minReadySeconds := deployment.Spec.MinReadySeconds + podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { + return c.Core().Pods(namespace).List(options) + } + + podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc) + + if err != nil { + logf("Failed to list Pods of Deployment %s: %v", deployment.Name, err) + return + } + for _, pod := range podList.Items { + availability := "not available" + if podutil.IsPodAvailable(&pod, minReadySeconds, metav1.Now()) { + availability = "available" + } + logf("Pod %s is %s:\n%+v", pod.Name, availability, pod) + } +} + +// Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore). +// Note that the status should stay valid at all times unless shortly after a scaling event or the deployment is just created. +// To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead. +func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error { + var ( + oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet + newRS *extensions.ReplicaSet + deployment *extensions.Deployment + reason string + ) + + err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) { + var err error + deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c) + if err != nil { + return false, err + } + if newRS == nil { + // New RC hasn't been created yet. 
+ reason = "new replica set hasn't been created yet" + logf(reason) + return false, nil + } + allRSs = append(oldRSs, newRS) + // The old/new ReplicaSets need to contain the pod-template-hash label + for i := range allRSs { + if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) { + reason = "all replica sets need to contain the pod-template-hash label" + logf(reason) + return false, nil + } + } + totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs) + maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment) + if totalCreated > maxCreated { + reason = fmt.Sprintf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated) + logf(reason) + return false, nil + } + minAvailable := deploymentutil.MinAvailable(deployment) + if deployment.Status.AvailableReplicas < minAvailable { + reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable) + logf(reason) + return false, nil + } + + // When the deployment status and its underlying resources reach the desired state, we're done + if deploymentutil.DeploymentComplete(deployment, &deployment.Status) { + return true, nil + } + + reason = fmt.Sprintf("deployment status: %#v", deployment.Status) + logf(reason) + + return false, nil + }) + + if err == wait.ErrWaitTimeout { + LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, logf) + LogPodsOfDeployment(c, deployment, allRSs, logf) + err = fmt.Errorf("%s", reason) + } + if err != nil { + return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err) + } + return nil +} + +// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image. +// Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early. 
+func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string, logf LogfFn, pollInterval, pollTimeout time.Duration) error { + var deployment *extensions.Deployment + var newRS *extensions.ReplicaSet + var reason string + err := wait.Poll(pollInterval, pollTimeout, func() (bool, error) { + var err error + deployment, err = c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{}) + if err != nil { + return false, err + } + // The new ReplicaSet needs to be non-nil and contain the pod-template-hash label + + newRS, err = deploymentutil.GetNewReplicaSet(deployment, c) + + if err != nil { + return false, err + } + if newRS == nil { + reason = fmt.Sprintf("New replica set for deployment %q is yet to be created", deployment.Name) + logf(reason) + return false, nil + } + if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) { + reason = fmt.Sprintf("New replica set %q doesn't have DefaultDeploymentUniqueLabelKey", newRS.Name) + logf(reason) + return false, nil + } + // Check revision of this deployment, and of the new replica set of this deployment + if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision { + reason = fmt.Sprintf("Deployment %q doesn't have the required revision set", deployment.Name) + logf(reason) + return false, nil + } + if deployment.Spec.Template.Spec.Containers[0].Image != image { + reason = fmt.Sprintf("Deployment %q doesn't have the required image set", deployment.Name) + logf(reason) + return false, nil + } + if newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision { + reason = fmt.Sprintf("New replica set %q doesn't have the required revision set", newRS.Name) + logf(reason) + return false, nil + } + if newRS.Spec.Template.Spec.Containers[0].Image != image { + reason = fmt.Sprintf("New replica set %q doesn't have the required image set", newRS.Name) + logf(reason) + return false, nil + } + return true, nil + }) + if err == wait.ErrWaitTimeout { + LogReplicaSetsOfDeployment(deployment, nil, newRS, logf) + err = fmt.Errorf(reason) + } + if newRS == nil { + return fmt.Errorf("deployment %q failed to create new replica set", deploymentName) + } + if err != nil { + return fmt.Errorf("error waiting for deployment %q (got %s / %s) and new replica set %q (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deployment.Spec.Template.Spec.Containers[0].Image, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRS.Spec.Template.Spec.Containers[0].Image, revision, image, err) + } + return nil +}
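Usage note (illustrative, not part of the patches above): the sketch below shows how a test could combine the two waiters extracted into test/utils/deployment.go. Only the testutil function signatures and the LogfFn-compatible t.Logf come from the patch; the package name, the waitForRollout helper, and the 1s/1m polling parameters are assumptions chosen for the example (the integration tests use 1s/60s, the e2e framework uses 2s with short and long timeouts).

package example

import (
	"testing"
	"time"

	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	testutil "k8s.io/kubernetes/test/utils"
)

// waitForRollout (hypothetical helper) first waits for the deployment and its
// new ReplicaSet to reach the given revision and image, then waits for the
// deployment status to become valid, logging progress through t.Logf.
func waitForRollout(t *testing.T, c clientset.Interface, d *extensions.Deployment, revision, image string) {
	// Illustrative polling parameters; callers pick values that suit their environment.
	interval, timeout := 1*time.Second, 1*time.Minute
	if err := testutil.WaitForDeploymentRevisionAndImage(c, d.Namespace, d.Name, revision, image, t.Logf, interval, timeout); err != nil {
		t.Fatalf("deployment %q did not reach revision %s with image %s: %v", d.Name, revision, image, err)
	}
	if err := testutil.WaitForDeploymentStatusValid(c, d, t.Logf, interval, timeout); err != nil {
		t.Fatalf("deployment %q status did not become valid: %v", d.Name, err)
	}
}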