use framework.ExpectNoError() for daemon_set.go and deployment.go in e2e/apps

Author: toyoda
Date: 2019-05-07 14:54:01 +09:00
parent d881c0d77b
commit 4841e5b98c
2 changed files with 157 additions and 141 deletions
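The change is mechanical: each bare gomega.Expect(err).NotTo(gomega.HaveOccurred()) assertion is replaced with the e2e framework's framework.ExpectNoError(err) helper, which performs the same nil-error check and fails the test through the framework, optionally with an explanation argument. A minimal sketch of the two patterns this diff touches (doSomething is a hypothetical helper, used only for illustration):

package apps

import (
	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

// doSomething is a hypothetical stand-in for any call that returns
// an error; it is not part of the real test files.
func doSomething() error { return nil }

func example() {
	// Before: assert directly with gomega.
	err := doSomething()
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	// After: the framework helper performs the same check and fails
	// the test through the e2e framework if err is non-nil.
	err = doSomething()
	framework.ExpectNoError(err)

	// ExpectNoError also accepts an optional explanation, which the
	// diff supplies where the old code passed a message to NotTo:
	framework.ExpectNoError(err, "error doing something")

	// A second pattern in this diff unchains inline assertions such as
	//   gomega.Expect(wait.PollImmediate(...)).NotTo(gomega.HaveOccurred(), "msg")
	// into an explicit err assignment followed by ExpectNoError(err, "msg").
}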

test/e2e/apps/daemon_set.go

@@ -89,7 +89,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
e2elog.Logf("unable to dump pods: %v", err)
}
err = clearDaemonSetNodeLabels(f.ClientSet)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
})
f := framework.NewDefaultFramework("daemonsets")
@@ -106,12 +106,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
c = f.ClientSet
updatedNS, err := updateNamespaceAnnotations(c, ns)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ns = updatedNS.Name
err = clearDaemonSetNodeLabels(c)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
})
/*
@@ -124,19 +124,19 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start")
err = checkDaemonStatus(f, dsName)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.")
podList := listDaemonPods(c, ns, label)
pod := podList.Items[0]
err = c.CoreV1().Pods(ns).Delete(pod.Name, nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to revive")
})
@@ -153,7 +153,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ds := newDaemonSet(dsName, image, complexLabel)
ds.Spec.Template.Spec.NodeSelector = nodeSelector
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Initially, daemon pods should not be running on any nodes.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
@@ -169,14 +169,14 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
err = checkDaemonStatus(f, dsName)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
nodeSelector[daemonsetColorLabel] = "green"
greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
framework.ExpectNoError(err, "error removing labels on node")
-gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
-NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
+err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
+framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
@@ -188,7 +188,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
err = checkDaemonStatus(f, dsName)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
})
// We defer adding this test to conformance pending the disposition of moving DaemonSet scheduling logic to the
@@ -216,7 +216,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
},
}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Initially, daemon pods should not be running on any nodes.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
@@ -232,13 +232,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
err = checkDaemonStatus(f, dsName)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
framework.ExpectNoError(err, "error removing labels on node")
-gomega.Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
-NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
+err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
+framework.ExpectNoError(err, "error waiting for daemon pod to not be running on nodes")
})
/*
@@ -250,13 +250,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to start")
err = checkDaemonStatus(f, dsName)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
podList := listDaemonPods(c, ns, label)
@@ -282,7 +282,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
@@ -290,7 +290,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1)
first := curHistory(listDaemonHistories(c, ns, label), ds)
firstHash := first.Labels[apps.DefaultDaemonSetUniqueLabelKey]
@@ -300,11 +300,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By("Update daemon pods image.")
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods images aren't updated.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
@@ -312,7 +312,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 2)
cur := curHistory(listDaemonHistories(c, ns, label), ds)
gomega.Expect(cur.Revision).To(gomega.Equal(int64(2)))
@@ -331,7 +331,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
@@ -339,7 +339,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1)
cur := curHistory(listDaemonHistories(c, ns, label), ds)
hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
@@ -349,18 +349,18 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By("Update daemon pods image.")
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
// Time to complete the rolling upgrade is proportional to the number of nodes in the cluster.
// Get the number of nodes, and set the timeout appropriately.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
nodeCount := len(nodes.Items)
retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second
ginkgo.By("Check that daemon pods images are updated.")
err = wait.PollImmediate(dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods are still running on every node of the cluster.")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
@@ -368,7 +368,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 2)
cur = curHistory(listDaemonHistories(c, ns, label), ds)
hash = cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
@@ -389,7 +389,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
e2elog.Logf("Check that daemon pods launch on every node of the cluster")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
@@ -401,11 +401,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
// Make sure we're in the middle of a rollout
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage))
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
pods := listDaemonPods(c, ns, label)
var existingPods, newPods []*v1.Pod
@@ -433,11 +433,11 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
update.Spec.Template.Spec.Containers[0].Image = image
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
e2elog.Logf("Make sure DaemonSet rollback is complete")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
// After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted
pods = listDaemonPods(c, ns, label)
@@ -487,7 +487,7 @@ func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(ns).List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(len(podList.Items)).To(gomega.BeNumerically(">", 0))
return podList
}
@@ -748,7 +748,7 @@ func listDaemonHistories(c clientset.Interface, ns string, label map[string]stri
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
historyList, err := c.AppsV1().ControllerRevisions(ns).List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(len(historyList.Items)).To(gomega.BeNumerically(">", 0))
return historyList
}
@@ -761,7 +761,7 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
// Every history should have the hash label
gomega.Expect(len(history.Labels[apps.DefaultDaemonSetUniqueLabelKey])).To(gomega.BeNumerically(">", 0))
match, err := daemon.Match(ds, history)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
if match {
curHistory = history
foundCurHistories++