DaemonSet e2e: Update image and rolling upgrade test timeout

Use Nginx as the DaemonSet image instead of the ServeHostname image.
This was changed because the ServeHostname image sleeps before exiting
when it is asked to terminate, which makes it incompatible with the
DaemonSet Rolling Upgrade e2e test.

In addition, make the DaemonSet Rolling Upgrade e2e test timeout a
function of the number of nodes in the cluster. This is required
because the more nodes there are, the longer a rolling upgrade takes
to complete.
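
For illustration only, a minimal Go sketch of the scaling rule. The
scaledTimeout helper name is hypothetical and not part of this change;
the 30-second per-node budget mirrors the diff below, and the standard
time package plus the existing dsRetryTimeout base are assumed:

	// Sketch: stretch the base timeout by a fixed per-node budget, so larger
	// clusters get proportionally more time to finish the rolling upgrade.
	// (scaledTimeout is a hypothetical helper, not part of this commit.)
	func scaledTimeout(base time.Duration, nodeCount int) time.Duration {
		return base + time.Duration(nodeCount*30)*time.Second // 30s per node, as in the diff
	}

	// Usage would look roughly like:
	//   retryTimeout := scaledTimeout(dsRetryTimeout, len(nodes.Items))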

Signed-off-by: Alexander Brand <alexbrand09@gmail.com>


@@ -93,7 +93,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 	f = framework.NewDefaultFramework("daemonsets")
-	image := framework.ServeHostnameImage
+	image := NginxImage
 	dsName := "daemon-set"
 	var ns string
@@ -350,8 +350,15 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 	ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
 	Expect(err).NotTo(HaveOccurred())
+	// Time to complete the rolling upgrade is proportional to the number of nodes in the cluster.
+	// Get the number of nodes, and set the timeout appropriately.
+	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
+	Expect(err).NotTo(HaveOccurred())
+	nodeCount := len(nodes.Items)
+	retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second
 	By("Check that daemon pods images are updated.")
-	err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
+	err = wait.PollImmediate(dsRetryPeriod, retryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
 	Expect(err).NotTo(HaveOccurred())
 	By("Check that daemon pods are still running on every node of the cluster.")