From bceec5a3ffd6975105f95b08641e45644c0a6753 Mon Sep 17 00:00:00 2001
From: Antonio Ojea
Date: Mon, 14 Oct 2024 13:30:03 +0000
Subject: [PATCH] e2e flake CheckDaemonStatus assert on async value

The util for checking the DaemonSet status only checked once whether
the Status of the DaemonSet reported that all of the desired Pods were
scheduled and ready.

However, the pattern used in the e2e tests for this function did not
take into account that the controller still needs to propagate the Pod
status to the DaemonSet status, and asserted on the condition only once
after waiting for all the Pods to be ready.

To avoid more code churn, change the CheckDaemonStatus signature to a
wait.ConditionWithContextFunc and use it in an async poll loop in the
tests.
---
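Reviewer note (illustrative, not part of the commit): the sketch below shows the
calling pattern the tests move to, written against plain client-go and
apimachinery so it compiles outside the e2e framework. The names
daemonSetReady and waitForDaemonSetReady, the client/namespace/dsName
parameters, and the 2s/5m poll settings are placeholders, not code from this
patch.

// Package daemonsetwait sketches the condition-returning pattern used by the
// reworked CheckDaemonStatus helper.
package daemonsetwait

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// daemonSetReady mirrors the reworked CheckDaemonStatus: rather than checking
// the DaemonSet status once and returning an error, it returns a
// wait.ConditionWithContextFunc that reports whether all desired Pods are
// scheduled and ready, so the caller can poll until the controller has
// propagated the Pod status.
func daemonSetReady(client kubernetes.Interface, namespace, dsName string) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		ds, err := client.AppsV1().DaemonSets(namespace).Get(ctx, dsName, metav1.GetOptions{})
		if err != nil {
			// A Get error aborts the poll.
			return false, err
		}
		desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
		if desired == scheduled && scheduled == ready {
			return true, nil
		}
		// Not ready yet: log and keep polling instead of failing immediately.
		fmt.Printf("daemonset %s/%s not ready: desired=%d scheduled=%d ready=%d\n", namespace, dsName, desired, scheduled, ready)
		return false, nil
	}
}

// waitForDaemonSetReady shows the calling pattern the tests switch to: the
// condition is retried by wait.PollUntilContextTimeout instead of being
// asserted a single time.
func waitForDaemonSetReady(ctx context.Context, client kubernetes.Interface, namespace, dsName string) error {
	return wait.PollUntilContextTimeout(ctx, 2*time.Second, 5*time.Minute, true, daemonSetReady(client, namespace, dsName))
}

Returning a condition function instead of an error leaves the retry period and
timeout up to the caller, which is why the poll settings stay in each test
rather than in the fixture.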
 test/e2e/apps/controller_revision.go     |  4 ++--
 test/e2e/apps/daemon_set.go              | 28 ++++++++++++------------
 test/e2e/framework/daemonset/fixtures.go | 25 +++++++++++----------
 test/e2e/network/loadbalancer.go         |  4 ++--
 test/e2e/upgrades/apps/daemonsets.go     |  6 +++--
 5 files changed, 35 insertions(+), 32 deletions(-)

diff --git a/test/e2e/apps/controller_revision.go b/test/e2e/apps/controller_revision.go
index f4194c92cb3..4feb4722292 100644
--- a/test/e2e/apps/controller_revision.go
+++ b/test/e2e/apps/controller_revision.go
@@ -136,8 +136,8 @@ var _ = SIGDescribe("ControllerRevision", framework.WithSerial(), func() {
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, testDaemonset))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 		ginkgo.By(fmt.Sprintf("Confirm DaemonSet %q successfully created with %q label", dsName, dsLabelSelector))
 		dsList, err := csAppsV1.DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: dsLabelSelector})
diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go
index ad352fbb940..b29654acc74 100644
--- a/test/e2e/apps/daemon_set.go
+++ b/test/e2e/apps/daemon_set.go
@@ -184,8 +184,8 @@ var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 		ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.")
 		podList := listDaemonPods(ctx, c, ns, label)
@@ -224,8 +224,8 @@ var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
 		gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1))
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 		ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
 		nodeSelector[daemonsetColorLabel] = "green"
@@ -243,8 +243,8 @@ var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
 		gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1))
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 	})
 
 	// We defer adding this test to conformance pending the disposition of moving DaemonSet scheduling logic to the
@@ -287,8 +287,8 @@ var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
 		gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1))
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 		ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
 		_, err = setDaemonSetNodeLabels(ctx, c, node.Name, map[string]string{})
@@ -312,8 +312,8 @@ var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 		ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
 		podList := listDaemonPods(ctx, c, ns, label)
@@ -863,8 +863,8 @@ var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, testDaemonset))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 		ginkgo.By("listing all DaemonSets")
 		dsList, err := cs.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
@@ -911,8 +911,8 @@ var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, testDaemonset))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 		ginkgo.By("Getting /status")
 		dsResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}
diff --git a/test/e2e/framework/daemonset/fixtures.go b/test/e2e/framework/daemonset/fixtures.go
index ed725010224..dc2ba9c5cb0 100644
--- a/test/e2e/framework/daemonset/fixtures.go
+++ b/test/e2e/framework/daemonset/fixtures.go
@@ -18,7 +18,6 @@ package daemonset
 
 import (
 	"context"
-	"fmt"
 
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
@@ -139,16 +138,18 @@ func checkDaemonPodStateOnNodes(ctx context.Context, c clientset.Interface, ds *
 	return len(nodesToPodCount) == len(nodeNames), nil
 }
 
-// CheckDaemonStatus returns an error if not all desired pods are scheduled or
-// not all of them are ready.
-func CheckDaemonStatus(ctx context.Context, f *framework.Framework, dsName string) error {
-	ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(ctx, dsName, metav1.GetOptions{})
-	if err != nil {
-		return err
+// CheckDaemonStatus returns false if not all desired pods are scheduled or not all of them are ready.
+func CheckDaemonStatus(ctx context.Context, f *framework.Framework, dsName string) func(ctx context.Context) (bool, error) {
+	return func(ctx context.Context) (bool, error) {
+		ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(ctx, dsName, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
+		if desired == scheduled && scheduled == ready {
+			return true, nil
+		}
+		framework.Logf("error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
+		return false, nil
 	}
-	desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
-	if desired == scheduled && scheduled == ready {
-		return nil
-	}
-	return fmt.Errorf("error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
 }
diff --git a/test/e2e/network/loadbalancer.go b/test/e2e/network/loadbalancer.go
index 5612a4b89d4..6158c022fd8 100644
--- a/test/e2e/network/loadbalancer.go
+++ b/test/e2e/network/loadbalancer.go
@@ -1307,8 +1307,8 @@ func testRollingUpdateLBConnectivityDisruption(ctx context.Context, f *framework
 	creationTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs)
 	err = wait.PollUntilContextTimeout(ctx, framework.Poll, creationTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, nodeNames))
 	framework.ExpectNoError(err, "error waiting for daemon pods to start")
-	err = e2edaemonset.CheckDaemonStatus(ctx, f, name)
-	framework.ExpectNoError(err)
+	err = wait.PollUntilContextTimeout(ctx, framework.Poll, creationTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, name))
+	framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 	ginkgo.By(fmt.Sprintf("Creating a service %s with type=LoadBalancer externalTrafficPolicy=%s in namespace %s", name, externalTrafficPolicy, ns))
 	jig := e2eservice.NewTestJig(cs, ns, name)
diff --git a/test/e2e/upgrades/apps/daemonsets.go b/test/e2e/upgrades/apps/daemonsets.go
index 61cb1cb6bb5..e5e32bec23b 100644
--- a/test/e2e/upgrades/apps/daemonsets.go
+++ b/test/e2e/upgrades/apps/daemonsets.go
@@ -94,6 +94,8 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(ctx context.Context, f *
 
 	// DaemonSet resource itself should be good
 	ginkgo.By("confirming the DaemonSet resource is in a good state")
-	err = e2edaemonset.CheckDaemonStatus(ctx, f, t.daemonSet.Name)
-	framework.ExpectNoError(err)
+
+	err = wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PodStartTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, t.daemonSet.Name))
+	framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
+
 }