e2e flake: CheckDaemonStatus asserts on an async value

The util for checking the daemon status checked only once whether the
Status of the DaemonSet reported that all the desired Pods were
scheduled and ready.

However, the pattern used in the e2e tests for this function did not
take into account that the controller needs time to propagate the Pod
status to the DaemonSet status, and asserted on the condition only once,
right after waiting for all the Pods to be ready.

To avoid more code churn, change the CheckDaemonStatus signature to
match the wait.ConditionWithContextFunc type and use it in an async poll
loop in the tests.
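
For context, this is the shape of the fix: instead of a helper that inspects the status once and returns an error, the helper now returns a closure matching wait.ConditionWithContextFunc, which wait.PollUntilContextTimeout can retry until the controller has caught up. A minimal, self-contained sketch of that pattern follows; daemonStatus, checkStatus, and get are hypothetical stand-ins for the real clientset plumbing, and only the wait package APIs are real:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// daemonStatus is a hypothetical stand-in for reading a DaemonSet's
// status through a real clientset Get call.
type daemonStatus struct {
	desired, scheduled, ready int32
}

// checkStatus returns a wait.ConditionWithContextFunc instead of a plain
// error, so callers can hand it to a poll loop: (false, nil) means "not
// ready yet, retry", which absorbs the controller's propagation delay.
func checkStatus(get func(ctx context.Context) (daemonStatus, error)) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		s, err := get(ctx)
		if err != nil {
			return false, err // real errors abort the poll
		}
		if s.desired == s.scheduled && s.scheduled == s.ready {
			return true, nil // condition met, stop polling
		}
		return false, nil // status not propagated yet, keep polling
	}
}

func main() {
	// Fake status source that becomes consistent after a few calls,
	// mimicking the DaemonSet controller catching up.
	calls := 0
	get := func(ctx context.Context) (daemonStatus, error) {
		calls++
		if calls < 3 {
			return daemonStatus{desired: 3, scheduled: 3, ready: 1}, nil
		}
		return daemonStatus{desired: 3, scheduled: 3, ready: 3}, nil
	}

	err := wait.PollUntilContextTimeout(context.Background(),
		100*time.Millisecond, 10*time.Second, true, checkStatus(get))
	fmt.Println("poll finished after", calls, "checks, err =", err)
}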
Antonio Ojea 2024-10-14 13:30:03 +00:00
parent de8f6b0db7
commit bceec5a3ff
5 changed files with 35 additions and 32 deletions


@@ -136,8 +136,8 @@ var _ = SIGDescribe("ControllerRevision", framework.WithSerial(), func() {
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, testDaemonset))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 		ginkgo.By(fmt.Sprintf("Confirm DaemonSet %q successfully created with %q label", dsName, dsLabelSelector))
 		dsList, err := csAppsV1.DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: dsLabelSelector})


@@ -184,8 +184,8 @@ var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 		ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.")
 		podList := listDaemonPods(ctx, c, ns, label)
@@ -224,8 +224,8 @@ var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
 		gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1))
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 		ginkgo.By("Update the node label to green, and wait for daemons to be unscheduled")
 		nodeSelector[daemonsetColorLabel] = "green"
@@ -243,8 +243,8 @@ var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
 		gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1))
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 	})
 
 	// We defer adding this test to conformance pending the disposition of moving DaemonSet scheduling logic to the
@@ -287,8 +287,8 @@ var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
 		gomega.Expect(daemonSetLabels).To(gomega.HaveLen(1))
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, []string{newNode.Name}))
 		framework.ExpectNoError(err, "error waiting for daemon pods to be running on new nodes")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 		ginkgo.By("Remove the node label and wait for daemons to be unscheduled")
 		_, err = setDaemonSetNodeLabels(ctx, c, node.Name, map[string]string{})
@@ -312,8 +312,8 @@ var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, ds))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 		ginkgo.By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
 		podList := listDaemonPods(ctx, c, ns, label)
@@ -863,8 +863,8 @@ var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, testDaemonset))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 		ginkgo.By("listing all DaemonSets")
 		dsList, err := cs.AppsV1().DaemonSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
@@ -911,8 +911,8 @@ var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
 		ginkgo.By("Check that daemon pods launch on every node of the cluster.")
 		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, checkRunningOnAllNodes(f, testDaemonset))
 		framework.ExpectNoError(err, "error waiting for daemon pod to start")
-		err = e2edaemonset.CheckDaemonStatus(ctx, f, dsName)
-		framework.ExpectNoError(err)
+		err = wait.PollUntilContextTimeout(ctx, dsRetryPeriod, dsRetryTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, dsName))
+		framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 		ginkgo.By("Getting /status")
 		dsResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}


@@ -18,7 +18,6 @@ package daemonset
 
 import (
 	"context"
-	"fmt"
 
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
@@ -139,16 +138,18 @@ func checkDaemonPodStateOnNodes(ctx context.Context, c clientset.Interface, ds *
 	return len(nodesToPodCount) == len(nodeNames), nil
 }
 
-// CheckDaemonStatus returns an error if not all desired pods are scheduled or
-// not all of them are ready.
-func CheckDaemonStatus(ctx context.Context, f *framework.Framework, dsName string) error {
-	ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(ctx, dsName, metav1.GetOptions{})
-	if err != nil {
-		return err
+// CheckDaemonStatus returns false if not all desired pods are scheduled or not all of them are ready.
+func CheckDaemonStatus(ctx context.Context, f *framework.Framework, dsName string) func(ctx context.Context) (bool, error) {
+	return func(ctx context.Context) (bool, error) {
+		ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(ctx, dsName, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
+		if desired == scheduled && scheduled == ready {
+			return true, nil
+		}
+		framework.Logf("error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
+		return false, nil
 	}
-	desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
-	if desired == scheduled && scheduled == ready {
-		return nil
-	}
-	return fmt.Errorf("error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
 }
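
Note the retry semantics that make the new condition safe inside a poll loop: returning (false, nil) tells the poller to try again, while a non-nil error aborts the wait immediately. That is why the status mismatch is now logged with framework.Logf and reported as false, instead of being returned as an fmt.Errorf error as before.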


@@ -1307,8 +1307,8 @@ func testRollingUpdateLBConnectivityDisruption(ctx context.Context, f *framework
 	creationTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs)
 	err = wait.PollUntilContextTimeout(ctx, framework.Poll, creationTimeout, true, e2edaemonset.CheckDaemonPodOnNodes(f, ds, nodeNames))
 	framework.ExpectNoError(err, "error waiting for daemon pods to start")
-	err = e2edaemonset.CheckDaemonStatus(ctx, f, name)
-	framework.ExpectNoError(err)
+	err = wait.PollUntilContextTimeout(ctx, framework.Poll, creationTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, name))
+	framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 
 	ginkgo.By(fmt.Sprintf("Creating a service %s with type=LoadBalancer externalTrafficPolicy=%s in namespace %s", name, externalTrafficPolicy, ns))
 	jig := e2eservice.NewTestJig(cs, ns, name)


@@ -94,6 +94,8 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(ctx context.Context, f *
 	// DaemonSet resource itself should be good
 	ginkgo.By("confirming the DaemonSet resource is in a good state")
-	err = e2edaemonset.CheckDaemonStatus(ctx, f, t.daemonSet.Name)
-	framework.ExpectNoError(err)
+	err = wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PodStartTimeout, true, e2edaemonset.CheckDaemonStatus(ctx, f, t.daemonSet.Name))
+	framework.ExpectNoError(err, "error waiting for daemonset to report all pods are scheduled and ready")
 }