diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go
index 0bea93eb51a..d1434339631 100644
--- a/test/e2e/apimachinery/resource_quota.go
+++ b/test/e2e/apimachinery/resource_quota.go
@@ -162,7 +162,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		found, unchanged := 0, 0
 		// On contended servers the service account controller can slow down, leading to the count changing during a run.
 		// Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
-		wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
+		err := wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
 			secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
 			framework.ExpectNoError(err)
 			if len(secrets.Items) == found {
@@ -174,6 +174,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 			found = len(secrets.Items)
 			return false, nil
 		})
+		framework.ExpectNoError(err)
 		defaultSecrets := fmt.Sprintf("%d", found)
 		hardSecrets := fmt.Sprintf("%d", found+1)
 
@@ -327,7 +328,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		found, unchanged := 0, 0
 		// On contended servers the service account controller can slow down, leading to the count changing during a run.
 		// Wait up to 15s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
-		wait.Poll(1*time.Second, time.Minute, func() (bool, error) {
+		err := wait.Poll(1*time.Second, time.Minute, func() (bool, error) {
 			configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
 			framework.ExpectNoError(err)
 			if len(configmaps.Items) == found {
@@ -339,6 +340,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 			found = len(configmaps.Items)
 			return false, nil
 		})
+		framework.ExpectNoError(err)
 		defaultConfigMaps := fmt.Sprintf("%d", found)
 		hardConfigMaps := fmt.Sprintf("%d", found+1)
 
diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go
index 0299a1a1d4f..ad3739fda5a 100644
--- a/test/e2e/framework/volume/fixtures.go
+++ b/test/e2e/framework/volume/fixtures.go
@@ -542,7 +542,7 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
 		// testVolumeClient might get used more than once per test, therefore
 		// we have to clean up before returning.
 		e2epod.DeletePodOrFail(f.ClientSet, clientPod.Namespace, clientPod.Name)
-		e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
+		framework.ExpectNoError(e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
 	}()
 
 	testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
@@ -577,7 +577,7 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
 		// This pod must get deleted before the function returns becaue the test relies on
 		// the volume not being in use.
 		e2epod.DeletePodOrFail(f.ClientSet, injectorPod.Namespace, injectorPod.Name)
-		e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
+		framework.ExpectNoError(e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
 	}()
 
 	ginkgo.By("Writing text file contents in the container.")
diff --git a/test/e2e/windows/gmsa_full.go b/test/e2e/windows/gmsa_full.go
index 8535a109f15..a3a06c25af8 100644
--- a/test/e2e/windows/gmsa_full.go
+++ b/test/e2e/windows/gmsa_full.go
@@ -470,7 +470,8 @@ func bindRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rb
 			Name: rbacRoleName,
 		},
 	}
-	f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), binding, metav1.CreateOptions{})
+	_, err := f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), binding, metav1.CreateOptions{})
+	framework.ExpectNoError(err)
 }
 
 func bindClusterRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rbacRoleName string) {
@@ -492,7 +493,8 @@ func bindClusterRBACRoleToServiceAccount(f *framework.Framework, serviceAccountN
 			Name: rbacRoleName,
 		},
 	}
-	f.ClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), binding, metav1.CreateOptions{})
+	_, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), binding, metav1.CreateOptions{})
+	framework.ExpectNoError(err)
 }
 
 // createPodWithGmsa creates a pod using the test GMSA cred spec, and returns its name.
diff --git a/test/e2e/windows/kubelet_stats.go b/test/e2e/windows/kubelet_stats.go
index 3b806466c3c..bfb20312892 100644
--- a/test/e2e/windows/kubelet_stats.go
+++ b/test/e2e/windows/kubelet_stats.go
@@ -58,7 +58,8 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats [Serial]", func() {
 
 			ginkgo.By("Waiting up to 3 minutes for pods to be running")
 			timeout := 3 * time.Minute
-			e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 10, 0, timeout, make(map[string]string))
+			err = e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 10, 0, timeout, make(map[string]string))
+			framework.ExpectNoError(err)
 
 			ginkgo.By("Getting kubelet stats 5 times and checking average duration")
 			iterations := 5
@@ -148,7 +149,8 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() {
 
 			ginkgo.By("Waiting up to 3 minutes for pods to be running")
 			timeout := 3 * time.Minute
-			e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 3, 0, timeout, make(map[string]string))
+			err = e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 3, 0, timeout, make(map[string]string))
+			framework.ExpectNoError(err)
 
 			ginkgo.By("Getting kubelet stats 1 time")
 			iterations := 1