diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go
index 0bea93eb51a..d1434339631 100644
--- a/test/e2e/apimachinery/resource_quota.go
+++ b/test/e2e/apimachinery/resource_quota.go
@@ -162,7 +162,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		found, unchanged := 0, 0
 		// On contended servers the service account controller can slow down, leading to the count changing during a run.
 		// Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
-		wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
+		err := wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
 			secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
 			framework.ExpectNoError(err)
 			if len(secrets.Items) == found {
@@ -174,6 +174,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 			found = len(secrets.Items)
 			return false, nil
 		})
+		framework.ExpectNoError(err)
 		defaultSecrets := fmt.Sprintf("%d", found)
 		hardSecrets := fmt.Sprintf("%d", found+1)
 
@@ -327,7 +328,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		found, unchanged := 0, 0
 		// On contended servers the service account controller can slow down, leading to the count changing during a run.
 		// Wait up to 15s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
-		wait.Poll(1*time.Second, time.Minute, func() (bool, error) {
+		err := wait.Poll(1*time.Second, time.Minute, func() (bool, error) {
 			configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
 			framework.ExpectNoError(err)
 			if len(configmaps.Items) == found {
@@ -339,6 +340,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 			found = len(configmaps.Items)
 			return false, nil
 		})
+		framework.ExpectNoError(err)
 		defaultConfigMaps := fmt.Sprintf("%d", found)
 		hardConfigMaps := fmt.Sprintf("%d", found+1)
 
diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go
index 1249c508652..ad3739fda5a 100644
--- a/test/e2e/framework/volume/fixtures.go
+++ b/test/e2e/framework/volume/fixtures.go
@@ -539,8 +539,10 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
 		framework.Failf("Failed to create client pod: %v", err)
 	}
 	defer func() {
+		// testVolumeClient might get used more than once per test, therefore
+		// we have to clean up before returning.
 		e2epod.DeletePodOrFail(f.ClientSet, clientPod.Namespace, clientPod.Name)
-		e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
+		framework.ExpectNoError(e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
 	}()
 
 	testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
@@ -572,8 +574,10 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
 		return
 	}
 	defer func() {
+		// This pod must get deleted before the function returns because the test relies on
+		// the volume not being in use.
 		e2epod.DeletePodOrFail(f.ClientSet, injectorPod.Namespace, injectorPod.Name)
-		e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
+		framework.ExpectNoError(e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
 	}()
 
 	ginkgo.By("Writing text file contents in the container.")
diff --git a/test/e2e/kubectl/portforward.go b/test/e2e/kubectl/portforward.go
index e6510df361b..7947f5048f0 100644
--- a/test/e2e/kubectl/portforward.go
+++ b/test/e2e/kubectl/portforward.go
@@ -398,7 +398,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
 			return fmt.Errorf("received the wrong port: %d", p)
 		}
 		return nil
-	}, time.Minute, 10*time.Second).Should(gomega.BeNil())
+	}, time.Minute, 10*time.Second).Should(gomega.Succeed())
 
 	gomega.Eventually(func() error {
 		channel, msg, err := wsRead(ws)
@@ -412,7 +412,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
 			return fmt.Errorf("received the wrong port: %d", p)
 		}
 		return nil
-	}, time.Minute, 10*time.Second).Should(gomega.BeNil())
+	}, time.Minute, 10*time.Second).Should(gomega.Succeed())
 
 	ginkgo.By("Sending the expected data to the local port")
 	err = wsWrite(ws, 0, []byte("def"))
@@ -436,7 +436,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
 			return fmt.Errorf("expected %q from server, got %q", expectedData, buf.Bytes())
 		}
 		return nil
-	}, time.Minute, 10*time.Second).Should(gomega.BeNil())
+	}, time.Minute, 10*time.Second).Should(gomega.Succeed())
 
 	ginkgo.By("Verifying logs")
 	gomega.Eventually(func() (string, error) {
diff --git a/test/e2e/storage/csi_mock/base.go b/test/e2e/storage/csi_mock/base.go
index 4b21fdebc94..0a4a72b5897 100644
--- a/test/e2e/storage/csi_mock/base.go
+++ b/test/e2e/storage/csi_mock/base.go
@@ -100,17 +100,16 @@ type testParameters struct {
 }
 
 type mockDriverSetup struct {
-	cs           clientset.Interface
-	config       *storageframework.PerTestConfig
-	testCleanups []func()
-	pods         []*v1.Pod
-	pvcs         []*v1.PersistentVolumeClaim
-	sc           map[string]*storagev1.StorageClass
-	vsc          map[string]*unstructured.Unstructured
-	driver       drivers.MockCSITestDriver
-	provisioner  string
-	tp           testParameters
-	f            *framework.Framework
+	cs          clientset.Interface
+	config      *storageframework.PerTestConfig
+	pods        []*v1.Pod
+	pvcs        []*v1.PersistentVolumeClaim
+	sc          map[string]*storagev1.StorageClass
+	vsc         map[string]*unstructured.Unstructured
+	driver      drivers.MockCSITestDriver
+	provisioner string
+	tp          testParameters
+	f           *framework.Framework
 }
 
 type volumeType string
@@ -236,10 +235,6 @@ func (m *mockDriverSetup) cleanup() {
 		ginkgo.By(fmt.Sprintf("Deleting volumesnapshotclass %s", vsc.GetName()))
 		m.config.Framework.DynamicClient.Resource(utils.SnapshotClassGVR).Delete(context.TODO(), vsc.GetName(), metav1.DeleteOptions{})
 	}
-	ginkgo.By("Cleaning up resources")
-	for _, cleanupFunc := range m.testCleanups {
-		cleanupFunc()
-	}
 
 	err := utilerrors.NewAggregate(errs)
 	framework.ExpectNoError(err, "while cleaning up after test")
diff --git a/test/e2e/windows/gmsa_full.go b/test/e2e/windows/gmsa_full.go
index 8535a109f15..a3a06c25af8 100644
--- a/test/e2e/windows/gmsa_full.go
+++ b/test/e2e/windows/gmsa_full.go
@@ -470,7 +470,8 @@ func bindRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rb
 			Name: rbacRoleName,
 		},
 	}
-	f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), binding, metav1.CreateOptions{})
+	_, err := f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), binding, metav1.CreateOptions{})
+	framework.ExpectNoError(err)
 }
 
 func bindClusterRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rbacRoleName string) {
@@ -492,7 +493,8 @@ func bindClusterRBACRoleToServiceAccount(f *framework.Framework, serviceAccountN
 			Name: rbacRoleName,
 		},
 	}
-	f.ClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), binding, metav1.CreateOptions{})
+	_, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), binding, metav1.CreateOptions{})
+	framework.ExpectNoError(err)
 }
 
 // createPodWithGmsa creates a pod using the test GMSA cred spec, and returns its name.
diff --git a/test/e2e/windows/kubelet_stats.go b/test/e2e/windows/kubelet_stats.go
index 3b806466c3c..bfb20312892 100644
--- a/test/e2e/windows/kubelet_stats.go
+++ b/test/e2e/windows/kubelet_stats.go
@@ -58,7 +58,8 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats [Serial]", func() {
 
 			ginkgo.By("Waiting up to 3 minutes for pods to be running")
 			timeout := 3 * time.Minute
-			e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 10, 0, timeout, make(map[string]string))
+			err = e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 10, 0, timeout, make(map[string]string))
+			framework.ExpectNoError(err)
 
 			ginkgo.By("Getting kubelet stats 5 times and checking average duration")
 			iterations := 5
@@ -148,7 +149,8 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() {
 
 			ginkgo.By("Waiting up to 3 minutes for pods to be running")
 			timeout := 3 * time.Minute
-			e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 3, 0, timeout, make(map[string]string))
+			err = e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 3, 0, timeout, make(map[string]string))
+			framework.ExpectNoError(err)
 
 			ginkgo.By("Getting kubelet stats 1 time")
 			iterations := 1