From f55588fa0b095bebc4f8d197dfb7ca3b6bb27fd7 Mon Sep 17 00:00:00 2001
From: liyuerich
Date: Thu, 31 Aug 2023 18:35:36 +0800
Subject: [PATCH] e2e_storage: stop using deprecated framework.ExpectError

Signed-off-by: liyuerich
---
 test/e2e/framework/expect.go                  |  9 -----
 test/e2e/storage/csimock/csi_attach_volume.go |  3 +-
 test/e2e/storage/csimock/csi_snapshot.go      |  4 ++-
 .../storage/csimock/csi_volume_expansion.go   |  6 ++--
 test/e2e/storage/persistent_volumes-local.go  | 12 +++----
 test/e2e/storage/testsuites/provisioning.go   |  2 +-
 test/e2e/storage/testsuites/snapshottable.go  |  6 ++--
 test/e2e/storage/testsuites/volume_expand.go  | 23 +++++++++++--
 test/e2e/storage/utils/utils.go               | 34 +++++++++++++++++++
 test/e2e/storage/volume_metrics.go            |  4 +--
 test/e2e/storage/volume_provisioning.go       |  6 ++--
 11 files changed, 78 insertions(+), 31 deletions(-)

diff --git a/test/e2e/framework/expect.go b/test/e2e/framework/expect.go
index e2ec148f110..9d7c6474557 100644
--- a/test/e2e/framework/expect.go
+++ b/test/e2e/framework/expect.go
@@ -293,15 +293,6 @@ func (f *FailureError) backtrace() {
 // }
 var ErrFailure error = FailureError{}
 
-// ExpectError expects an error happens, otherwise an exception raises
-//
-// Deprecated: use gomega.Expect().To(gomega.HaveOccurred()) or (better!) check
-// specifically for the error that is expected with
-// gomega.Expect().To(gomega.MatchError(gomega.ContainSubstring()))
-func ExpectError(err error, explain ...interface{}) {
-	gomega.ExpectWithOffset(1, err).To(gomega.HaveOccurred(), explain...)
-}
-
 // ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error.
 func ExpectNoError(err error, explain ...interface{}) {
 	ExpectNoErrorWithOffset(1, err, explain...)
diff --git a/test/e2e/storage/csimock/csi_attach_volume.go b/test/e2e/storage/csimock/csi_attach_volume.go
index e1107f0dd21..29a69b90b94 100644
--- a/test/e2e/storage/csimock/csi_attach_volume.go
+++ b/test/e2e/storage/csimock/csi_attach_volume.go
@@ -23,6 +23,7 @@ import (
 
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
+
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -101,7 +102,7 @@ var _ = utils.SIGDescribe("CSI Mock volume attach", func() {
 				}
 			}
 			if test.disableAttach {
-				framework.ExpectError(err, "Unexpected VolumeAttachment found")
+				gomega.Expect(err).To(gomega.MatchError(apierrors.IsNotFound, "Unexpected VolumeAttachment found"))
 			}
 		})
 
diff --git a/test/e2e/storage/csimock/csi_snapshot.go b/test/e2e/storage/csimock/csi_snapshot.go
index 377b53245a1..a6966c96944 100644
--- a/test/e2e/storage/csimock/csi_snapshot.go
+++ b/test/e2e/storage/csimock/csi_snapshot.go
@@ -22,6 +22,8 @@ import (
 	"time"
 
 	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
+
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	v1 "k8s.io/api/core/v1"
@@ -430,7 +432,7 @@
 
 	// check if the snapshot is deleted
 	_, err = dc.Resource(utils.SnapshotGVR).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{})
-	framework.ExpectError(err)
+	gomega.Expect(err).To(gomega.MatchError(apierrors.IsNotFound, "the snapshot is not deleted"))
 }
 
 type snapshotMetricsTestConfig struct {
diff --git a/test/e2e/storage/csimock/csi_volume_expansion.go b/test/e2e/storage/csimock/csi_volume_expansion.go
index 495bfb0464d..bf55eec8404 100644
--- a/test/e2e/storage/csimock/csi_volume_expansion.go
+++ b/test/e2e/storage/csimock/csi_volume_expansion.go
@@ -24,6 +24,7 @@ import (
 	csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
+
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	v1 "k8s.io/api/core/v1"
@@ -151,11 +152,10 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() {
 				framework.Failf("error updating pvc size %q", pvc.Name)
 			}
 			if test.expectFailure {
-				err = testsuites.WaitForResizingCondition(ctx, pvc, m.cs, csiResizingConditionWait)
-				framework.ExpectError(err, "unexpected resizing condition on PVC")
+				gomega.Consistently(ctx, framework.GetObject(m.cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get, pvc.Name, metav1.GetOptions{})).WithTimeout(csiResizingConditionWait).
+					ShouldNot(gomega.HaveField("Status.Conditions", gomega.ContainElement(gomega.HaveField("Type", gomega.Equal(v1.PersistentVolumeClaimResizing)))), "unexpected resizing condition on PVC")
 				return
 			}
-
 			ginkgo.By("Waiting for persistent volume resize to finish")
 			err = testsuites.WaitForControllerVolumeResize(ctx, pvc, m.cs, csiResizeWaitPeriod)
 			framework.ExpectNoError(err, "While waiting for CSI PV resize to finish")
diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go
index 91cc7cbd8d2..9a5bc5711d3 100644
--- a/test/e2e/storage/persistent_volumes-local.go
+++ b/test/e2e/storage/persistent_volumes-local.go
@@ -325,10 +325,10 @@ var _ = utils.SIGDescribe("PersistentVolumes-local", func() {
 			}
 			ginkgo.By("Creating local PVC and PV")
 			createLocalPVCsPVs(ctx, config, []*localTestVolume{testVol}, immediateMode)
-			pod, err := createLocalPod(ctx, config, testVol, nil)
-			framework.ExpectError(err)
-			err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, config.client, pod.Name, pod.Namespace, f.Timeouts.PodStart)
-			framework.ExpectError(err)
+			// createLocalPod will create a pod and wait for it to be running. In this case,
+			// it's expected that the pod fails to start.
+			_, err := createLocalPod(ctx, config, testVol, nil)
+			gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("is not Running")))
 			cleanupLocalPVCsPVs(ctx, config, []*localTestVolume{testVol})
 		})
 
@@ -348,8 +348,8 @@ var _ = utils.SIGDescribe("PersistentVolumes-local", func() {
 
 			pod, err := config.client.CoreV1().Pods(config.ns).Create(ctx, pod, metav1.CreateOptions{})
 			framework.ExpectNoError(err)
-			err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, config.client, pod.Name, pod.Namespace, f.Timeouts.PodStart)
-			framework.ExpectError(err)
+			getPod := e2epod.Get(f.ClientSet, pod)
+			gomega.Consistently(ctx, getPod, f.Timeouts.PodStart, 2*time.Second).ShouldNot(e2epod.BeInPhase(v1.PodRunning))
 			cleanupLocalVolumes(ctx, config, []*localTestVolume{testVol})
 		})
 
diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go
index d767e358135..65b57563668 100644
--- a/test/e2e/storage/testsuites/provisioning.go
+++ b/test/e2e/storage/testsuites/provisioning.go
@@ -1004,7 +1004,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
 	// Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out
 	ginkgo.By("checking the claims are in pending state")
 	err = e2epv.WaitForPersistentVolumeClaimsPhase(ctx, v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true)
-	framework.ExpectError(err)
+	gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("not all in phase Bound")))
 	verifyPVCsPending(ctx, t.Client, createdClaims)
 
 	ginkgo.By("creating a pod referring to the claims")
diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go
index 2343cb8e7a6..e45d040d2e0 100644
--- a/test/e2e/storage/testsuites/snapshottable.go
+++ b/test/e2e/storage/testsuites/snapshottable.go
@@ -410,12 +410,12 @@ func deleteVolumeSnapshot(ctx context.Context, f *framework.Framework, dc dynami
 	switch pattern.SnapshotDeletionPolicy {
 	case storageframework.DeleteSnapshot:
 		ginkgo.By("checking the SnapshotContent has been deleted")
-		err = storageutils.WaitForGVRDeletion(ctx, dc, storageutils.SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
+		err = storageutils.EnsureGVRDeletion(ctx, dc, storageutils.SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete, "")
 		framework.ExpectNoError(err)
 	case storageframework.RetainSnapshot:
 		ginkgo.By("checking the SnapshotContent has not been deleted")
-		err = storageutils.WaitForGVRDeletion(ctx, dc, storageutils.SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */)
-		framework.ExpectError(err)
+		err = storageutils.EnsureNoGVRDeletion(ctx, dc, storageutils.SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */, "")
+		framework.ExpectNoError(err)
 	}
 }
 
diff --git a/test/e2e/storage/testsuites/volume_expand.go b/test/e2e/storage/testsuites/volume_expand.go
index 2e29277418c..8f64ec4bd49 100644
--- a/test/e2e/storage/testsuites/volume_expand.go
+++ b/test/e2e/storage/testsuites/volume_expand.go
@@ -25,6 +25,7 @@ import (
 	"github.com/onsi/gomega"
 
 	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/errors"
@@ -157,6 +158,24 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
 			ginkgo.DeferCleanup(cleanup)
 
 			var err error
+			// Create a pod with the PVC
+			ginkgo.By("Creating a pod with PVC")
+			podConfig := e2epod.Config{
+				NS:            f.Namespace.Name,
+				PVCs:          []*v1.PersistentVolumeClaim{l.resource.Pvc},
+				SeLinuxLabel:  e2epod.GetLinuxLabel(),
+				NodeSelection: l.config.ClientNodeSelection,
+				ImageID:       e2epod.GetDefaultTestImageID(),
+			}
+			l.pod, err = e2epod.CreateSecPodWithNodeSelection(ctx, f.ClientSet, &podConfig, f.Timeouts.PodStart)
+			ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod)
+			framework.ExpectNoError(err, "While creating pods for expanding")
+
+			// Wait for the pod to run
+			ginkgo.By("Waiting for pod to run")
+			err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart)
+			framework.ExpectNoError(err)
+
 			gomega.Expect(l.resource.Sc.AllowVolumeExpansion).NotTo(gomega.BeNil())
 			allowVolumeExpansion := *l.resource.Sc.AllowVolumeExpansion
 			gomega.Expect(allowVolumeExpansion).To(gomega.BeFalse())
@@ -166,7 +185,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
 				newSize.Add(resource.MustParse("1Gi"))
 				framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
 				_, err = ExpandPVCSize(ctx, l.resource.Pvc, newSize, f.ClientSet)
-				framework.ExpectError(err, "While updating non-expandable PVC")
+				gomega.Expect(err).To(gomega.MatchError(apierrors.IsForbidden, "While updating non-expandable PVC"))
 			})
 		} else {
 			ginkgo.It("Verify if offline PVC expansion works", func(ctx context.Context) {
@@ -316,7 +335,7 @@ func ExpandPVCSize(ctx context.Context, origPVC *v1.PersistentVolumeClaim, size
 		return true, nil
 	})
 	if wait.Interrupted(waitErr) {
-		return nil, fmt.Errorf("timed out attempting to update PVC size. last update error: %v", lastUpdateError)
+		return nil, fmt.Errorf("timed out attempting to update PVC size. last update error: %w", lastUpdateError)
 	}
 	if waitErr != nil {
 		return nil, fmt.Errorf("failed to expand PVC size (check logs for error): %v", waitErr)
diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go
index b6f18a11832..1dea4b13d44 100644
--- a/test/e2e/storage/utils/utils.go
+++ b/test/e2e/storage/utils/utils.go
@@ -619,6 +619,40 @@ func WaitForGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.Gro
 	return fmt.Errorf("%s %s is not deleted within %v", gvr.Resource, objectName, timeout)
 }
 
+// EnsureGVRDeletion checks that the object as defined by the group/version/resource and name is eventually deleted (a NotFound error is returned) within the given time period
+func EnsureGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration, namespace string) error {
+	var resourceClient dynamic.ResourceInterface
+	if namespace != "" {
+		resourceClient = c.Resource(gvr).Namespace(namespace)
+	} else {
+		resourceClient = c.Resource(gvr)
+	}
+
+	err := framework.Gomega().Eventually(ctx, func(ctx context.Context) error {
+		_, err := resourceClient.Get(ctx, objectName, metav1.GetOptions{})
+		return err
+	}).WithTimeout(timeout).WithPolling(poll).Should(gomega.MatchError(apierrors.IsNotFound, fmt.Sprintf("failed to delete %s %s", gvr, objectName)))
+	return err
+}
+
+// EnsureNoGVRDeletion checks that the object as defined by the group/version/resource and name has not been deleted during the given time period
+func EnsureNoGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration, namespace string) error {
+	var resourceClient dynamic.ResourceInterface
+	if namespace != "" {
+		resourceClient = c.Resource(gvr).Namespace(namespace)
+	} else {
+		resourceClient = c.Resource(gvr)
+	}
+	err := framework.Gomega().Consistently(ctx, func(ctx context.Context) error {
+		_, err := resourceClient.Get(ctx, objectName, metav1.GetOptions{})
+		if err != nil {
+			return fmt.Errorf("failed to get %s %s: %w", gvr.Resource, objectName, err)
+		}
+		return nil
+	}).WithTimeout(timeout).WithPolling(poll).Should(gomega.Succeed())
+	return err
+}
+
 // WaitForNamespacedGVRDeletion waits until a namespaced object has been deleted
 func WaitForNamespacedGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, ns, objectName string, poll, timeout time.Duration) error {
 	framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName)
diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go
index 0ad173bd4da..6012e3affb9 100644
--- a/test/e2e/storage/volume_metrics.go
+++ b/test/e2e/storage/volume_metrics.go
@@ -194,8 +194,8 @@ var _ = utils.SIGDescribe(framework.WithSerial(), "Volume metrics", func() {
 		pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name)
 
-		err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
-		framework.ExpectError(err)
+		getPod := e2epod.Get(f.ClientSet, pod)
+		gomega.Consistently(ctx, getPod, f.Timeouts.PodStart, 2*time.Second).ShouldNot(e2epod.BeInPhase(v1.PodRunning))
 
 		framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
 		framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod))
diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go
index 78cd9175e01..fed0b93157a 100644
--- a/test/e2e/storage/volume_provisioning.go
+++ b/test/e2e/storage/volume_provisioning.go
@@ -552,7 +552,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 
 			// The claim should timeout phase:Pending
 			err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
-			framework.ExpectError(err)
+			gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("not all in phase Bound")))
 			framework.Logf(err.Error())
 			claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)
@@ -591,7 +591,7 @@
 
 			// The claim should timeout phase:Pending
 			err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
-			framework.ExpectError(err)
+			gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("not all in phase Bound")))
 			framework.Logf(err.Error())
 			claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)
@@ -720,7 +720,7 @@ func waitForProvisionedVolumesDeleted(ctx context.Context, c clientset.Interface
 		return true, nil // No PVs remain
 	})
 	if err != nil {
-		return remainingPVs, fmt.Errorf("Error waiting for PVs to be deleted: %w", err)
+		return remainingPVs, fmt.Errorf("error waiting for PVs to be deleted: %w", err)
 	}
 	return nil, nil
 }
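
Reviewer note (outside the diff): most replacements above use one of two gomega.MatchError
forms. A minimal, self-contained sketch of both, assuming only gomega and apimachinery;
the package and test names are hypothetical, not part of this patch:

    package expecterr_test

    import (
    	"errors"
    	"fmt"
    	"testing"

    	"github.com/onsi/gomega"
    	apierrors "k8s.io/apimachinery/pkg/api/errors"
    	"k8s.io/apimachinery/pkg/runtime/schema"
    )

    func TestMatchErrorForms(t *testing.T) {
    	g := gomega.NewWithT(t)

    	// Form 1: classify the error with a func(error) bool. The description
    	// argument is mandatory when a function is passed instead of an error.
    	notFound := apierrors.NewNotFound(schema.GroupResource{Resource: "volumeattachments"}, "va-1")
    	g.Expect(notFound).To(gomega.MatchError(apierrors.IsNotFound, "expected a NotFound API error"))

    	// Form 2: match a substring of the error text; used above for wait
    	// helpers whose failures are wrapped plain errors, not typed API errors.
    	timeout := fmt.Errorf("PersistentVolumeClaims [pvc-1] not all in phase Bound: %w", errors.New("timed out"))
    	g.Expect(timeout).To(gomega.MatchError(gomega.ContainSubstring("not all in phase Bound")))
    }

The function form fails with a useful message when the error is of the wrong kind, whereas
the old framework.ExpectError passed on any non-nil error.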
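Reviewer note (outside the diff): the "wait for running, then ExpectError" sequences were
replaced with gomega.Consistently, so the test fails as soon as the pod is ever observed
Running instead of asserting on a timeout error afterwards. A sketch of the underlying
pattern with a plain clientset; the helper name is hypothetical (the patch itself uses
e2epod.Get and e2epod.BeInPhase):

    package example

    import (
    	"context"
    	"time"

    	"github.com/onsi/gomega"
    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	clientset "k8s.io/client-go/kubernetes"
    )

    // ensurePodNeverRuns polls the pod for the whole window and fails the
    // assertion if any poll observes phase Running.
    func ensurePodNeverRuns(ctx context.Context, g gomega.Gomega, c clientset.Interface, ns, name string, window time.Duration) {
    	g.Consistently(ctx, func(ctx context.Context) (*v1.Pod, error) {
    		return c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
    	}).WithTimeout(window).WithPolling(2 * time.Second).
    		ShouldNot(gomega.HaveField("Status.Phase", gomega.Equal(v1.PodRunning)))
    }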
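Reviewer note (outside the diff): snapshottable.go now asserts both deletion-policy
branches positively through the new utils helpers plus ExpectNoError, rather than
inferring retention from a wait timeout. A usage sketch under stated assumptions: the
wrapper function and the Delete-branch timeout are illustrative, while the helper
signatures are exactly the ones added in utils.go above:

    package example

    import (
    	"context"
    	"time"

    	"k8s.io/client-go/dynamic"
    	"k8s.io/kubernetes/test/e2e/framework"
    	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
    )

    // checkSnapshotContentDeletion mirrors the deletion-policy switch in
    // snapshottable.go: Delete means the content must disappear, Retain means
    // it must still exist after the whole observation window.
    func checkSnapshotContentDeletion(ctx context.Context, dc dynamic.Interface, name string, retained bool) {
    	if retained {
    		// Consistently: every Get during the window must succeed.
    		err := storageutils.EnsureNoGVRDeletion(ctx, dc, storageutils.SnapshotContentGVR, name, 1*time.Second /* poll */, 30*time.Second /* timeout */, "" /* cluster-scoped */)
    		framework.ExpectNoError(err)
    		return
    	}
    	// Eventually: a Get must return NotFound before the timeout expires.
    	err := storageutils.EnsureGVRDeletion(ctx, dc, storageutils.SnapshotContentGVR, name, framework.Poll, 5*time.Minute /* timeout */, "" /* cluster-scoped */)
    	framework.ExpectNoError(err)
    }

The empty namespace argument selects a cluster-scoped resource client, matching how
snapshottable.go calls these helpers for VolumeSnapshotContents.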