e2e_storage: stop using deprecated framework.ExpectError

Signed-off-by: liyuerich <yue.li@daocloud.io>
This commit is contained in:
liyuerich 2023-08-31 18:35:36 +08:00
parent 7947052689
commit f55588fa0b
11 changed files with 78 additions and 31 deletions

View File

@ -293,15 +293,6 @@ func (f *FailureError) backtrace() {
// } // }
var ErrFailure error = FailureError{} var ErrFailure error = FailureError{}
// ExpectError fails the test if err is nil, i.e. it asserts that an error
// occurred. The offset of 1 makes the failure point at the caller's line,
// not at this wrapper.
//
// Deprecated: use gomega.Expect().To(gomega.HaveOccurred()) or (better!) check
// specifically for the error that is expected with
// gomega.Expect().To(gomega.MatchError(gomega.ContainSubstring()))
func ExpectError(err error, explain ...interface{}) {
gomega.ExpectWithOffset(1, err).To(gomega.HaveOccurred(), explain...)
}
// ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error. // ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error.
func ExpectNoError(err error, explain ...interface{}) { func ExpectNoError(err error, explain ...interface{}) {
ExpectNoErrorWithOffset(1, err, explain...) ExpectNoErrorWithOffset(1, err, explain...)

View File

@ -23,6 +23,7 @@ import (
"github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -101,7 +102,7 @@ var _ = utils.SIGDescribe("CSI Mock volume attach", func() {
} }
} }
if test.disableAttach { if test.disableAttach {
framework.ExpectError(err, "Unexpected VolumeAttachment found") gomega.Expect(err).To(gomega.MatchError(apierrors.IsNotFound, "Unexpected VolumeAttachment found"))
} }
}) })

View File

@ -22,6 +22,8 @@ import (
"time" "time"
"github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@ -430,7 +432,7 @@ func deleteSnapshot(cs clientset.Interface, config *storageframework.PerTestConf
// check if the snapshot is deleted // check if the snapshot is deleted
_, err = dc.Resource(utils.SnapshotGVR).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{}) _, err = dc.Resource(utils.SnapshotGVR).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{})
framework.ExpectError(err) gomega.Expect(err).To(gomega.MatchError(apierrors.IsNotFound, "the snapshot is not deleted"))
} }
type snapshotMetricsTestConfig struct { type snapshotMetricsTestConfig struct {

View File

@ -24,6 +24,7 @@ import (
csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
"github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega" "github.com/onsi/gomega"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@ -151,11 +152,10 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() {
framework.Failf("error updating pvc size %q", pvc.Name) framework.Failf("error updating pvc size %q", pvc.Name)
} }
if test.expectFailure { if test.expectFailure {
err = testsuites.WaitForResizingCondition(ctx, pvc, m.cs, csiResizingConditionWait) gomega.Consistently(ctx, framework.GetObject(m.cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get, pvc.Name, metav1.GetOptions{})).WithTimeout(csiResizingConditionWait).
framework.ExpectError(err, "unexpected resizing condition on PVC") ShouldNot(gomega.HaveField("Status.Conditions", gomega.ContainElement(gomega.HaveField("Type", gomega.Equal("PersistentVolumeClaimResizing")))), "unexpected resizing condition on PVC")
return return
} }
ginkgo.By("Waiting for persistent volume resize to finish") ginkgo.By("Waiting for persistent volume resize to finish")
err = testsuites.WaitForControllerVolumeResize(ctx, pvc, m.cs, csiResizeWaitPeriod) err = testsuites.WaitForControllerVolumeResize(ctx, pvc, m.cs, csiResizeWaitPeriod)
framework.ExpectNoError(err, "While waiting for CSI PV resize to finish") framework.ExpectNoError(err, "While waiting for CSI PV resize to finish")

View File

@ -325,10 +325,10 @@ var _ = utils.SIGDescribe("PersistentVolumes-local", func() {
} }
ginkgo.By("Creating local PVC and PV") ginkgo.By("Creating local PVC and PV")
createLocalPVCsPVs(ctx, config, []*localTestVolume{testVol}, immediateMode) createLocalPVCsPVs(ctx, config, []*localTestVolume{testVol}, immediateMode)
pod, err := createLocalPod(ctx, config, testVol, nil) // createLocalPod will create a pod and wait for it to be running. In this case,
framework.ExpectError(err) // It's expected that the Pod fails to start.
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, config.client, pod.Name, pod.Namespace, f.Timeouts.PodStart) _, err := createLocalPod(ctx, config, testVol, nil)
framework.ExpectError(err) gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("is not Running")))
cleanupLocalPVCsPVs(ctx, config, []*localTestVolume{testVol}) cleanupLocalPVCsPVs(ctx, config, []*localTestVolume{testVol})
}) })
@ -348,8 +348,8 @@ var _ = utils.SIGDescribe("PersistentVolumes-local", func() {
pod, err := config.client.CoreV1().Pods(config.ns).Create(ctx, pod, metav1.CreateOptions{}) pod, err := config.client.CoreV1().Pods(config.ns).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, config.client, pod.Name, pod.Namespace, f.Timeouts.PodStart) getPod := e2epod.Get(f.ClientSet, pod)
framework.ExpectError(err) gomega.Consistently(ctx, getPod, f.Timeouts.PodStart, 2*time.Second).ShouldNot(e2epod.BeInPhase(v1.PodRunning))
cleanupLocalVolumes(ctx, config, []*localTestVolume{testVol}) cleanupLocalVolumes(ctx, config, []*localTestVolume{testVol})
}) })

View File

@ -1004,7 +1004,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
// Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out // Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out
ginkgo.By("checking the claims are in pending state") ginkgo.By("checking the claims are in pending state")
err = e2epv.WaitForPersistentVolumeClaimsPhase(ctx, v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true) err = e2epv.WaitForPersistentVolumeClaimsPhase(ctx, v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true)
framework.ExpectError(err) gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("not all in phase Bound")))
verifyPVCsPending(ctx, t.Client, createdClaims) verifyPVCsPending(ctx, t.Client, createdClaims)
ginkgo.By("creating a pod referring to the claims") ginkgo.By("creating a pod referring to the claims")

View File

@ -410,12 +410,12 @@ func deleteVolumeSnapshot(ctx context.Context, f *framework.Framework, dc dynami
switch pattern.SnapshotDeletionPolicy { switch pattern.SnapshotDeletionPolicy {
case storageframework.DeleteSnapshot: case storageframework.DeleteSnapshot:
ginkgo.By("checking the SnapshotContent has been deleted") ginkgo.By("checking the SnapshotContent has been deleted")
err = storageutils.WaitForGVRDeletion(ctx, dc, storageutils.SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete) err = storageutils.EnsureGVRDeletion(ctx, dc, storageutils.SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete, "")
framework.ExpectNoError(err) framework.ExpectNoError(err)
case storageframework.RetainSnapshot: case storageframework.RetainSnapshot:
ginkgo.By("checking the SnapshotContent has not been deleted") ginkgo.By("checking the SnapshotContent has not been deleted")
err = storageutils.WaitForGVRDeletion(ctx, dc, storageutils.SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */) err = storageutils.EnsureNoGVRDeletion(ctx, dc, storageutils.SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */, "")
framework.ExpectError(err) framework.ExpectNoError(err)
} }
} }

View File

@ -25,6 +25,7 @@ import (
"github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/errors"
@ -157,6 +158,24 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
ginkgo.DeferCleanup(cleanup) ginkgo.DeferCleanup(cleanup)
var err error var err error
// create Pod with pvc
ginkgo.By("Creating a pod with PVC")
podConfig := e2epod.Config{
NS: f.Namespace.Name,
PVCs: []*v1.PersistentVolumeClaim{l.resource.Pvc},
SeLinuxLabel: e2epod.GetLinuxLabel(),
NodeSelection: l.config.ClientNodeSelection,
ImageID: e2epod.GetDefaultTestImageID(),
}
l.pod, err = e2epod.CreateSecPodWithNodeSelection(ctx, f.ClientSet, &podConfig, f.Timeouts.PodStart)
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, l.pod)
framework.ExpectNoError(err, "While creating pods for expanding")
// Waiting for pod to run
ginkgo.By("Waiting for pod to run")
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err)
gomega.Expect(l.resource.Sc.AllowVolumeExpansion).NotTo(gomega.BeNil()) gomega.Expect(l.resource.Sc.AllowVolumeExpansion).NotTo(gomega.BeNil())
allowVolumeExpansion := *l.resource.Sc.AllowVolumeExpansion allowVolumeExpansion := *l.resource.Sc.AllowVolumeExpansion
gomega.Expect(allowVolumeExpansion).To(gomega.BeFalse()) gomega.Expect(allowVolumeExpansion).To(gomega.BeFalse())
@ -166,7 +185,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
newSize.Add(resource.MustParse("1Gi")) newSize.Add(resource.MustParse("1Gi"))
framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize) framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
_, err = ExpandPVCSize(ctx, l.resource.Pvc, newSize, f.ClientSet) _, err = ExpandPVCSize(ctx, l.resource.Pvc, newSize, f.ClientSet)
framework.ExpectError(err, "While updating non-expandable PVC") gomega.Expect(err).To(gomega.MatchError(apierrors.IsForbidden, "While updating non-expandable PVC"))
}) })
} else { } else {
ginkgo.It("Verify if offline PVC expansion works", func(ctx context.Context) { ginkgo.It("Verify if offline PVC expansion works", func(ctx context.Context) {
@ -316,7 +335,7 @@ func ExpandPVCSize(ctx context.Context, origPVC *v1.PersistentVolumeClaim, size
return true, nil return true, nil
}) })
if wait.Interrupted(waitErr) { if wait.Interrupted(waitErr) {
return nil, fmt.Errorf("timed out attempting to update PVC size. last update error: %v", lastUpdateError) return nil, fmt.Errorf("timed out attempting to update PVC size. last update error: %w", lastUpdateError)
} }
if waitErr != nil { if waitErr != nil {
return nil, fmt.Errorf("failed to expand PVC size (check logs for error): %v", waitErr) return nil, fmt.Errorf("failed to expand PVC size (check logs for error): %v", waitErr)

View File

@ -619,6 +619,40 @@ func WaitForGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.Gro
return fmt.Errorf("%s %s is not deleted within %v", gvr.Resource, objectName, timeout) return fmt.Errorf("%s %s is not deleted within %v", gvr.Resource, objectName, timeout)
} }
// EnsureGVRDeletion waits until the object identified by the given
// group/version/resource and name is no longer found, i.e. until Get
// eventually returns a NotFound error within the given timeout. If namespace
// is empty, the resource is treated as cluster-scoped.
func EnsureGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration, namespace string) error {
var resourceClient dynamic.ResourceInterface
if namespace != "" {
resourceClient = c.Resource(gvr).Namespace(namespace)
} else {
resourceClient = c.Resource(gvr)
}
// Poll until Get reports NotFound; any other error (or continued
// existence of the object) keeps the Eventually loop going until timeout.
return framework.Gomega().Eventually(ctx, func(ctx context.Context) error {
_, err := resourceClient.Get(ctx, objectName, metav1.GetOptions{})
return err
}).WithTimeout(timeout).WithPolling(poll).Should(gomega.MatchError(apierrors.IsNotFound, fmt.Sprintf("failed to delete %s %s", gvr, objectName)))
}
// EnsureNoGVRDeletion verifies that the object identified by the given
// group/version/resource and name keeps existing for the whole observation
// window: every Get during [0, timeout] must succeed. If namespace is empty,
// the resource is treated as cluster-scoped.
func EnsureNoGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration, namespace string) error {
var client dynamic.ResourceInterface = c.Resource(gvr)
if namespace != "" {
client = c.Resource(gvr).Namespace(namespace)
}
// Consistently fails as soon as a single Get returns an error, which
// includes the NotFound produced by a deletion.
return framework.Gomega().Consistently(ctx, func(ctx context.Context) error {
if _, err := client.Get(ctx, objectName, metav1.GetOptions{}); err != nil {
return fmt.Errorf("failed to get %s %s: %w", gvr.Resource, objectName, err)
}
return nil
}).WithTimeout(timeout).WithPolling(poll).Should(gomega.Succeed())
}
// WaitForNamespacedGVRDeletion waits until a namespaced object has been deleted // WaitForNamespacedGVRDeletion waits until a namespaced object has been deleted
func WaitForNamespacedGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, ns, objectName string, poll, timeout time.Duration) error { func WaitForNamespacedGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, ns, objectName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName) framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName)

View File

@ -194,8 +194,8 @@ var _ = utils.SIGDescribe(framework.WithSerial(), "Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name) framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name)
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, c, pod.Name, pod.Namespace, f.Timeouts.PodStart) getPod := e2epod.Get(f.ClientSet, pod)
framework.ExpectError(err) gomega.Consistently(ctx, getPod, f.Timeouts.PodStart, 2*time.Second).ShouldNot(e2epod.BeInPhase(v1.PodRunning))
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod))

View File

@ -552,7 +552,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// The claim should timeout phase:Pending // The claim should timeout phase:Pending
err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
framework.ExpectError(err) gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("not all in phase Bound")))
framework.Logf(err.Error()) framework.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{}) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -591,7 +591,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// The claim should timeout phase:Pending // The claim should timeout phase:Pending
err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
framework.ExpectError(err) gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("not all in phase Bound")))
framework.Logf(err.Error()) framework.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{}) claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -720,7 +720,7 @@ func waitForProvisionedVolumesDeleted(ctx context.Context, c clientset.Interface
return true, nil // No PVs remain return true, nil // No PVs remain
}) })
if err != nil { if err != nil {
return remainingPVs, fmt.Errorf("Error waiting for PVs to be deleted: %w", err) return remainingPVs, fmt.Errorf("error waiting for PVs to be deleted: %w", err)
} }
return nil, nil return nil, nil
} }