From 622eac234772d00c88867cf6d16fd05c561c58de Mon Sep 17 00:00:00 2001 From: "wen.rui" Date: Mon, 4 Sep 2023 20:02:11 +0800 Subject: [PATCH] e2e_storage: stop using deprecated framework.ExpectNotEqual --- test/e2e/storage/external/external.go | 2 +- test/e2e/storage/pv_protection.go | 2 +- test/e2e/storage/pvc_protection.go | 6 +++--- test/e2e/storage/testsuites/provisioning.go | 4 ++-- test/e2e/storage/testsuites/subpath.go | 2 +- test/e2e/storage/testsuites/topology.go | 3 ++- test/e2e/storage/testsuites/volumemode.go | 2 +- test/e2e/storage/volume_metrics.go | 20 ++++++++++---------- test/e2e/storage/volume_provisioning.go | 2 +- 9 files changed, 22 insertions(+), 21 deletions(-) diff --git a/test/e2e/storage/external/external.go b/test/e2e/storage/external/external.go index 3d56192ed5c..85d9d73e76a 100644 --- a/test/e2e/storage/external/external.go +++ b/test/e2e/storage/external/external.go @@ -298,7 +298,7 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(ctx context.Context, } } - framework.ExpectNotEqual(sc, nil, "storage class is unexpectantly nil") + gomega.Expect(sc).ToNot(gomega.BeNil(), "storage class is unexpectantly nil") if fsType != "" { if sc.Parameters == nil { diff --git a/test/e2e/storage/pv_protection.go b/test/e2e/storage/pv_protection.go index 8db1c01782a..4013953e185 100644 --- a/test/e2e/storage/pv_protection.go +++ b/test/e2e/storage/pv_protection.go @@ -124,7 +124,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { ginkgo.By("Checking that the PV status is Terminating") pv, err = client.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PV status") - framework.ExpectNotEqual(pv.ObjectMeta.DeletionTimestamp, nil) + gomega.Expect(pv.ObjectMeta.DeletionTimestamp).ToNot(gomega.BeNil()) ginkgo.By("Deleting the PVC that is bound to the PV") err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, *metav1.NewDeleteOptions(0)) diff --git 
a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go index 09dc74e33e8..cc49cf2f2ab 100644 --- a/test/e2e/storage/pvc_protection.go +++ b/test/e2e/storage/pvc_protection.go @@ -134,7 +134,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { ginkgo.By("Checking that the PVC status is Terminating") pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PVC status") - framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil) + gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).ToNot(gomega.BeNil()) ginkgo.By("Deleting the pod that uses the PVC") err = e2epod.DeletePodWithWait(ctx, client, pod) @@ -153,7 +153,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { ginkgo.By("Checking that the PVC status is Terminating") pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PVC status") - framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil) + gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).ToNot(gomega.BeNil()) ginkgo.By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted") secondPod, err2 := e2epod.CreateUnschedulablePod(ctx, client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, f.NamespacePodSecurityLevel, "") @@ -166,7 +166,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { ginkgo.By("Checking again that the PVC status is Terminating") pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "While checking PVC status") - framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil) + gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).ToNot(gomega.BeNil()) ginkgo.By("Deleting the first pod that uses the PVC") err = e2epod.DeletePodWithWait(ctx, client, pod) diff --git a/test/e2e/storage/testsuites/provisioning.go 
b/test/e2e/storage/testsuites/provisioning.go index 62d6267dac3..a89af2cffa6 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -965,7 +965,7 @@ func PVMultiNodeCheck(ctx context.Context, client clientset.Interface, timeouts framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, client, pod.Name, pod.Namespace, timeouts.PodStartSlow)) runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") - framework.ExpectNotEqual(runningPod.Spec.NodeName, actualNodeName, "second pod should have run on a different node") + gomega.Expect(runningPod.Spec.NodeName).ToNot(gomega.Equal(actualNodeName), "second pod should have run on a different node") StopPod(ctx, client, pod) pod = nil } @@ -973,7 +973,7 @@ func PVMultiNodeCheck(ctx context.Context, client clientset.Interface, timeouts // TestBindingWaitForFirstConsumerMultiPVC tests the binding with WaitForFirstConsumer mode func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Context, claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) { var err error - framework.ExpectNotEqual(len(claims), 0) + gomega.Expect(claims).ToNot(gomega.BeEmpty()) namespace := claims[0].Namespace ginkgo.By("creating claims") diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 1b1a7ad387c..810b2c7b916 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -998,7 +998,7 @@ func testSubpathReconstruction(ctx context.Context, f *framework.Framework, host podNode = &nodeList.Items[i] } } - framework.ExpectNotEqual(podNode, nil, "pod node should exist in schedulable nodes") + gomega.Expect(podNode).ToNot(gomega.BeNil(), "pod node should exist in schedulable nodes") 
storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, f.ClientSet, f, pod, forceDelete, true, nil, volumePath) diff --git a/test/e2e/storage/testsuites/topology.go b/test/e2e/storage/testsuites/topology.go index 097bb21f783..74292a8e461 100644 --- a/test/e2e/storage/testsuites/topology.go +++ b/test/e2e/storage/testsuites/topology.go @@ -24,6 +24,7 @@ import ( "math/rand" "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -138,7 +139,7 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt } l.resource.Sc = dDriver.GetDynamicProvisionStorageClass(ctx, l.config, pattern.FsType) - framework.ExpectNotEqual(l.resource.Sc, nil, "driver failed to provide a StorageClass") + gomega.Expect(l.resource.Sc).ToNot(gomega.BeNil(), "driver failed to provide a StorageClass") l.resource.Sc.VolumeBindingMode = &pattern.BindingMode testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index c9f6804b40a..28f1f1a71bc 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -389,7 +389,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa // Reload the pod to get its node pod, err = l.cs.CoreV1().Pods(l.ns.Name).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - framework.ExpectNotEqual(pod.Spec.NodeName, "", "pod should be scheduled to a node") + gomega.Expect(pod.Spec.NodeName).ToNot(gomega.BeEmpty(), "pod should be scheduled to a node") node, err := l.cs.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go index 2bd4a115967..3ff05fc0912 100644 --- a/test/e2e/storage/volume_metrics.go +++ b/test/e2e/storage/volume_metrics.go @@ -136,7 +136,7 @@ var _ 
= utils.SIGDescribe("[Serial] Volume metrics", func() { if !ephemeral { pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNotEqual(pvc, nil) + gomega.Expect(pvc).ToNot(gomega.BeNil()) } pod := makePod(f, pvc, ephemeral) @@ -150,8 +150,8 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { updatedStorageMetrics := waitForDetachAndGrabMetrics(ctx, storageOpMetrics, metricsGrabber, pluginName) - framework.ExpectNotEqual(len(updatedStorageMetrics.latencyMetrics), 0, "Error fetching c-m updated storage metrics") - framework.ExpectNotEqual(len(updatedStorageMetrics.statusMetrics), 0, "Error fetching c-m updated storage metrics") + gomega.Expect(updatedStorageMetrics.latencyMetrics).ToNot(gomega.BeEmpty(), "Error fetching c-m updated storage metrics") + gomega.Expect(updatedStorageMetrics.statusMetrics).ToNot(gomega.BeEmpty(), "Error fetching c-m updated storage metrics") volumeOperations := []string{"volume_detach", "volume_attach"} @@ -186,7 +186,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { if !ephemeral { pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create PVC %s/%s", pvc.Namespace, pvc.Name) - framework.ExpectNotEqual(pvc, nil) + gomega.Expect(pvc).ToNot(gomega.BeNil()) } ginkgo.By("Creating a pod and expecting it to fail") @@ -205,14 +205,14 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { framework.ExpectNoError(err, "failed to get controller manager metrics") updatedStorageMetrics := getControllerStorageMetrics(updatedControllerMetrics, pluginName) - framework.ExpectNotEqual(len(updatedStorageMetrics.statusMetrics), 0, "Error fetching c-m updated storage metrics") + gomega.Expect(updatedStorageMetrics.statusMetrics).ToNot(gomega.BeEmpty(), "Error fetching c-m updated storage metrics") } filesystemMode := func(ctx 
context.Context, isEphemeral bool) { if !isEphemeral { pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNotEqual(pvc, nil) + gomega.Expect(pvc).ToNot(gomega.BeNil()) } pod := makePod(f, pvc, isEphemeral) @@ -277,7 +277,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { if !isEphemeral { pvcBlock, err = c.CoreV1().PersistentVolumeClaims(pvcBlock.Namespace).Create(ctx, pvcBlock, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNotEqual(pvcBlock, nil) + gomega.Expect(pvcBlock).ToNot(gomega.BeNil()) } pod := makePod(f, pvcBlock, isEphemeral) @@ -343,7 +343,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { if !isEphemeral { pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNotEqual(pvc, nil) + gomega.Expect(pvc).ToNot(gomega.BeNil()) } pod := makePod(f, pvc, isEphemeral) @@ -374,7 +374,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { if !isEphemeral { pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNotEqual(pvc, nil) + gomega.Expect(pvc).ToNot(gomega.BeNil()) } pod := makePod(f, pvc, isEphemeral) @@ -404,7 +404,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { if !isEphemeral { pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) framework.ExpectNoError(err) - framework.ExpectNotEqual(pvc, nil) + gomega.Expect(pvc).ToNot(gomega.BeNil()) } pod := makePod(f, pvc, isEphemeral) diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 5b5de8592e5..1a0a32cf263 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -275,7 +275,7 @@ var _ = 
utils.SIGDescribe("Dynamic Provisioning", func() { } if zone, ok := test.Parameters["zone"]; ok { - framework.ExpectNotEqual(len(zone), 0, "expect at least one zone") + gomega.Expect(zone).ToNot(gomega.BeEmpty(), "expect at least one zone") } ginkgo.By("Testing " + test.Name)