Merge pull request #120402 from Rei1010/storageClean

e2e_storage: stop using deprecated framework.ExpectNotEqual
Kubernetes Prow Robot authored 2023-09-27 05:14:08 -07:00, committed by GitHub
commit fe6d64d080
9 changed files with 22 additions and 21 deletions
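
Across all nine files the change follows the same pattern: each call to the deprecated framework.ExpectNotEqual helper is rewritten as a direct gomega assertion (BeNil for nil checks, BeEmpty for length and empty-string checks, Equal for value comparisons), and the gomega import is added where it was not already present. A minimal standalone sketch of the replacement matchers, runnable as a plain `go test` outside the e2e framework; the names and values below are illustrative only and are not taken from any of the changed files:

package example

import (
	"testing"

	"github.com/onsi/gomega"
)

// TestNotEqualReplacements demonstrates the gomega matchers this commit
// switches to, using gomega.NewWithT so no Ginkgo suite is required.
func TestNotEqualReplacements(t *testing.T) {
	g := gomega.NewWithT(t)

	// framework.ExpectNotEqual(sc, nil, ...)  ->  nil check with BeNil
	sc := &struct{ Provisioner string }{Provisioner: "example.csi.driver"}
	g.Expect(sc).ToNot(gomega.BeNil(), "storage class should not be nil")

	// framework.ExpectNotEqual(len(items), 0, ...)  ->  emptiness check with BeEmpty
	latencyMetrics := map[string]int{"volume_attach": 1}
	g.Expect(latencyMetrics).ToNot(gomega.BeEmpty(), "expected at least one metric")

	// framework.ExpectNotEqual(a, b, ...)  ->  inequality check with Equal
	firstNode, secondNode := "node-a", "node-b"
	g.Expect(secondNode).ToNot(gomega.Equal(firstNode), "pods should land on different nodes")
}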

@@ -298,7 +298,7 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(ctx context.Context,
 		}
 	}
-	framework.ExpectNotEqual(sc, nil, "storage class is unexpectantly nil")
+	gomega.Expect(sc).ToNot(gomega.BeNil(), "storage class is unexpectantly nil")
 	if fsType != "" {
 		if sc.Parameters == nil {

@@ -124,7 +124,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
 		ginkgo.By("Checking that the PV status is Terminating")
 		pv, err = client.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err, "While checking PV status")
-		framework.ExpectNotEqual(pv.ObjectMeta.DeletionTimestamp, nil)
+		gomega.Expect(pv.ObjectMeta.DeletionTimestamp).ToNot(gomega.BeNil())
 		ginkgo.By("Deleting the PVC that is bound to the PV")
 		err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(ctx, pvc.Name, *metav1.NewDeleteOptions(0))

@@ -134,7 +134,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 		ginkgo.By("Checking that the PVC status is Terminating")
 		pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err, "While checking PVC status")
-		framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil)
+		gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).ToNot(gomega.BeNil())
 		ginkgo.By("Deleting the pod that uses the PVC")
 		err = e2epod.DeletePodWithWait(ctx, client, pod)
@@ -153,7 +153,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 		ginkgo.By("Checking that the PVC status is Terminating")
 		pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err, "While checking PVC status")
-		framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil)
+		gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).ToNot(gomega.BeNil())
 		ginkgo.By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted")
 		secondPod, err2 := e2epod.CreateUnschedulablePod(ctx, client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, f.NamespacePodSecurityLevel, "")
@@ -166,7 +166,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 		ginkgo.By("Checking again that the PVC status is Terminating")
 		pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err, "While checking PVC status")
-		framework.ExpectNotEqual(pvc.ObjectMeta.DeletionTimestamp, nil)
+		gomega.Expect(pvc.ObjectMeta.DeletionTimestamp).ToNot(gomega.BeNil())
 		ginkgo.By("Deleting the first pod that uses the PVC")
 		err = e2epod.DeletePodWithWait(ctx, client, pod)

@@ -965,7 +965,7 @@ func PVMultiNodeCheck(ctx context.Context, client clientset.Interface, timeouts
 		framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
 		runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err, "get pod")
-		framework.ExpectNotEqual(runningPod.Spec.NodeName, actualNodeName, "second pod should have run on a different node")
+		gomega.Expect(runningPod.Spec.NodeName).ToNot(gomega.Equal(actualNodeName), "second pod should have run on a different node")
 		StopPod(ctx, client, pod)
 		pod = nil
 	}
@@ -973,7 +973,7 @@ func PVMultiNodeCheck(ctx context.Context, client clientset.Interface, timeouts
 // TestBindingWaitForFirstConsumerMultiPVC tests the binding with WaitForFirstConsumer mode
 func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Context, claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
 	var err error
-	framework.ExpectNotEqual(len(claims), 0)
+	gomega.Expect(claims).ToNot(gomega.BeEmpty())
 	namespace := claims[0].Namespace
 	ginkgo.By("creating claims")

@@ -998,7 +998,7 @@ func testSubpathReconstruction(ctx context.Context, f *framework.Framework, host
 			podNode = &nodeList.Items[i]
 		}
 	}
-	framework.ExpectNotEqual(podNode, nil, "pod node should exist in schedulable nodes")
+	gomega.Expect(podNode).ToNot(gomega.BeNil(), "pod node should exist in schedulable nodes")
 	storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, f.ClientSet, f, pod, forceDelete, true, nil, volumePath)

@@ -24,6 +24,7 @@ import (
 	"math/rand"

 	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"

 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
@@ -138,7 +139,7 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt
 		}
 		l.resource.Sc = dDriver.GetDynamicProvisionStorageClass(ctx, l.config, pattern.FsType)
-		framework.ExpectNotEqual(l.resource.Sc, nil, "driver failed to provide a StorageClass")
+		gomega.Expect(l.resource.Sc).ToNot(gomega.BeNil(), "driver failed to provide a StorageClass")
 		l.resource.Sc.VolumeBindingMode = &pattern.BindingMode
 		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange

@@ -389,7 +389,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
 			// Reload the pod to get its node
 			pod, err = l.cs.CoreV1().Pods(l.ns.Name).Get(ctx, pod.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)
-			framework.ExpectNotEqual(pod.Spec.NodeName, "", "pod should be scheduled to a node")
+			gomega.Expect(pod.Spec.NodeName).ToNot(gomega.BeEmpty(), "pod should be scheduled to a node")
 			node, err := l.cs.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{})
 			framework.ExpectNoError(err)

@@ -136,7 +136,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		if !ephemeral {
 			pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
 			framework.ExpectNoError(err)
-			framework.ExpectNotEqual(pvc, nil)
+			gomega.Expect(pvc).ToNot(gomega.BeNil())
 		}
 		pod := makePod(f, pvc, ephemeral)
@@ -150,8 +150,8 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		updatedStorageMetrics := waitForDetachAndGrabMetrics(ctx, storageOpMetrics, metricsGrabber, pluginName)
-		framework.ExpectNotEqual(len(updatedStorageMetrics.latencyMetrics), 0, "Error fetching c-m updated storage metrics")
-		framework.ExpectNotEqual(len(updatedStorageMetrics.statusMetrics), 0, "Error fetching c-m updated storage metrics")
+		gomega.Expect(updatedStorageMetrics.latencyMetrics).ToNot(gomega.BeEmpty(), "Error fetching c-m updated storage metrics")
+		gomega.Expect(updatedStorageMetrics.statusMetrics).ToNot(gomega.BeEmpty(), "Error fetching c-m updated storage metrics")
 		volumeOperations := []string{"volume_detach", "volume_attach"}
@@ -186,7 +186,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		if !ephemeral {
 			pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
 			framework.ExpectNoError(err, "failed to create PVC %s/%s", pvc.Namespace, pvc.Name)
-			framework.ExpectNotEqual(pvc, nil)
+			gomega.Expect(pvc).ToNot(gomega.BeNil())
 		}
 		ginkgo.By("Creating a pod and expecting it to fail")
@@ -205,14 +205,14 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		framework.ExpectNoError(err, "failed to get controller manager metrics")
 		updatedStorageMetrics := getControllerStorageMetrics(updatedControllerMetrics, pluginName)
-		framework.ExpectNotEqual(len(updatedStorageMetrics.statusMetrics), 0, "Error fetching c-m updated storage metrics")
+		gomega.Expect(updatedStorageMetrics.statusMetrics).ToNot(gomega.BeEmpty(), "Error fetching c-m updated storage metrics")
 	}
 	filesystemMode := func(ctx context.Context, isEphemeral bool) {
 		if !isEphemeral {
 			pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
 			framework.ExpectNoError(err)
-			framework.ExpectNotEqual(pvc, nil)
+			gomega.Expect(pvc).ToNot(gomega.BeNil())
 		}
 		pod := makePod(f, pvc, isEphemeral)
@@ -277,7 +277,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		if !isEphemeral {
 			pvcBlock, err = c.CoreV1().PersistentVolumeClaims(pvcBlock.Namespace).Create(ctx, pvcBlock, metav1.CreateOptions{})
 			framework.ExpectNoError(err)
-			framework.ExpectNotEqual(pvcBlock, nil)
+			gomega.Expect(pvcBlock).ToNot(gomega.BeNil())
 		}
 		pod := makePod(f, pvcBlock, isEphemeral)
@@ -343,7 +343,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		if !isEphemeral {
 			pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
 			framework.ExpectNoError(err)
-			framework.ExpectNotEqual(pvc, nil)
+			gomega.Expect(pvc).ToNot(gomega.BeNil())
 		}
 		pod := makePod(f, pvc, isEphemeral)
@@ -374,7 +374,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		if !isEphemeral {
 			pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
 			framework.ExpectNoError(err)
-			framework.ExpectNotEqual(pvc, nil)
+			gomega.Expect(pvc).ToNot(gomega.BeNil())
 		}
 		pod := makePod(f, pvc, isEphemeral)
@@ -404,7 +404,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		if !isEphemeral {
 			pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
 			framework.ExpectNoError(err)
-			framework.ExpectNotEqual(pvc, nil)
+			gomega.Expect(pvc).ToNot(gomega.BeNil())
 		}
 		pod := makePod(f, pvc, isEphemeral)

@@ -275,7 +275,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			}
 			if zone, ok := test.Parameters["zone"]; ok {
-				framework.ExpectNotEqual(len(zone), 0, "expect at least one zone")
+				gomega.Expect(zone).ToNot(gomega.BeEmpty(), "expect at least one zone")
 			}
 			ginkgo.By("Testing " + test.Name)