From 7be77f9eccf25f704be5b89aded4c65a6ce3695b Mon Sep 17 00:00:00 2001
From: Joe Betz
Date: Mon, 11 Apr 2022 13:19:54 -0400
Subject: [PATCH] Improve failure output for tests under test/e2e/storage

Replace framework.ExpectEqual(..., true, ...) assertions with explicit
if-checks that call framework.Fail/framework.Failf, so that failure
messages report the actual values involved (for example the concrete
type of a test volume that could not be cast).
---
 test/e2e/storage/csi_mock_volume.go            |  8 +-
 test/e2e/storage/drivers/in_tree.go            | 88 ++++++++++++++-----
 test/e2e/storage/external/external.go          |  4 +-
 .../flexvolume_mounted_volume_resize.go        |  4 +-
 test/e2e/storage/flexvolume_online_resize.go   |  4 +-
 test/e2e/storage/mounted_volume_resize.go      |  4 +-
 test/e2e/storage/persistent_volumes-local.go   |  4 +-
 test/e2e/storage/regional_pd.go                |  8 +-
 test/e2e/storage/testsuites/provisioning.go    |  4 +-
 test/e2e/storage/testsuites/volumeperf.go      | 21 +++--
 test/e2e/storage/utils/pod.go                  |  5 +-
 test/e2e/storage/volume_metrics.go             | 20 +++--
 .../vsphere/persistent_volumes-vsphere.go      |  4 +-
 test/e2e/storage/vsphere/pv_reclaimpolicy.go   |  4 +-
 test/e2e/storage/vsphere/vsphere_scale.go      |  8 +-
 .../storage/vsphere/vsphere_statefulsets.go    | 12 ++-
 test/e2e/storage/vsphere/vsphere_stress.go     | 16 +++-
 test/e2e/storage/vsphere/vsphere_utils.go      |  8 +-
 .../vsphere/vsphere_volume_diskformat.go       |  8 +-
 .../storage/vsphere/vsphere_volume_fstype.go   |  4 +-
 .../vsphere/vsphere_volume_node_delete.go      |  4 +-
 .../vsphere/vsphere_volume_node_poweroff.go    |  8 +-
 .../vsphere/vsphere_volume_placement.go        |  4 +-
 .../vsphere/vsphere_volume_vsan_policy.go      |  4 +-
 24 files changed, 193 insertions(+), 65 deletions(-)

diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go
index 8c4ed4a4c09..8b97c69b100 100644
--- a/test/e2e/storage/csi_mock_volume.go
+++ b/test/e2e/storage/csi_mock_volume.go
@@ -684,7 +684,9 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			sc, pvc, pod := createPod(pvcReference)
 			gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
 
-			framework.ExpectEqual(*sc.AllowVolumeExpansion, true, "failed creating sc with allowed expansion")
+			if !*sc.AllowVolumeExpansion {
+				framework.Fail("failed creating sc with allowed expansion")
+			}
 
 			err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
 			framework.ExpectNoError(err, "Failed to start pod1: %v", err)
@@ -777,7 +779,9 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			sc, pvc, pod := createPod(pvcReference)
 			gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
 
-			framework.ExpectEqual(*sc.AllowVolumeExpansion, true, "failed creating sc with allowed expansion")
+			if !*sc.AllowVolumeExpansion {
+				framework.Fail("failed creating sc with allowed expansion")
+			}
 
 			err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
 			framework.ExpectNoError(err, "Failed to start pod1: %v", err)
diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go
index ccbc820147e..e8cb7a84951 100644
--- a/test/e2e/storage/drivers/in_tree.go
+++ b/test/e2e/storage/drivers/in_tree.go
@@ -124,7 +124,9 @@ func (n *nfsDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
 
 func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	nv, ok := e2evolume.(*nfsVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the NFS test volume", e2evolume)
+	}
 	return &v1.VolumeSource{
 		NFS: &v1.NFSVolumeSource{
 			Server: nv.serverHost,
@@ -136,7 +138,9 @@ func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume stor
 
 func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	nv, ok := e2evolume.(*nfsVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to NFS test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the NFS test volume", e2evolume)
+	}
 	return &v1.PersistentVolumeSource{
 		NFS: &v1.NFSVolumeSource{
 			Server: nv.serverHost,
@@ -264,7 +268,9 @@ func (g *glusterFSDriver) SkipUnsupportedTest(pattern storageframework.TestPatte
 
 func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	gv, ok := e2evolume.(*glusterVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume")
+	if !ok {
+		framework.Failf("failed to cast test volume type %T to the Gluster test volume", e2evolume)
+	}
 	name := gv.prefix + "-server"
 
 	return &v1.VolumeSource{
@@ -279,7 +285,9 @@ func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolum
 
 func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	gv, ok := e2evolume.(*glusterVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Gluster test volume")
+	if !ok {
+		framework.Failf("failed to cast test volume of type %T to the Gluster test volume", e2evolume)
+	}
 	name := gv.prefix + "-server"
 
 	return &v1.PersistentVolumeSource{
@@ -399,7 +407,9 @@ func (i *iSCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPattern)
 
 func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	iv, ok := e2evolume.(*iSCSIVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume")
+	if !ok {
+		framework.Failf("failed to cast test volume of type %T to the iSCSI test volume", e2evolume)
+	}
 
 	volSource := v1.VolumeSource{
 		ISCSI: &v1.ISCSIVolumeSource{
@@ -417,7 +427,9 @@ func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume st
 
 func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	iv, ok := e2evolume.(*iSCSIVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to iSCSI test volume")
+	if !ok {
+		framework.Failf("failed to cast test volume of type %T to the iSCSI test volume", e2evolume)
+	}
 
 	pvSource := v1.PersistentVolumeSource{
 		ISCSI: &v1.ISCSIPersistentVolumeSource{
@@ -576,7 +588,9 @@ func (r *rbdDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
 
 func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	rv, ok := e2evolume.(*rbdVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume")
+	if !ok {
+		framework.Failf("failed to cast test volume of type %T to the RBD test volume", e2evolume)
+	}
 
 	volSource := v1.VolumeSource{
 		RBD: &v1.RBDVolumeSource{
@@ -598,7 +612,9 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume stor
 
 func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	rv, ok := e2evolume.(*rbdVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to RBD test volume")
+	if !ok {
+		framework.Failf("failed to cast test volume of type %T to the RBD test volume", e2evolume)
+	}
 	f := rv.f
 	ns := f.Namespace
 
@@ -699,7 +715,9 @@ func (c *cephFSDriver) SkipUnsupportedTest(pattern storageframework.TestPattern)
 
 func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	cv, ok := e2evolume.(*cephVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the Ceph test volume", e2evolume)
+	}
 
 	return &v1.VolumeSource{
 		CephFS: &v1.CephFSVolumeSource{
@@ -715,7 +733,9 @@ func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume s
 
 func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	cv, ok := e2evolume.(*cephVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Ceph test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the Ceph test volume", e2evolume)
+	}
 
 	ns := cv.f.Namespace
 
@@ -873,7 +893,9 @@ func (h *hostPathSymlinkDriver) SkipUnsupportedTest(pattern storageframework.Tes
 
 func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	hv, ok := e2evolume.(*hostPathSymlinkVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Hostpath Symlink test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the Hostpath Symlink test volume", e2evolume)
+	}
 
 	// hostPathSymlink doesn't support readOnly volume
 	if readOnly {
@@ -1134,7 +1156,9 @@ func (g *gcePdDriver) SkipUnsupportedTest(pattern storageframework.TestPattern)
 
 func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	gv, ok := e2evolume.(*gcePdVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the GCE PD test volume", e2evolume)
+	}
 	volSource := v1.VolumeSource{
 		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
 			PDName: gv.volumeName,
@@ -1149,7 +1173,9 @@ func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume st
 
 func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	gv, ok := e2evolume.(*gcePdVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to GCE PD test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the GCE PD test volume", e2evolume)
+	}
 	pvSource := v1.PersistentVolumeSource{
 		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
 			PDName: gv.volumeName,
@@ -1268,7 +1294,9 @@ func (v *vSphereDriver) SkipUnsupportedTest(pattern storageframework.TestPattern
 
 func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	vsv, ok := e2evolume.(*vSphereVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the vSphere test volume", e2evolume)
+	}
 
 	// vSphere driver doesn't seem to support readOnly volume
 	// TODO: check if it is correct
@@ -1288,7 +1316,9 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume
 
 func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	vsv, ok := e2evolume.(*vSphereVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to vSphere test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the vSphere test volume", e2evolume)
+	}
 
 	// vSphere driver doesn't seem to support readOnly volume
 	// TODO: check if it is correct
@@ -1407,7 +1437,9 @@ func (a *azureDiskDriver) SkipUnsupportedTest(pattern storageframework.TestPatte
 
 func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	av, ok := e2evolume.(*azureDiskVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the Azure test volume", e2evolume)
+	}
 	diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
 
 	kind := v1.AzureManagedDisk
@@ -1427,7 +1459,9 @@ func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolum
 
 func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	av, ok := e2evolume.(*azureDiskVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the Azure test volume", e2evolume)
+	}
 
 	diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
 
@@ -1552,7 +1586,9 @@ func (a *awsDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
 
 func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	av, ok := e2evolume.(*awsVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the AWS test volume", e2evolume)
+	}
 	volSource := v1.VolumeSource{
 		AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
 			VolumeID: av.volumeName,
@@ -1567,7 +1603,9 @@ func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume stor
 
 func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	av, ok := e2evolume.(*awsVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to AWS test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the AWS test volume", e2evolume)
+	}
 	pvSource := v1.PersistentVolumeSource{
 		AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
 			VolumeID: av.volumeName,
@@ -1808,7 +1846,9 @@ func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity
 
 func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	lv, ok := e2evolume.(*localVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to local test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the local test volume", e2evolume)
+	}
 	return &v1.PersistentVolumeSource{
 		Local: &v1.LocalVolumeSource{
 			Path: lv.ltr.Path,
@@ -1909,7 +1949,9 @@ func (a *azureFileDriver) SkipUnsupportedTest(pattern storageframework.TestPatte
 
 func (a *azureFileDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
 	av, ok := e2evolume.(*azureFileVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the Azure test volume", e2evolume)
+	}
 	volSource := v1.VolumeSource{
 		AzureFile: &v1.AzureFileVolumeSource{
 			SecretName: av.secretName,
@@ -1922,7 +1964,9 @@ func (a *azureFileDriver) GetVolumeSource(readOnly bool, fsType string, e2evolum
 
 func (a *azureFileDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
 	av, ok := e2evolume.(*azureFileVolume)
-	framework.ExpectEqual(ok, true, "Failed to cast test volume to Azure test volume")
+	if !ok {
+		framework.Failf("Failed to cast test volume of type %T to the Azure test volume", e2evolume)
+	}
 	pvSource := v1.PersistentVolumeSource{
 		AzureFile: &v1.AzureFilePersistentVolumeSource{
 			SecretName: av.secretName,
diff --git a/test/e2e/storage/external/external.go b/test/e2e/storage/external/external.go
index a1a24674a9d..c16d25fe553 100644
--- a/test/e2e/storage/external/external.go
+++ b/test/e2e/storage/external/external.go
@@ -293,7 +293,9 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *storagefra
 		framework.ExpectNoError(err, "patch items")
 
 		sc, ok = items[0].(*storagev1.StorageClass)
-		framework.ExpectEqual(ok, true, "storage class from %s", d.StorageClass.FromFile)
+		if !ok {
+			framework.Failf("expected a storage class from %s, got %T", d.StorageClass.FromFile, items[0])
+		}
 	}
 
 	framework.ExpectNotEqual(sc, nil, "storage class is unexpectantly nil")
diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go
index fca03070ba4..95c2d4f3db5 100644
--- a/test/e2e/storage/flexvolume_mounted_volume_resize.go
+++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go
@@ -99,7 +99,9 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
 			fmt.Printf("storage class creation error: %v\n", err)
 		}
 		framework.ExpectNoError(err, "Error creating resizable storage class")
-		framework.ExpectEqual(*resizableSc.AllowVolumeExpansion, true)
+		if !*resizableSc.AllowVolumeExpansion {
+			framework.Failf("Class %s does not allow volume expansion", resizableSc.Name)
+		}
 
 		pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 			StorageClassName: &(resizableSc.Name),
diff --git a/test/e2e/storage/flexvolume_online_resize.go b/test/e2e/storage/flexvolume_online_resize.go
index 78bc2e4d41a..9c2be519699 100644
--- a/test/e2e/storage/flexvolume_online_resize.go
+++ b/test/e2e/storage/flexvolume_online_resize.go
@@ -94,7 +94,9 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
 			fmt.Printf("storage class creation error: %v\n", err)
 		}
 		framework.ExpectNoError(err, "Error creating resizable storage class: %v", err)
-		framework.ExpectEqual(*resizableSc.AllowVolumeExpansion, true)
+		if !*resizableSc.AllowVolumeExpansion {
+			framework.Failf("Class %s does not allow volume expansion", resizableSc.Name)
+		}
 
 		pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 			StorageClassName: &(resizableSc.Name),
diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go
index 0aa7ded12be..a5040492776 100644
--- a/test/e2e/storage/mounted_volume_resize.go
+++ b/test/e2e/storage/mounted_volume_resize.go
@@ -90,7 +90,9 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
 		}
 
 		sc, cleanStorageClass = testsuites.SetupStorageClass(c, newStorageClass(test, ns, "resizing"))
-		framework.ExpectEqual(*sc.AllowVolumeExpansion, true)
+		if !*sc.AllowVolumeExpansion {
+			framework.Failf("Class %s does not allow volume expansion", sc.Name)
+		}
 
 		pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 			ClaimSize: test.ClaimSize,
diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go
index b72a3fe84d4..f6114b74f6e 100644
--- a/test/e2e/storage/persistent_volumes-local.go
+++ b/test/e2e/storage/persistent_volumes-local.go
@@ -835,7 +835,9 @@ func setupLocalVolumes(config *localTestConfig, localVolumeType localVolumeType,
 	vols := []*localTestVolume{}
 	for i := 0; i < count; i++ {
 		ltrType, ok := setupLocalVolumeMap[localVolumeType]
-		framework.ExpectEqual(ok, true)
+		if !ok {
+			framework.Failf("Invalid localVolumeType: %v", localVolumeType)
+		}
 		ltr := config.ltrMgr.Create(node, ltrType, nil)
 		vols = append(vols, &localTestVolume{
 			ltr: ltr,
diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go
index 5e77d5f8db7..3adb6401fe8 100644
--- a/test/e2e/storage/regional_pd.go
+++ b/test/e2e/storage/regional_pd.go
@@ -229,7 +229,9 @@ func testZonalFailover(c clientset.Interface, ns string) {
 	err = waitForStatefulSetReplicasReady(statefulSet.Name, ns, c, framework.Poll, statefulSetReadyTimeout)
 	if err != nil {
 		pod := getPod(c, ns, regionalPDLabels)
-		framework.ExpectEqual(podutil.IsPodReadyConditionTrue(pod.Status), true, "The statefulset pod has the following conditions: %s", pod.Status.Conditions)
+		if !podutil.IsPodReadyConditionTrue(pod.Status) {
+			framework.Failf("The statefulset pod %s was expected to be ready, instead has the following conditions: %v", pod.Name, pod.Status.Conditions)
+		}
 		framework.ExpectNoError(err)
 	}
 
@@ -279,7 +281,9 @@ func testZonalFailover(c clientset.Interface, ns string) {
 	err = waitForStatefulSetReplicasReady(statefulSet.Name, ns, c, 3*time.Second, framework.RestartPodReadyAgainTimeout)
 	if err != nil {
 		pod := getPod(c, ns, regionalPDLabels)
-		framework.ExpectEqual(podutil.IsPodReadyConditionTrue(pod.Status), true, "The statefulset pod has the following conditions: %s", pod.Status.Conditions)
+		if !podutil.IsPodReadyConditionTrue(pod.Status) {
+			framework.Failf("The statefulset pod %s was expected to be ready, instead has the following conditions: %v", pod.Name, pod.Status.Conditions)
+		}
 		framework.ExpectNoError(err)
 	}
 
diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go
index 0b1c434ea12..1491880d67d 100644
--- a/test/e2e/storage/testsuites/provisioning.go
+++ b/test/e2e/storage/testsuites/provisioning.go
@@ -701,7 +701,9 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v
 				break
 			}
 		}
-		framework.ExpectEqual(found, true)
+		if !found {
+			framework.Failf("Actual access modes %v do not contain all of the claim's access modes", pv.Spec.AccessModes)
+		}
 	}
 
 	framework.ExpectEqual(pv.Spec.ClaimRef.Name, claim.ObjectMeta.Name)
diff --git a/test/e2e/storage/testsuites/volumeperf.go b/test/e2e/storage/testsuites/volumeperf.go
index 40f41697b1d..51bec7e1079 100644
--- a/test/e2e/storage/testsuites/volumeperf.go
+++ b/test/e2e/storage/testsuites/volumeperf.go
@@ -19,13 +19,14 @@ package testsuites
 import (
 	"context"
 	"fmt"
-	"github.com/davecgh/go-spew/spew"
 	"sync"
 	"time"
 
+	"github.com/davecgh/go-spew/spew"
+
 	"github.com/onsi/ginkgo/v2"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -219,7 +220,9 @@ func createPerformanceStats(stats *performanceStats, provisionCount int, pvcs []
 	var min, max, sum time.Duration
 	for _, pvc := range pvcs {
 		pvcMetric, ok := stats.perObjectInterval[pvc.Name]
-		framework.ExpectEqual(ok, true)
+		if !ok {
+			framework.Failf("PVC %s not found in perObjectInterval", pvc.Name)
+		}
 
 		elapsedTime := pvcMetric.elapsed
 		sum += elapsedTime
@@ -271,7 +274,9 @@ func newPVCWatch(f *framework.Framework, provisionCount int, pvcMetrics *perform
 		// Check if PVC entered the bound state
 		if oldPVC.Status.Phase != v1.ClaimBound && newPVC.Status.Phase == v1.ClaimBound {
 			newPVCInterval, ok := pvcMetrics.perObjectInterval[newPVC.Name]
-			framework.ExpectEqual(ok, true, "PVC %s should exist in interval map already", newPVC.Name)
+			if !ok {
+				framework.Failf("PVC %s should exist in interval map already", newPVC.Name)
+			}
 			count++
 			newPVCInterval.enterDesiredState = now
 			newPVCInterval.elapsed = now.Sub(newPVCInterval.create)
@@ -299,9 +304,13 @@ func newPVCWatch(f *framework.Framework, provisionCount int, pvcMetrics *perform
 		cache.ResourceEventHandlerFuncs{
 			UpdateFunc: func(oldObj, newObj interface{}) {
 				oldPVC, ok := oldObj.(*v1.PersistentVolumeClaim)
-				framework.ExpectEqual(ok, true)
+				if !ok {
+					framework.Failf("Expected a PVC, got instead an old object of type %T", oldObj)
+				}
 				newPVC, ok := newObj.(*v1.PersistentVolumeClaim)
-				framework.ExpectEqual(ok, true)
+				if !ok {
+					framework.Failf("Expected a PVC, got instead a new object of type %T", newObj)
+				}
 
 				checkPVCBound(oldPVC, newPVC)
 			},
diff --git a/test/e2e/storage/utils/pod.go b/test/e2e/storage/utils/pod.go
index 4531a6d293b..0c9307d2777 100644
--- a/test/e2e/storage/utils/pod.go
+++ b/test/e2e/storage/utils/pod.go
@@ -152,7 +152,10 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
 				break
 			}
 		}
-		framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
+		if !isPidChanged {
+			framework.Fail("Kubelet PID remained unchanged after restarting Kubelet")
+		}
+
 		framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
 		time.Sleep(30 * time.Second)
 	}
diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go
index 6db118b985f..25c550cc243 100644
--- a/test/e2e/storage/volume_metrics.go
+++ b/test/e2e/storage/volume_metrics.go
@@ -264,7 +264,9 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		for _, key := range volumeStatKeys {
 			kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key)
 			found := findVolumeStatMetric(kubeletKeyName, pvcNamespace, pvcName, kubeMetrics)
-			framework.ExpectEqual(found, true, "PVC %s, Namespace %s not found for %s", pvcName, pvcNamespace, kubeletKeyName)
+			if !found {
+				framework.Failf("PVC %s, Namespace %s not found for %s", pvcName, pvcNamespace, kubeletKeyName)
+			}
 		}
 
 		framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
@@ -328,7 +330,9 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		for _, key := range volumeStatKeys {
 			kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key)
 			found := findVolumeStatMetric(kubeletKeyName, pvcNamespace, pvcName, kubeMetrics)
-			framework.ExpectEqual(found, true, "PVC %s, Namespace %s not found for %s", pvcName, pvcNamespace, kubeletKeyName)
+			if !found {
+				framework.Failf("PVC %s, Namespace %s not found for %s", pvcName, pvcNamespace, kubeletKeyName)
+			}
 		}
 
 		framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
@@ -428,7 +432,9 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 		// Forced detach metric should be present
 		forceDetachKey := "attachdetach_controller_forced_detaches"
 		_, ok := updatedControllerMetrics[forceDetachKey]
-		framework.ExpectEqual(ok, true, "Key %q not found in A/D Controller metrics", forceDetachKey)
+		if !ok {
+			framework.Failf("Key %q not found in A/D Controller metrics", forceDetachKey)
+		}
 
 		// Wait and validate
 		totalVolumesKey := "attachdetach_controller_total_volumes"
@@ -715,10 +721,14 @@ func verifyMetricCount(oldMetrics, newMetrics *storageControllerMetrics, metricN
 
 	newLatencyCount, ok := newMetrics.latencyMetrics[metricName]
 	if !expectFailure {
-		framework.ExpectEqual(ok, true, "Error getting updated latency metrics for %s", metricName)
+		if !ok {
+			framework.Failf("Error getting updated latency metrics for %s", metricName)
+		}
 	}
 	newStatusCounts, ok := newMetrics.statusMetrics[metricName]
-	framework.ExpectEqual(ok, true, "Error getting updated status metrics for %s", metricName)
+	if !ok {
+		framework.Failf("Error getting updated status metrics for %s", metricName)
+	}
 
 	newStatusCount := int64(0)
 	if expectFailure {
diff --git a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
index bf483051f5f..6a6fd369039 100644
--- a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
+++ b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
@@ -108,7 +108,9 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
 		ginkgo.By("Verify disk should be attached to the node")
 		isAttached, err := diskIsAttached(volumePath, node)
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(isAttached, true, "disk is not attached with the node")
+		if !isAttached {
+			framework.Failf("Disk %s is not attached to the node", volumePath)
+		}
 	})
 
 	ginkgo.AfterEach(func() {
diff --git a/test/e2e/storage/vsphere/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go
index fff12d5e614..8992342abcc 100644
--- a/test/e2e/storage/vsphere/pv_reclaimpolicy.go
+++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go
@@ -131,7 +131,9 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
 			ginkgo.By("Verify the volume is attached to the node")
 			isVolumeAttached, verifyDiskAttachedError := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
 			framework.ExpectNoError(verifyDiskAttachedError)
-			framework.ExpectEqual(isVolumeAttached, true)
+			if !isVolumeAttached {
+				framework.Failf("Disk %s is not attached to the node %s", pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
+			}
 
 			ginkgo.By("Verify the volume is accessible and available in the pod")
 			verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv})
diff --git a/test/e2e/storage/vsphere/vsphere_scale.go b/test/e2e/storage/vsphere/vsphere_scale.go
index 30101a6166c..b245a869c2e 100644
--- a/test/e2e/storage/vsphere/vsphere_scale.go
+++ b/test/e2e/storage/vsphere/vsphere_scale.go
@@ -86,8 +86,12 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
 		volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod)
 
 		numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances)
-		framework.ExpectNotEqual(numberOfInstances > 5, true, "Maximum allowed instances are 5")
-		framework.ExpectNotEqual(numberOfInstances > volumeCount, true, "Number of instances should be less than the total volume count")
+		if numberOfInstances > 5 {
+			framework.Failf("Maximum 5 instances allowed, got instead: %v", numberOfInstances)
+		}
+		if numberOfInstances > volumeCount {
+			framework.Failf("Number of instances: %v cannot be greater than volume count: %v", numberOfInstances, volumeCount)
+		}
 
 		policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
 		datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
diff --git a/test/e2e/storage/vsphere/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go
index 71e7fe6ec85..5b63bbdfe23 100644
--- a/test/e2e/storage/vsphere/vsphere_statefulsets.go
+++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go
@@ -113,7 +113,9 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
 		for _, sspod := range ssPodsBeforeScaleDown.Items {
 			_, err := client.CoreV1().Pods(namespace).Get(context.TODO(), sspod.Name, metav1.GetOptions{})
 			if err != nil {
-				framework.ExpectEqual(apierrors.IsNotFound(err), true)
+				if !apierrors.IsNotFound(err) {
+					framework.Failf("Error in getting Pod %s: %v", sspod.Name, err)
+				}
 				for _, volumespec := range sspod.Spec.Volumes {
 					if volumespec.PersistentVolumeClaim != nil {
 						vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
@@ -146,9 +148,13 @@ var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() {
 					vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
 					framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
 					// Verify scale up has re-attached the same volumes and not introduced new volume
-					framework.ExpectEqual(volumesBeforeScaleDown[vSpherediskPath] == "", false)
+					if volumesBeforeScaleDown[vSpherediskPath] == "" {
+						framework.Failf("Volume: %q was not attached to the Node: %q before scale down", vSpherediskPath, sspod.Spec.NodeName)
+					}
 					isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName)
-					framework.ExpectEqual(isVolumeAttached, true)
+					if !isVolumeAttached {
+						framework.Failf("Volume: %q is not attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
+					}
 					framework.ExpectNoError(verifyDiskAttachedError)
 				}
 			}
diff --git a/test/e2e/storage/vsphere/vsphere_stress.go b/test/e2e/storage/vsphere/vsphere_stress.go
index 3eb9065081c..a4ceed43fe3 100644
--- a/test/e2e/storage/vsphere/vsphere_stress.go
+++ b/test/e2e/storage/vsphere/vsphere_stress.go
@@ -72,11 +72,17 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
 		// Resulting 120 Volumes and POD Creation. Volumes will be provisioned with each different types of Storage Class,
 		// Each iteration creates PVC, verify PV is provisioned, then creates a pod, verify volume is attached to the node, and then delete the pod and delete pvc.
 		instances = GetAndExpectIntEnvVar(VCPStressInstances)
-		framework.ExpectEqual(instances <= volumesPerNode*len(nodeList.Items), true, fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items)))
-		framework.ExpectEqual(instances > len(scNames), true, "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes")
+		if instances > volumesPerNode*len(nodeList.Items) {
+			framework.Failf("Number of Instances should be less or equal: %v, got instead %v", volumesPerNode*len(nodeList.Items), instances)
+		}
+		if instances <= len(scNames) {
+			framework.Failf("VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes, got instead %v", instances)
+		}
 
 		iterations = GetAndExpectIntEnvVar(VCPStressIterations)
-		framework.ExpectEqual(iterations > 0, true, "VCP_STRESS_ITERATIONS should be greater than 0")
+		if iterations <= 0 {
+			framework.Failf("VCP_STRESS_ITERATIONS should be greater than 0, got instead %v", iterations)
+		}
 
 		policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
 		datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
@@ -160,7 +166,9 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
 
 		ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
 		isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
-		framework.ExpectEqual(isVolumeAttached, true)
+		if !isVolumeAttached {
+			framework.Failf("Volume: %s is not attached to the node: %v", persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
+		}
 		framework.ExpectNoError(verifyDiskAttachedError)
 
 		ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go
index 6c539dc1cdd..ce5b7c4a59b 100644
--- a/test/e2e/storage/vsphere/vsphere_utils.go
+++ b/test/e2e/storage/vsphere/vsphere_utils.go
@@ -378,7 +378,9 @@ func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persiste
 		// Verify disks are attached to the node
 		isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(isAttached, true, fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
+		if !isAttached {
+			framework.Failf("disk %v is not attached to the node: %v", pv.Spec.VsphereVolume.VolumePath, nodeName)
+		}
 		// Verify Volumes are accessible
 		filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
 		_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
@@ -788,7 +790,9 @@ func invokeVCenterServiceControl(command, service, host string) error {
 func expectVolumeToBeAttached(nodeName, volumePath string) {
 	isAttached, err := diskIsAttached(volumePath, nodeName)
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(isAttached, true, fmt.Sprintf("disk: %s is not attached with the node", volumePath))
+	if !isAttached {
+		framework.Failf("Volume: %s is not attached to the node: %v", volumePath, nodeName)
+	}
 }
 
 // expectVolumesToBeAttached checks if the given Volumes are attached to the
diff --git a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go
index f9cd9a2ea32..72c99fd1bfa 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go
@@ -152,7 +152,9 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
 	gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed())
 
 	isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
-	framework.ExpectEqual(isAttached, true)
+	if !isAttached {
+		framework.Failf("Volume: %s is not attached to the node: %v", pv.Spec.VsphereVolume.VolumePath, nodeName)
+	}
 	framework.ExpectNoError(err)
 
 	ginkgo.By("Verify Disk Format")
@@ -198,7 +200,9 @@ func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath
 		}
 	}
 
-	framework.ExpectEqual(diskFound, true, "Failed to find disk")
+	if !diskFound {
+		framework.Failf("Failed to find disk: %s", pvVolumePath)
+	}
 	isDiskFormatCorrect := false
 	if diskFormat == "eagerzeroedthick" {
 		if eagerlyScrub == true && thinProvisioned == false {
diff --git a/test/e2e/storage/vsphere/vsphere_volume_fstype.go b/test/e2e/storage/vsphere/vsphere_volume_fstype.go
index bdcc53e5e59..80837f0acbe 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_fstype.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_fstype.go
@@ -149,7 +149,9 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
 			isFound = true
 		}
 	}
-	framework.ExpectEqual(isFound, true, "Unable to verify MountVolume.MountDevice failure")
+	if !isFound {
+		framework.Failf("Unable to verify MountVolume.MountDevice failure for volume %s", persistentvolumes[0].Name)
+	}
 }
 
 func createVolume(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
index e93683a67da..2501af11c49 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
@@ -55,7 +55,9 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
 		ginkgo.By("Get total Ready nodes")
 		nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(len(nodeList.Items) > 1, true, "At least 2 nodes are required for this test")
+		if len(nodeList.Items) < 2 {
+			framework.Failf("At least 2 nodes are required for this test, got instead: %v", len(nodeList.Items))
+		}
 
 		totalNodesCount := len(nodeList.Items)
 		nodeVM := nodeList.Items[0]
diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
index 6037a3e6e56..30131d4be92 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
@@ -61,7 +61,9 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout))
 		nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(len(nodeList.Items) > 1, true, "At least 2 nodes are required for this test")
+		if len(nodeList.Items) < 2 {
+			framework.Failf("At least 2 nodes are required for this test, got instead: %v", len(nodeList.Items))
+		}
 	})
 
 	/*
@@ -113,7 +115,9 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 		ginkgo.By(fmt.Sprintf("Verify disk is attached to the node: %v", node1))
 		isAttached, err := diskIsAttached(volumePath, node1)
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(isAttached, true, "Disk is not attached to the node")
+		if !isAttached {
+			framework.Failf("Volume: %s is not attached to the node: %v", volumePath, node1)
+		}
 
 		ginkgo.By(fmt.Sprintf("Power off the node: %v", node1))
 
diff --git a/test/e2e/storage/vsphere/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go
index 7688cc25dd3..2840e0834da 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_placement.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go
@@ -374,7 +374,9 @@ func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace st
 	for _, volumePath := range volumePaths {
 		isAttached, err := diskIsAttached(volumePath, nodeName)
 		framework.ExpectNoError(err)
-		framework.ExpectEqual(isAttached, true, "disk:"+volumePath+" is not attached with the node")
+		if !isAttached {
+			framework.Failf("Volume: %s is not attached to the node: %v", volumePath, nodeName)
+		}
 	}
 	return pod
 }
diff --git a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go
index 9a7c811558a..a9ca1b3460b 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go
@@ -341,7 +341,9 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, control
 	nodeInfo := TestContext.NodeMapper.GetNodeInfo(controlPlaneNode)
 	isVMPresentFlag, err := nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)
 	framework.ExpectNoError(err)
-	framework.ExpectEqual(isVMPresentFlag, false, errorMsg)
+	if isVMPresentFlag {
+		framework.Failf("VM with name %s is present, %s", dummyVMFullName, errorMsg)
+	}
 }
 
 func getControlPlaneNode(client clientset.Interface) (string, error) {