From 542b07cd6a0eca184fe946dba60a2af1b2c8acff Mon Sep 17 00:00:00 2001
From: Kenichi Omichi
Date: Tue, 2 Apr 2019 16:10:56 +0000
Subject: [PATCH] Use framework.ExpectNoError() for e2e/storage

The e2e test framework has ExpectNoError() for readable test code.
This replaces Expect(err).NotTo(HaveOccurred()) with it for e2e/storage.
---
 test/e2e/storage/csi_volumes.go               |  2 +-
 test/e2e/storage/detach_mounted.go            | 13 +++--
 test/e2e/storage/empty_dir_wrapper.go         |  6 +--
 test/e2e/storage/ephemeral_volume.go          |  3 +-
 .../flexvolume_mounted_volume_resize.go       | 20 ++++----
 test/e2e/storage/flexvolume_online_resize.go  | 18 +++----
 .../generic_persistent_volume-disruptive.go   |  6 +--
 test/e2e/storage/mounted_volume_resize.go     | 18 +++----
 .../nfs_persistent_volume-disruptive.go       | 30 +++++------
 test/e2e/storage/pd.go                        |  2 +-
 test/e2e/storage/persistent_volumes-gce.go    | 14 +++---
 test/e2e/storage/persistent_volumes-local.go  | 42 ++++++++--------
 test/e2e/storage/persistent_volumes.go        | 23 ++++-----
 test/e2e/storage/pv_protection.go             | 16 +++---
 test/e2e/storage/pvc_protection.go            | 30 +++++------
 test/e2e/storage/regional_pd.go               | 36 ++++++-------
 test/e2e/storage/subpath.go                   |  5 +-
 test/e2e/storage/volume_expand.go             | 30 +++++------
 test/e2e/storage/volume_metrics.go            | 50 +++++++++----------
 19 files changed, 180 insertions(+), 184 deletions(-)

diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go
index 0d7a2953c82..6056a10067a 100644
--- a/test/e2e/storage/csi_volumes.go
+++ b/test/e2e/storage/csi_volumes.go
@@ -134,7 +134,7 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela

 	// Use different zones for pod and PV
 	zones, err := framework.GetClusterZones(cs)
-	Expect(err).ToNot(HaveOccurred())
+	framework.ExpectNoError(err)
 	Expect(zones.Len()).To(BeNumerically(">=", 2))
 	zonesList := zones.UnsortedList()
 	podZoneIndex := rand.Intn(zones.Len())
diff --git a/test/e2e/storage/detach_mounted.go b/test/e2e/storage/detach_mounted.go
index 4ee571e8cfb..5097672e407 100644
--- a/test/e2e/storage/detach_mounted.go
+++ b/test/e2e/storage/detach_mounted.go
@@ -32,7 +32,6 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
 )

 var (
@@ -80,31 +79,31 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
 		clientPod := getFlexVolumePod(volumeSource, node.Name)
 		By("Creating pod that uses slow format volume")
 		pod, err := cs.CoreV1().Pods(ns.Name).Create(clientPod)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)

 		uniqueVolumeName := getUniqueVolumeName(pod, driverInstallAs)

 		By("waiting for volumes to be attached to node")
 		err = waitForVolumesAttached(cs, node.Name, uniqueVolumeName)
-		Expect(err).NotTo(HaveOccurred(), "while waiting for volume to attach to %s node", node.Name)
+		framework.ExpectNoError(err, "while waiting for volume to attach to %s node", node.Name)

 		By("waiting for volume-in-use on the node after pod creation")
 		err = waitForVolumesInUse(cs, node.Name, uniqueVolumeName)
-		Expect(err).NotTo(HaveOccurred(), "while waiting for volume in use")
+		framework.ExpectNoError(err, "while waiting for volume in use")

 		By("waiting for kubelet to start mounting the volume")
 		time.Sleep(20 * time.Second)

 		By("Deleting the flexvolume pod")
 		err = framework.DeletePodWithWait(f, cs, pod)
-		Expect(err).NotTo(HaveOccurred(), "in deleting the pod")
+		framework.ExpectNoError(err, "in deleting the pod")

 		// Wait a bit for node to sync the volume status
 		time.Sleep(30 * time.Second)

 		By("waiting for volume-in-use on the node after pod deletion")
 		err = waitForVolumesInUse(cs, node.Name, uniqueVolumeName)
-		Expect(err).NotTo(HaveOccurred(), "while waiting for volume in use")
+		framework.ExpectNoError(err, "while waiting for volume in use")

 		// Wait for 110s because mount device operation has a sleep of 120 seconds
 		// we previously already waited for 30s.
@@ -112,7 +111,7 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {

 		By("waiting for volume to disappear from node in-use")
 		err = waitForVolumesNotInUse(cs, node.Name, uniqueVolumeName)
-		Expect(err).NotTo(HaveOccurred(), "while waiting for volume to be removed from in-use")
+		framework.ExpectNoError(err, "while waiting for volume to be removed from in-use")

 		By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
 		uninstallFlex(cs, &node, "k8s", driverInstallAs)
diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go
index d3204aed6a6..48373f660aa 100644
--- a/test/e2e/storage/empty_dir_wrapper.go
+++ b/test/e2e/storage/empty_dir_wrapper.go
@@ -310,7 +310,7 @@ func deleteConfigMaps(f *framework.Framework, configMapNames []string) {
 	By("Cleaning up the configMaps")
 	for _, configMapName := range configMapNames {
 		err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMapName, nil)
-		Expect(err).NotTo(HaveOccurred(), "unable to delete configMap %v", configMapName)
+		framework.ExpectNoError(err, "unable to delete configMap %v", configMapName)
 	}
 }

@@ -403,7 +403,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
 		},
 	}
 	_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rc)
-	Expect(err).NotTo(HaveOccurred(), "error creating replication controller")
+	framework.ExpectNoError(err, "error creating replication controller")

 	defer func() {
 		err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
@@ -421,6 +421,6 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
 			continue
 		}
 		err = f.WaitForPodRunning(pod.Name)
-		Expect(err).NotTo(HaveOccurred(), "Failed waiting for pod %s to enter running state", pod.Name)
+		framework.ExpectNoError(err, "Failed waiting for pod %s to enter running state", pod.Name)
 	}
 }
diff --git a/test/e2e/storage/ephemeral_volume.go b/test/e2e/storage/ephemeral_volume.go
index 6f1f9966766..d7ce179e9af 100644
--- a/test/e2e/storage/ephemeral_volume.go
+++ b/test/e2e/storage/ephemeral_volume.go
@@ -30,7 +30,6 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
 )

 var (
@@ -55,7 +54,7 @@ var _ = utils.SIGDescribe("Ephemeralstorage", func() {
 		It(fmt.Sprintf("should allow deletion of pod with invalid volume : %s", testSource.volumeType), func() {
 			pod := testEphemeralVolumePod(f, testSource.volumeType, testSource.source)
 			pod, err := c.CoreV1().Pods(f.Namespace.Name).Create(pod)
-			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(err)

 			// Allow it to sleep for 30 seconds
 			time.Sleep(30 * time.Second)
diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go
index 92791c1b784..5844c6b1814 100644
--- a/test/e2e/storage/flexvolume_mounted_volume_resize.go
+++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go
@@ -85,13 +85,13 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
 		if err != nil {
 			fmt.Printf("storage class creation error: %v\n", err)
 		}
-		Expect(err).NotTo(HaveOccurred(), "Error creating resizable storage class")
+		framework.ExpectNoError(err, "Error creating resizable storage class")
 		Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue())

 		pvc = getClaim("2Gi", ns)
 		pvc.Spec.StorageClassName = &resizableSc.Name
 		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
-		Expect(err).NotTo(HaveOccurred(), "Error creating pvc")
+		framework.ExpectNoError(err, "Error creating pvc")
 	})

 	framework.AddCleanupAction(func() {
@@ -132,25 +132,25 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {
 		})

 		pv, err = framework.CreatePV(c, pv)
-		Expect(err).NotTo(HaveOccurred(), "Error creating pv %v", err)
+		framework.ExpectNoError(err, "Error creating pv %v", err)

 		By("Waiting for PVC to be in bound phase")
 		pvcClaims := []*v1.PersistentVolumeClaim{pvc}
 		var pvs []*v1.PersistentVolume

 		pvs, err = framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
-		Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
+		framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
 		Expect(len(pvs)).To(Equal(1))

 		By("Creating a deployment with the provisioned volume")
 		deployment, err := framework.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
-		Expect(err).NotTo(HaveOccurred(), "Failed creating deployment %v", err)
+		framework.ExpectNoError(err, "Failed creating deployment %v", err)
 		defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{})

 		By("Expanding current pvc")
 		newSize := resource.MustParse("6Gi")
 		pvc, err = expandPVCSize(pvc, newSize, c)
-		Expect(err).NotTo(HaveOccurred(), "While updating pvc for more size")
+		framework.ExpectNoError(err, "While updating pvc for more size")
 		Expect(pvc).NotTo(BeNil())

 		pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
@@ -160,7 +160,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {

 		By("Waiting for cloudprovider resize to finish")
 		err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod)
-		Expect(err).NotTo(HaveOccurred(), "While waiting for pvc resize to finish")
+		framework.ExpectNoError(err, "While waiting for pvc resize to finish")

 		By("Getting a pod from deployment")
 		podList, err := framework.GetPodsForDeployment(c, deployment)
@@ -169,15 +169,15 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() {

 		By("Deleting the pod from deployment")
 		err = framework.DeletePodWithWait(f, c, &pod)
-		Expect(err).NotTo(HaveOccurred(), "while deleting pod for resizing")
+		framework.ExpectNoError(err, "while deleting pod for resizing")

 		By("Waiting for deployment to create new pod")
 		pod, err = waitForDeploymentToRecreatePod(c, deployment)
-		Expect(err).NotTo(HaveOccurred(), "While waiting for pod to be recreated")
+		framework.ExpectNoError(err, "While waiting for pod to be recreated")

 		By("Waiting for file system resize to finish")
 		pvc, err = waitForFSResize(pvc, c)
-		Expect(err).NotTo(HaveOccurred(), "while waiting for fs resize to finish")
+		framework.ExpectNoError(err, "while waiting for fs resize to finish")

 		pvcConditions := pvc.Status.Conditions
 		Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions")
diff --git a/test/e2e/storage/flexvolume_online_resize.go b/test/e2e/storage/flexvolume_online_resize.go
index da88966cf19..23bb27cdb2c 100644
--- a/test/e2e/storage/flexvolume_online_resize.go
+++ b/test/e2e/storage/flexvolume_online_resize.go
@@ -84,13 +84,13 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
 		if err != nil {
 			fmt.Printf("storage class creation error: %v\n", err)
 		}
-		Expect(err).NotTo(HaveOccurred(), "Error creating resizable storage class: %v", err)
+		framework.ExpectNoError(err, "Error creating resizable storage class: %v", err)
 		Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue())

 		pvc = getClaim("2Gi", ns)
 		pvc.Spec.StorageClassName = &resizableSc.Name
 		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
-		Expect(err).NotTo(HaveOccurred(), "Error creating pvc: %v", err)
+		framework.ExpectNoError(err, "Error creating pvc: %v", err)

 	})

@@ -132,30 +132,30 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
 		})

 		pv, err = framework.CreatePV(c, pv)
-		Expect(err).NotTo(HaveOccurred(), "Error creating pv %v", err)
+		framework.ExpectNoError(err, "Error creating pv %v", err)

 		By("Waiting for PVC to be in bound phase")
 		pvcClaims := []*v1.PersistentVolumeClaim{pvc}
 		var pvs []*v1.PersistentVolume

 		pvs, err = framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
-		Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
+		framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
 		Expect(len(pvs)).To(Equal(1))

 		var pod *v1.Pod
 		By("Creating pod")
 		pod, err = framework.CreateNginxPod(c, ns, nodeKeyValueLabel, pvcClaims)
-		Expect(err).NotTo(HaveOccurred(), "Failed to create pod %v", err)
+		framework.ExpectNoError(err, "Failed to create pod %v", err)
 		defer framework.DeletePodWithWait(f, c, pod)

 		By("Waiting for pod to go to 'running' state")
 		err = f.WaitForPodRunning(pod.ObjectMeta.Name)
-		Expect(err).NotTo(HaveOccurred(), "Pod didn't go to 'running' state %v", err)
+		framework.ExpectNoError(err, "Pod didn't go to 'running' state %v", err)

 		By("Expanding current pvc")
 		newSize := resource.MustParse("6Gi")
 		pvc, err = expandPVCSize(pvc, newSize, c)
-		Expect(err).NotTo(HaveOccurred(), "While updating pvc for more size")
+		framework.ExpectNoError(err, "While updating pvc for more size")
 		Expect(pvc).NotTo(BeNil())

 		pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
@@ -165,11 +165,11 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa

 		By("Waiting for cloudprovider resize to finish")
 		err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod)
-		Expect(err).NotTo(HaveOccurred(), "While waiting for pvc resize to finish")
+		framework.ExpectNoError(err, "While waiting for pvc resize to finish")

 		By("Waiting for file system resize to finish")
 		pvc, err = waitForFSResize(pvc, c)
-		Expect(err).NotTo(HaveOccurred(), "while waiting for fs resize to finish")
+		framework.ExpectNoError(err, "while waiting for fs resize to finish")

 		pvcConditions := pvc.Status.Conditions
 		Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions")
diff --git a/test/e2e/storage/generic_persistent_volume-disruptive.go b/test/e2e/storage/generic_persistent_volume-disruptive.go
index 8b428f6f204..39468ca7987 100644
--- a/test/e2e/storage/generic_persistent_volume-disruptive.go
+++ b/test/e2e/storage/generic_persistent_volume-disruptive.go
@@ -92,14 +92,14 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
 	}
 	pvc := newClaim(test, ns, "default")
 	pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
-	Expect(err).NotTo(HaveOccurred(), "Error creating pvc")
+	framework.ExpectNoError(err, "Error creating pvc")
 	pvcClaims := []*v1.PersistentVolumeClaim{pvc}
 	pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
-	Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
+	framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
 	Expect(len(pvs)).To(Equal(1))

 	By("Creating a pod with dynamically provisioned volume")
 	pod, err := framework.CreateNginxPod(c, ns, nil, pvcClaims)
-	Expect(err).NotTo(HaveOccurred(), "While creating pods for kubelet restart test")
+	framework.ExpectNoError(err, "While creating pods for kubelet restart test")
 	return pod, pvc, pvs[0]
 }
diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go
index 20ba0a64edf..5633e5b8bac 100644
--- a/test/e2e/storage/mounted_volume_resize.go
+++ b/test/e2e/storage/mounted_volume_resize.go
@@ -80,13 +80,13 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
 			DelayBinding: true,
 		}
 		resizableSc, err = createStorageClass(test, ns, "resizing", c)
-		Expect(err).NotTo(HaveOccurred(), "Error creating resizable storage class")
+		framework.ExpectNoError(err, "Error creating resizable storage class")
 		Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue())

 		pvc = newClaim(test, ns, "default")
 		pvc.Spec.StorageClassName = &resizableSc.Name
 		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
-		Expect(err).NotTo(HaveOccurred(), "Error creating pvc")
+		framework.ExpectNoError(err, "Error creating pvc")
 	})

 	framework.AddCleanupAction(func() {
@@ -115,19 +115,19 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
 		// We should consider adding a unit test that exercises this better.
 		By("Creating a deployment with selected PVC")
 		deployment, err := framework.CreateDeployment(c, int32(1), map[string]string{"test": "app"}, nodeKeyValueLabel, ns, pvcClaims, "")
-		Expect(err).NotTo(HaveOccurred(), "Failed creating deployment %v", err)
+		framework.ExpectNoError(err, "Failed creating deployment %v", err)
 		defer c.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{})

 		// PVC should be bound at this point
 		By("Checking for bound PVC")
 		pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
-		Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
+		framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
 		Expect(len(pvs)).To(Equal(1))

 		By("Expanding current pvc")
 		newSize := resource.MustParse("6Gi")
 		pvc, err = expandPVCSize(pvc, newSize, c)
-		Expect(err).NotTo(HaveOccurred(), "While updating pvc for more size")
+		framework.ExpectNoError(err, "While updating pvc for more size")
 		Expect(pvc).NotTo(BeNil())

 		pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
@@ -137,7 +137,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {

 		By("Waiting for cloudprovider resize to finish")
 		err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod)
-		Expect(err).NotTo(HaveOccurred(), "While waiting for pvc resize to finish")
+		framework.ExpectNoError(err, "While waiting for pvc resize to finish")

 		By("Getting a pod from deployment")
 		podList, err := framework.GetPodsForDeployment(c, deployment)
@@ -146,15 +146,15 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {

 		By("Deleting the pod from deployment")
 		err = framework.DeletePodWithWait(f, c, &pod)
-		Expect(err).NotTo(HaveOccurred(), "while deleting pod for resizing")
+		framework.ExpectNoError(err, "while deleting pod for resizing")

 		By("Waiting for deployment to create new pod")
 		pod, err = waitForDeploymentToRecreatePod(c, deployment)
-		Expect(err).NotTo(HaveOccurred(), "While waiting for pod to be recreated")
+		framework.ExpectNoError(err, "While waiting for pod to be recreated")

 		By("Waiting for file system resize to finish")
 		pvc, err = waitForFSResize(pvc, c)
-		Expect(err).NotTo(HaveOccurred(), "while waiting for fs resize to finish")
+		framework.ExpectNoError(err, "while waiting for fs resize to finish")

 		pvcConditions := pvc.Status.Conditions
 		Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions")
diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go
index f081809dab8..2c40488ddd5 100644
--- a/test/e2e/storage/nfs_persistent_volume-disruptive.go
+++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go
@@ -121,7 +121,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {

 			By("Initializing first PD with PVPVC binding")
 			pvSource1, diskName1 = framework.CreateGCEVolume()
-			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(err)
 			pvConfig1 = framework.PersistentVolumeConfig{
 				NamePrefix: "gce-",
 				Labels:     volLabel,
@@ -129,12 +129,12 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 				Prebind:    nil,
 			}
 			pv1, pvc1, err = framework.CreatePVPVC(c, pvConfig1, pvcConfig, ns, false)
-			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(err)
 			framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv1, pvc1))

 			By("Initializing second PD with PVPVC binding")
 			pvSource2, diskName2 = framework.CreateGCEVolume()
-			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(err)
 			pvConfig2 = framework.PersistentVolumeConfig{
 				NamePrefix: "gce-",
 				Labels:     volLabel,
@@ -142,12 +142,12 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 				Prebind:    nil,
 			}
 			pv2, pvc2, err = framework.CreatePVPVC(c, pvConfig2, pvcConfig, ns, false)
-			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(err)
 			framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv2, pvc2))

 			By("Attaching both PVC's to a single pod")
 			clientPod, err = framework.CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
-			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(err)
 		})

 		AfterEach(func() {
@@ -176,20 +176,20 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 		It("should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash", func() {
 			By("Deleting PVC for volume 2")
 			err = framework.DeletePersistentVolumeClaim(c, pvc2.Name, ns)
-			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(err)
 			pvc2 = nil

 			By("Restarting the kube-controller-manager")
 			err = framework.RestartControllerManager()
-			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(err)
 			err = framework.WaitForControllerManagerUp()
-			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(err)
 			framework.Logf("kube-controller-manager restarted")

 			By("Observing the kube-controller-manager healthy for at least 2 minutes")
 			// Continue checking for 2 minutes to make sure kube-controller-manager is healthy
 			err = framework.CheckForControllerManagerHealthy(2 * time.Minute)
-			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(err)
 		})
 	})

@@ -252,27 +252,27 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framew
 			framework.DeletePersistentVolume(c, pv.Name)
 		}
 	}()
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	pod := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
 	pod.Spec.NodeName = nodeName
 	framework.Logf("Creating NFS client pod.")
 	pod, err = c.CoreV1().Pods(ns).Create(pod)
 	framework.Logf("NFS client Pod %q created on Node %q", pod.Name, nodeName)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	defer func() {
 		if err != nil {
 			framework.DeletePodWithWait(f, c, pod)
 		}
 	}()
 	err = framework.WaitForPodRunningInNamespace(c, pod)
-	Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Pod %q timed out waiting for phase: Running", pod.Name))
+	framework.ExpectNoError(err, fmt.Sprintf("Pod %q timed out waiting for phase: Running", pod.Name))
 	// Return created api objects
 	pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	return pod, pv, pvc
 }
diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go
index 5887336d658..e1234988fb4 100644
--- a/test/e2e/storage/pd.go
+++ b/test/e2e/storage/pd.go
@@ -424,7 +424,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 					return true, nil
 				}
 			})
-			Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to evict host0Pod after %v", podEvictTimeout))
+			framework.ExpectNoError(err, fmt.Sprintf("failed to evict host0Pod after %v", podEvictTimeout))
 		}

 		By("waiting for pd to detach from host0")
diff --git a/test/e2e/storage/persistent_volumes-gce.go b/test/e2e/storage/persistent_volumes-gce.go
index e3a6b422725..1019adf902a 100644
--- a/test/e2e/storage/persistent_volumes-gce.go
+++ b/test/e2e/storage/persistent_volumes-gce.go
@@ -33,9 +33,9 @@ import (
 // verifyGCEDiskAttached performs a sanity check to verify the PD attached to the node
 func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
 	gceCloud, err := gce.GetGCECloud()
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	isAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	return isAttached
 }

@@ -43,12 +43,12 @@ func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
 func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig framework.PersistentVolumeConfig, pvcConfig framework.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
 	By("Creating the PV and PVC")
 	pv, pvc, err := framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, isPrebound)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))

 	By("Creating the Client Pod")
 	clientPod, err := framework.CreateClientPod(c, ns, pvc)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	return clientPod, pv, pvc
 }

@@ -81,7 +81,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
 		framework.SkipUnlessProviderIs("gce", "gke")
 		By("Initializing Test Spec")
 		diskName, err = framework.CreatePDWithRetry()
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)
 		pvConfig = framework.PersistentVolumeConfig{
 			NamePrefix: "gce-",
 			Labels:     volLabel,
@@ -152,10 +152,10 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {

 		By("Deleting the Namespace")
 		err := c.CoreV1().Namespaces().Delete(ns, nil)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)

 		err = framework.WaitForNamespacesDeleted(c, []string{ns}, framework.DefaultNamespaceDeletionTimeout)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)

 		By("Verifying Persistent Disk detaches")
 		framework.ExpectNoError(waitForPDDetach(diskName, node), "PD ", diskName, " did not detach")
diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go
index e6c0618e932..071dd4ef283 100644
--- a/test/e2e/storage/persistent_volumes-local.go
+++ b/test/e2e/storage/persistent_volumes-local.go
@@ -212,7 +212,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 			BeforeEach(func() {
 				By("Creating pod1")
 				pod1, pod1Err = createLocalPod(config, testVol, nil)
-				Expect(pod1Err).NotTo(HaveOccurred())
+				framework.ExpectNoError(pod1Err)
 				verifyLocalPod(config, testVol, pod1, config.node0.Name)

 				writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
@@ -287,7 +287,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 			pod1 := createPodWithFsGroupTest(config, testVol, fsGroup1, fsGroup1)
 			By("Deleting first pod")
 			err := framework.DeletePodWithWait(f, config.client, pod1)
-			Expect(err).NotTo(HaveOccurred(), "while deleting first pod")
+			framework.ExpectNoError(err, "while deleting first pod")
 			By("Create second pod and check fsGroup is the new one")
 			pod2 := createPodWithFsGroupTest(config, testVol, fsGroup2, fsGroup2)
 			By("Deleting second pod")
@@ -328,7 +328,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {

 			pod := makeLocalPodWithNodeName(config, testVol, config.nodes[1].Name)
 			pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
-			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(err)

 			err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
 			Expect(err).To(HaveOccurred())
@@ -554,7 +554,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {

 					pod := framework.MakeSecPod(config.ns, pvcs, false, "sleep 1", false, false, selinuxLabel, nil)
 					pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
-					Expect(err).NotTo(HaveOccurred())
+					framework.ExpectNoError(err)
 					pods[pod.Name] = pod
 					numCreated++
 				}
@@ -600,7 +600,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {

 				return numFinished == totalPods, nil
 			})
-			Expect(err).ToNot(HaveOccurred())
+			framework.ExpectNoError(err)
 		})
 	})

@@ -647,7 +647,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 			for i := 0; i < count; i++ {
 				pod := framework.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{pvc}, false, "", false, false, selinuxLabel, nil)
 				pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
-				Expect(err).NotTo(HaveOccurred())
+				framework.ExpectNoError(err)
 				pods[pod.Name] = pod
 			}
 			By("Wait for all pods are running")
@@ -665,7 +665,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 				}
 				return runningPods == count, nil
 			})
-			Expect(err).ToNot(HaveOccurred())
+			framework.ExpectNoError(err)
 		})
 	})
 })
@@ -697,10 +697,10 @@ func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeTyp
 	pod := makeLocalPodFunc(config, testVol, nodeName)

 	pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)

 	err = framework.WaitForPodNameUnschedulableInNamespace(config.client, pod.Name, pod.Namespace)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 }

 // The tests below are run against multiple mount point types
@@ -709,7 +709,7 @@ func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeTyp
 func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
 	By("Creating pod1 to write to the PV")
 	pod1, pod1Err := createLocalPod(config, testVol, nil)
-	Expect(pod1Err).NotTo(HaveOccurred())
+	framework.ExpectNoError(pod1Err)
 	verifyLocalPod(config, testVol, pod1, config.node0.Name)

 	writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
@@ -722,7 +722,7 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {

 	By("Creating pod2 to read from the PV")
 	pod2, pod2Err := createLocalPod(config, testVol, nil)
-	Expect(pod2Err).NotTo(HaveOccurred())
+	framework.ExpectNoError(pod2Err)
 	verifyLocalPod(config, testVol, pod2, config.node0.Name)

 	// testFileContent was written after creating pod1
@@ -746,7 +746,7 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
 func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolume) {
 	By("Creating pod1")
 	pod1, pod1Err := createLocalPod(config, testVol, nil)
-	Expect(pod1Err).NotTo(HaveOccurred())
+	framework.ExpectNoError(pod1Err)
 	verifyLocalPod(config, testVol, pod1, config.node0.Name)

 	writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
@@ -762,7 +762,7 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum

 	By("Creating pod2")
 	pod2, pod2Err := createLocalPod(config, testVol, nil)
-	Expect(pod2Err).NotTo(HaveOccurred())
+	framework.ExpectNoError(pod2Err)
 	verifyLocalPod(config, testVol, pod2, config.node0.Name)

 	By("Reading in pod2")
@@ -777,7 +777,7 @@ func createPodWithFsGroupTest(config *localTestConfig, testVol *localTestVolume,
 	pod, err := createLocalPod(config, testVol, &fsGroup)
 	framework.ExpectNoError(err)
 	_, err = framework.LookForStringInPodExec(config.ns, pod.Name, []string{"stat", "-c", "%g", volumeDir}, strconv.FormatInt(expectedFsGroup, 10), time.Second*3)
-	Expect(err).NotTo(HaveOccurred(), "failed to get expected fsGroup %d on directory %s in pod %s", fsGroup, volumeDir, pod.Name)
+	framework.ExpectNoError(err, "failed to get expected fsGroup %d on directory %s in pod %s", fsGroup, volumeDir, pod.Name)
 	return pod
 }

@@ -791,7 +791,7 @@ func setupStorageClass(config *localTestConfig, mode *storagev1.VolumeBindingMod
 	}

 	_, err := config.client.StorageV1().StorageClasses().Create(sc)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 }

 func cleanupStorageClass(config *localTestConfig) {
@@ -844,7 +844,7 @@ func verifyLocalVolume(config *localTestConfig, volume *localTestVolume) {

 func verifyLocalPod(config *localTestConfig, volume *localTestVolume, pod *v1.Pod, expectedNodeName string) {
 	podNodeName, err := podNodeName(config, pod)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	framework.Logf("pod %q created on Node %q", pod.Name, podNodeName)
 	Expect(podNodeName).To(Equal(expectedNodeName))
 }
@@ -1031,7 +1031,7 @@ func testReadFileContent(testFileDir string, testFile string, testFileContent st
 func podRWCmdExec(pod *v1.Pod, cmd string) string {
 	out, err := utils.PodExec(pod, cmd)
 	framework.Logf("podRWCmdExec out: %q err: %v", out, err)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	return out
 }

@@ -1148,7 +1148,7 @@ func createStatefulSet(config *localTestConfig, ssReplicas int32, volumeCount in
 	}

 	ss, err := config.client.AppsV1().StatefulSets(config.ns).Create(spec)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)

 	config.ssTester.WaitForRunningAndReady(ssReplicas, ss)
 	return ss
@@ -1177,7 +1177,7 @@ func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti b
 			if pvcSource != nil {
 				err := framework.WaitForPersistentVolumeClaimPhase(
 					v1.ClaimBound, config.client, config.ns, pvcSource.ClaimName, framework.Poll, time.Second)
-				Expect(err).NotTo(HaveOccurred())
+				framework.ExpectNoError(err)
 			}
 		}
 	}
@@ -1188,9 +1188,9 @@ func validateStatefulSet(config *localTestConfig, ss *appsv1.StatefulSet, anti b
 func SkipUnlessLocalSSDExists(config *localTestConfig, ssdInterface, filesystemType string, node *v1.Node) {
 	ssdCmd := fmt.Sprintf("ls -1 /mnt/disks/by-uuid/google-local-ssds-%s-%s/ | wc -l", ssdInterface, filesystemType)
 	res, err := config.hostExec.IssueCommandWithResult(ssdCmd, node)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	num, err := strconv.Atoi(strings.TrimSpace(res))
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	if num < 1 {
 		framework.Skipf("Requires at least 1 %s %s localSSD ", ssdInterface, filesystemType)
 	}
diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go
index edafc6ecf24..09e7515bed3 100644
--- a/test/e2e/storage/persistent_volumes.go
+++ b/test/e2e/storage/persistent_volumes.go
@@ -22,7 +22,6 @@ import (
 	"time"

 	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -164,7 +163,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 			// that the pod can write to the nfs volume.
 			It("should create a non-pre-bound PV and PVC: test write access ", func() {
 				pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
-				Expect(err).NotTo(HaveOccurred())
+				framework.ExpectNoError(err)
 				completeTest(f, c, ns, pv, pvc)
 			})

@@ -173,7 +172,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 			// correctly, and that the pod can write to the nfs volume.
 			It("create a PVC and non-pre-bound PV: test write access", func() {
 				pv, pvc, err = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, false)
-				Expect(err).NotTo(HaveOccurred())
+				framework.ExpectNoError(err)
 				completeTest(f, c, ns, pv, pvc)
 			})

@@ -182,7 +181,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 			// correctly, and that the pod can write to the nfs volume.
 			It("create a PVC and a pre-bound PV: test write access", func() {
 				pv, pvc, err = framework.CreatePVCPV(c, pvConfig, pvcConfig, ns, true)
-				Expect(err).NotTo(HaveOccurred())
+				framework.ExpectNoError(err)
 				completeTest(f, c, ns, pv, pvc)
 			})

@@ -191,7 +190,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 			// correctly, and that the pod can write to the nfs volume.
 			It("create a PV and a pre-bound PVC: test write access", func() {
 				pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true)
-				Expect(err).NotTo(HaveOccurred())
+				framework.ExpectNoError(err)
 				completeTest(f, c, ns, pv, pvc)
 			})
 		})
@@ -229,7 +228,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 			It("should create 2 PVs and 4 PVCs: test write access", func() {
 				numPVs, numPVCs := 2, 4
 				pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
-				Expect(err).NotTo(HaveOccurred())
+				framework.ExpectNoError(err)
 				framework.ExpectNoError(framework.WaitAndVerifyBinds(c, ns, pvols, claims, true))
 				framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
 			})
@@ -239,7 +238,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 			It("should create 3 PVs and 3 PVCs: test write access", func() {
 				numPVs, numPVCs := 3, 3
 				pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
-				Expect(err).NotTo(HaveOccurred())
+				framework.ExpectNoError(err)
 				framework.ExpectNoError(framework.WaitAndVerifyBinds(c, ns, pvols, claims, true))
 				framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
 			})
@@ -249,7 +248,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 			It("should create 4 PVs and 2 PVCs: test write access [Slow]", func() {
 				numPVs, numPVCs := 4, 2
 				pvols, claims, err = framework.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
-				Expect(err).NotTo(HaveOccurred())
+				framework.ExpectNoError(err)
 				framework.ExpectNoError(framework.WaitAndVerifyBinds(c, ns, pvols, claims, true))
 				framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
 			})
@@ -262,7 +261,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 			BeforeEach(func() {
 				pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle
 				pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
-				Expect(err).NotTo(HaveOccurred(), "BeforeEach: Failed to create PV/PVC")
+				framework.ExpectNoError(err, "BeforeEach: Failed to create PV/PVC")
 				framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed")
 			})

@@ -280,7 +279,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 				By("Writing to the volume.")
 				pod := framework.MakeWritePod(ns, pvc)
 				pod, err = c.CoreV1().Pods(ns).Create(pod)
-				Expect(err).NotTo(HaveOccurred())
+				framework.ExpectNoError(err)
 				framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))

 				By("Deleting the claim")
@@ -290,7 +289,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 				By("Re-mounting the volume.")
 				pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
 				pvc, err = framework.CreatePVC(c, ns, pvc)
-				Expect(err).NotTo(HaveOccurred())
+				framework.ExpectNoError(err)
 				framework.ExpectNoError(framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, 60*time.Second), "Failed to reach 'Bound' for PVC ", pvc.Name)

 				// If a file is detected in /mnt, fail the pod and do not restart it.
@@ -298,7 +297,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 				mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
 				pod = framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
 				pod, err = c.CoreV1().Pods(ns).Create(pod)
-				Expect(err).NotTo(HaveOccurred())
+				framework.ExpectNoError(err)
 				framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))
 				framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
 				framework.Logf("Pod exited without failure; the volume has been recycled.")
diff --git a/test/e2e/storage/pv_protection.go b/test/e2e/storage/pv_protection.go
index d94aa6effe3..5b3c8851dc6 100644
--- a/test/e2e/storage/pv_protection.go
+++ b/test/e2e/storage/pv_protection.go
@@ -77,14 +77,14 @@ var _ = utils.SIGDescribe("PV Protection", func() {
 		pv = framework.MakePersistentVolume(pvConfig)

 		// create the PV
 		pv, err = client.CoreV1().PersistentVolumes().Create(pv)
-		Expect(err).NotTo(HaveOccurred(), "Error creating PV")
+		framework.ExpectNoError(err, "Error creating PV")

 		By("Waiting for PV to enter phase Available")
 		framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeAvailable, client, pv.Name, 1*time.Second, 30*time.Second))

 		By("Checking that PV Protection finalizer is set")
 		pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred(), "While getting PV status")
+		framework.ExpectNoError(err, "While getting PV status")
 		Expect(slice.ContainsString(pv.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)).To(BeTrue(), "PV Protection finalizer(%v) is not set in %v", volumeutil.PVProtectionFinalizer, pv.ObjectMeta.Finalizers)
 	})

@@ -98,7 +98,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
 	It("Verify \"immediate\" deletion of a PV that is not bound to a PVC", func() {
 		By("Deleting the PV")
 		err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0))
-		Expect(err).NotTo(HaveOccurred(), "Error deleting PV")
+		framework.ExpectNoError(err, "Error deleting PV")
 		framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, framework.PVDeletingTimeout)
 	})

@@ -106,24 +106,24 @@ var _ = utils.SIGDescribe("PV Protection", func() {
 		By("Creating a PVC")
 		pvc = framework.MakePersistentVolumeClaim(pvcConfig, nameSpace)
 		pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
-		Expect(err).NotTo(HaveOccurred(), "Error creating PVC")
+		framework.ExpectNoError(err, "Error creating PVC")

 		By("Waiting for PVC to become Bound")
 		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, framework.ClaimBindingTimeout)
-		Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
+		framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)

 		By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC")
 		err = client.CoreV1().PersistentVolumes().Delete(pv.Name, metav1.NewDeleteOptions(0))
-		Expect(err).NotTo(HaveOccurred(), "Error deleting PV")
+		framework.ExpectNoError(err, "Error deleting PV")

 		By("Checking that the PV status is Terminating")
 		pv, err = client.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred(), "While checking PV status")
+		framework.ExpectNoError(err, "While checking PV status")
 		Expect(pv.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil))

 		By("Deleting the PVC that is bound to the PV")
 		err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
-		Expect(err).NotTo(HaveOccurred(), "Error deleting PVC")
+		framework.ExpectNoError(err, "Error deleting PVC")

 		By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC")
 		framework.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, framework.PVDeletingTimeout)
diff --git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go
index 64011df46fd..64731cbf767 100644
--- a/test/e2e/storage/pvc_protection.go
+++ b/test/e2e/storage/pvc_protection.go
@@ -55,21 +55,21 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 		pvc = newClaim(testStorageClass, nameSpace, suffix)
 		pvc.Spec.StorageClassName = &defaultSC
 		pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
-		Expect(err).NotTo(HaveOccurred(), "Error creating PVC")
+		framework.ExpectNoError(err, "Error creating PVC")
 		pvcCreatedAndNotDeleted = true

 		By("Creating a Pod that becomes Running and therefore is actively using the PVC")
 		pvcClaims := []*v1.PersistentVolumeClaim{pvc}
 		pod, err = framework.CreatePod(client, nameSpace, nil, pvcClaims, false, "")
-		Expect(err).NotTo(HaveOccurred(), "While creating pod that uses the PVC or waiting for the Pod to become Running")
+		framework.ExpectNoError(err, "While creating pod that uses the PVC or waiting for the Pod to become Running")

 		By("Waiting for PVC to become Bound")
 		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, framework.ClaimBindingTimeout)
-		Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
+		framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)

 		By("Checking that PVC Protection finalizer is set")
 		pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred(), "While getting PVC status")
+		framework.ExpectNoError(err, "While getting PVC status")
 		Expect(slice.ContainsString(pvc.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)).To(BeTrue(), "PVC Protection finalizer(%v) is not set in %v", volumeutil.PVCProtectionFinalizer, pvc.ObjectMeta.Finalizers)
 	})

@@ -82,11 +82,11 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 	It("Verify \"immediate\" deletion of a PVC that is not in active use by a pod", func() {
 		By("Deleting the pod using the PVC")
 		err = framework.DeletePodWithWait(f, client, pod)
-		Expect(err).NotTo(HaveOccurred(), "Error terminating and deleting pod")
+		framework.ExpectNoError(err, "Error terminating and deleting pod")

 		By("Deleting the PVC")
 		err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
-		Expect(err).NotTo(HaveOccurred(), "Error deleting PVC")
+		framework.ExpectNoError(err, "Error deleting PVC")
 		framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
 		pvcCreatedAndNotDeleted = false
 	})

@@ -94,16 +94,16 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 	It("Verify that PVC in active use by a pod is not removed immediately", func() {
 		By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod")
 		err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
-		Expect(err).NotTo(HaveOccurred(), "Error deleting PVC")
+		framework.ExpectNoError(err, "Error deleting PVC")

 		By("Checking that the PVC status is Terminating")
 		pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred(), "While checking PVC status")
+		framework.ExpectNoError(err, "While checking PVC status")
 		Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil))

 		By("Deleting the pod that uses the PVC")
 		err = framework.DeletePodWithWait(f, client, pod)
-		Expect(err).NotTo(HaveOccurred(), "Error terminating and deleting pod")
+		framework.ExpectNoError(err, "Error terminating and deleting pod")

 		By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod")
 		framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
@@ -113,29 +113,29 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 	It("Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", func() {
 		By("Deleting the PVC, however, the PVC must not be removed from the system as it's in active use by a pod")
 		err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, metav1.NewDeleteOptions(0))
-		Expect(err).NotTo(HaveOccurred(), "Error deleting PVC")
+		framework.ExpectNoError(err, "Error deleting PVC")

 		By("Checking that the PVC status is Terminating")
 		pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred(), "While checking PVC status")
+		framework.ExpectNoError(err, "While checking PVC status")
 		Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil))

 		By("Creating second Pod whose scheduling fails because it uses a PVC that is being deleted")
 		secondPod, err2 := framework.CreateUnschedulablePod(client, nameSpace, nil, []*v1.PersistentVolumeClaim{pvc}, false, "")
-		Expect(err2).NotTo(HaveOccurred(), "While creating second pod that uses a PVC that is being deleted and that is Unschedulable")
+		framework.ExpectNoError(err2, "While creating second pod that uses a PVC that is being deleted and that is Unschedulable")

 		By("Deleting the second pod that uses the PVC that is being deleted")
 		err = framework.DeletePodWithWait(f, client, secondPod)
-		Expect(err).NotTo(HaveOccurred(), "Error terminating and deleting pod")
+		framework.ExpectNoError(err, "Error terminating and deleting pod")

 		By("Checking again that the PVC status is Terminating")
 		pvc, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred(), "While checking PVC status")
+		framework.ExpectNoError(err, "While checking PVC status")
 		Expect(pvc.ObjectMeta.DeletionTimestamp).NotTo(Equal(nil))

 		By("Deleting the first pod that uses the PVC")
 		err = framework.DeletePodWithWait(f, client, pod)
-		Expect(err).NotTo(HaveOccurred(), "Error terminating and deleting pod")
+		framework.ExpectNoError(err, "Error terminating and deleting pod")

 		By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod")
 		framework.WaitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout)
diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go
index 6c360572e91..cf67f00c355 100644
--- a/test/e2e/storage/regional_pd.go
+++ b/test/e2e/storage/regional_pd.go
@@ -113,9 +113,9 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
 				Expect(volume).NotTo(BeNil())

 				err := checkGCEPD(volume, "pd-standard")
-				Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
+				framework.ExpectNoError(err, "checkGCEPD")
 				err = verifyZonesInPV(volume, sets.NewString(cloudZones...), true /* match */)
-				Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV")
+				framework.ExpectNoError(err, "verifyZonesInPV")

 			},
 		},
@@ -134,11 +134,11 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
 				Expect(volume).NotTo(BeNil())

 				err := checkGCEPD(volume, "pd-standard")
-				Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
+				framework.ExpectNoError(err, "checkGCEPD")
 				zones, err := framework.GetClusterZones(c)
-				Expect(err).NotTo(HaveOccurred(), "GetClusterZones")
+				framework.ExpectNoError(err, "GetClusterZones")
 				err = verifyZonesInPV(volume, zones, false /* match */)
-				Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV")
+				framework.ExpectNoError(err, "verifyZonesInPV")
 			},
 		},
 	}
@@ -173,7 +173,7 @@ func testZonalFailover(c clientset.Interface, ns string) {

 	By("creating a StorageClass " + class.Name)
 	_, err := c.StorageV1().StorageClasses().Create(class)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	defer func() {
 		framework.Logf("deleting storage class %s", class.Name)
 		framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil),
@@ -182,9 +182,9 @@ func testZonalFailover(c clientset.Interface, ns string) {

 	By("creating a StatefulSet")
 	_, err = c.CoreV1().Services(ns).Create(service)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	_, err = c.AppsV1().StatefulSets(ns).Create(statefulSet)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)

 	defer func() {
 		framework.Logf("deleting statefulset%q/%q", statefulSet.Namespace, statefulSet.Name)
@@ -209,7 +209,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
 		pod := getPod(c, ns, regionalPDLabels)
 		Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(BeTrue(),
 			"The statefulset pod has the following conditions: %s", pod.Status.Conditions)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)
 	}

 	pvc := getPVC(c, ns, regionalPDLabels)
@@ -254,14 +254,14 @@ func testZonalFailover(c clientset.Interface, ns string) {
 			newPodZone := node.Labels[v1.LabelZoneFailureDomain]
 			return newPodZone == otherZone, nil
 		})
-	Expect(err).NotTo(HaveOccurred(), "Error waiting for pod to be scheduled in a different zone (%q): %v", otherZone, err)
+	framework.ExpectNoError(err, "Error waiting for pod to be scheduled in a different zone (%q): %v", otherZone, err)

 	err = framework.WaitForStatefulSetReplicasReady(statefulSet.Name, ns, c, 3*time.Second, framework.RestartPodReadyAgainTimeout)
 	if err != nil {
 		pod := getPod(c, ns, regionalPDLabels)
 		Expect(podutil.IsPodReadyConditionTrue(pod.Status)).To(BeTrue(),
 			"The statefulset pod has the following conditions: %s", pod.Status.Conditions)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)
 	}

 	By("verifying the same PVC is used by the new pod")
@@ -270,7 +270,7 @@ func testZonalFailover(c clientset.Interface, ns string) {

 	By("verifying the container output has 2 lines, indicating the pod has been created twice using the same regional PD.")
 	logs, err := framework.GetPodLogs(c, ns, pod.Name, "")
-	Expect(err).NotTo(HaveOccurred(),
+	framework.ExpectNoError(err,
 		"Error getting logs from pod %s in namespace %s", pod.Name, ns)
 	lineCount := len(strings.Split(strings.TrimSpace(logs), "\n"))
 	expectedLineCount := 2
@@ -283,7 +283,7 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string)
 	reversePatches := make(map[string][]byte)
 	for _, node := range nodes {
 		oldData, err := json.Marshal(node)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)

 		node.Spec.Taints = append(node.Spec.Taints, v1.Taint{
 			Key:    taintKeyPrefix + ns,
@@ -292,13 +292,13 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string)
 		})

 		newData, err := json.Marshal(node)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)

 		patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)

 		reversePatchBytes, err := strategicpatch.CreateTwoWayMergePatch(newData, oldData, v1.Node{})
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)
 		reversePatches[node.Name] = reversePatchBytes

 		_, err = c.CoreV1().Nodes().Patch(node.Name, types.StrategicMergePatchType, patchBytes)
@@ -421,7 +421,7 @@ func getPVC(c clientset.Interface, ns string, pvcLabels map[string]string) *v1.P
 	selector := labels.Set(pvcLabels).AsSelector()
 	options := metav1.ListOptions{LabelSelector: selector.String()}
 	pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(options)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	Expect(len(pvcList.Items)).To(Equal(1), "There should be exactly 1 PVC matched.")

 	return &pvcList.Items[0]
@@ -431,7 +431,7 @@ func getPod(c clientset.Interface, ns string, podLabels map[string]string) *v1.P
 	selector := labels.Set(podLabels).AsSelector()
 	options := metav1.ListOptions{LabelSelector: selector.String()}
 	podList, err := c.CoreV1().Pods(ns).List(options)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	Expect(len(podList.Items)).To(Equal(1), "There should be exactly 1 pod matched.")

 	return &podList.Items[0]
diff --git a/test/e2e/storage/subpath.go b/test/e2e/storage/subpath.go
index 238af4d6737..c8d6218b1c2 100644
--- a/test/e2e/storage/subpath.go
+++ b/test/e2e/storage/subpath.go
@@ -25,7 +25,6 @@ import (
 	"k8s.io/kubernetes/test/e2e/storage/utils"

 	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
 )

 var _ = utils.SIGDescribe("Subpath", func() {
@@ -40,13 +39,13 @@ var _ = utils.SIGDescribe("Subpath", func() {
 		secret := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "my-secret"}, Data: map[string][]byte{"secret-key": []byte("secret-value")}}
 		secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
 		if err != nil && !apierrors.IsAlreadyExists(err) {
-			Expect(err).ToNot(HaveOccurred(), "while creating secret")
+			framework.ExpectNoError(err, "while creating secret")
 		}

 		configmap := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "my-configmap"}, Data: map[string]string{"configmap-key": "configmap-value"}}
 		configmap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
 		if err != nil && !apierrors.IsAlreadyExists(err) {
-			Expect(err).ToNot(HaveOccurred(), "while creating configmap")
+			framework.ExpectNoError(err, "while creating configmap")
 		}
 	})

diff --git a/test/e2e/storage/volume_expand.go b/test/e2e/storage/volume_expand.go
index b67901b6f99..6263ca247ac 100644
--- a/test/e2e/storage/volume_expand.go
+++ b/test/e2e/storage/volume_expand.go
@@ -61,14 +61,14 @@ var _ = utils.SIGDescribe("Volume expand", func() {
 			AllowVolumeExpansion: true,
 		}
 		resizableSc, err = createStorageClass(test, ns, "resizing", c)
-		Expect(err).NotTo(HaveOccurred(), "Error creating resizable storage class")
+		framework.ExpectNoError(err, "Error creating resizable storage class")
 		Expect(resizableSc.AllowVolumeExpansion).NotTo(BeNil())
 		Expect(*resizableSc.AllowVolumeExpansion).To(BeTrue())

 		pvc = newClaim(test, ns, "default")
 		pvc.Spec.StorageClassName = &resizableSc.Name
 		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
-		Expect(err).NotTo(HaveOccurred(), "Error creating pvc")
+		framework.ExpectNoError(err, "Error creating pvc")
 	})

 	AfterEach(func() {
@@ -82,7 +82,7 @@ var _ = utils.SIGDescribe("Volume expand", func() {
 			ClaimSize: "2Gi",
 		}
 		regularSC, err := createStorageClass(test, ns, "noexpand", c)
-		Expect(err).NotTo(HaveOccurred(), "Error creating non-expandable storage class")
+		framework.ExpectNoError(err, "Error creating non-expandable storage class")

 		defer func() {
 			framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(regularSC.Name, nil))
@@ -92,7 +92,7 @@ var _ = utils.SIGDescribe("Volume expand", func() {
 		noExpandPVC := newClaim(test, ns, "noexpand")
 		noExpandPVC.Spec.StorageClassName = &regularSC.Name
 		noExpandPVC, err = c.CoreV1().PersistentVolumeClaims(noExpandPVC.Namespace).Create(noExpandPVC)
-		Expect(err).NotTo(HaveOccurred(), "Error creating pvc")
+		framework.ExpectNoError(err, "Error creating pvc")

 		defer func() {
 			framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, noExpandPVC.Name, noExpandPVC.Namespace))
@@ -100,7 +100,7 @@ var _ = utils.SIGDescribe("Volume expand", func() {

 		pvcClaims := []*v1.PersistentVolumeClaim{noExpandPVC}
 		pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
-		Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
+		framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
 		Expect(len(pvs)).To(Equal(1))

 		By("Expanding non-expandable pvc")
@@ -113,21 +113,21 @@ var _ = utils.SIGDescribe("Volume expand", func() {
 		By("Waiting for pvc to be in bound phase")
 		pvcClaims := []*v1.PersistentVolumeClaim{pvc}
 		pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
-		Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
+		framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
 		Expect(len(pvs)).To(Equal(1))

 		By("Creating a pod with dynamically provisioned volume")
 		pod, err := framework.CreatePod(c, ns, nil, pvcClaims, false, "")
-		Expect(err).NotTo(HaveOccurred(), "While creating pods for resizing")
+		framework.ExpectNoError(err, "While creating pods for resizing")
 		defer func() {
 			err = framework.DeletePodWithWait(f, c, pod)
-			Expect(err).NotTo(HaveOccurred(), "while cleaning up pod already deleted in resize test")
+			framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
 		}()

 		By("Expanding current pvc")
 		newSize := resource.MustParse("6Gi")
 		pvc, err = expandPVCSize(pvc, newSize, c)
-		Expect(err).NotTo(HaveOccurred(), "While updating pvc for more size")
+		framework.ExpectNoError(err, "While updating pvc for more size")
 		Expect(pvc).NotTo(BeNil())

 		pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
@@ -137,11 +137,11 @@ var _ = utils.SIGDescribe("Volume expand", func() {

 		By("Waiting for cloudprovider resize to finish")
 		err = waitForControllerVolumeResize(pvc, c, totalResizeWaitPeriod)
-		Expect(err).NotTo(HaveOccurred(), "While waiting for pvc resize to finish")
+		framework.ExpectNoError(err, "While waiting for pvc resize to finish")

 		By("Checking for conditions on pvc")
 		pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred(), "While fetching pvc after controller resize")
+		framework.ExpectNoError(err, "While fetching pvc after controller resize")

 		inProgressConditions := pvc.Status.Conditions
 		Expect(len(inProgressConditions)).To(Equal(1), "pvc must have file system resize pending condition")
@@ -149,19 +149,19 @@ var _ = utils.SIGDescribe("Volume expand", func() {

 		By("Deleting the previously created pod")
 		err = framework.DeletePodWithWait(f, c, pod)
-		Expect(err).NotTo(HaveOccurred(), "while deleting pod for resizing")
+		framework.ExpectNoError(err, "while deleting pod for resizing")

 		By("Creating a new pod with same volume")
 		pod2, err := framework.CreatePod(c, ns, nil, pvcClaims, false, "")
-		Expect(err).NotTo(HaveOccurred(), "while recreating pod for resizing")
+		framework.ExpectNoError(err, "while recreating pod for resizing")
 		defer func() {
 			err = framework.DeletePodWithWait(f, c, pod2)
-			Expect(err).NotTo(HaveOccurred(), "while cleaning up pod before exiting resizing test")
+			framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test")
 		}()

 		By("Waiting for file system resize to finish")
 		pvc, err = waitForFSResize(pvc, c)
-		Expect(err).NotTo(HaveOccurred(), "while waiting for fs resize to finish")
+		framework.ExpectNoError(err, "while waiting for fs resize to finish")

 		pvcConditions := pvc.Status.Conditions
 		Expect(len(pvcConditions)).To(Equal(0), "pvc should not have conditions")
diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go
index 06a83ccb178..f0a0177e4a0 100644
--- a/test/e2e/storage/volume_metrics.go
+++ b/test/e2e/storage/volume_metrics.go
@@ -80,19 +80,19 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {

 		controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
-		Expect(err).NotTo(HaveOccurred(), "Error getting c-m metrics : %v", err)
+		framework.ExpectNoError(err, "Error getting c-m metrics : %v", err)

 		storageOpMetrics := getControllerStorageMetrics(controllerMetrics)

 		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)
 		Expect(pvc).ToNot(Equal(nil))

 		claims := []*v1.PersistentVolumeClaim{pvc}

 		pod := framework.MakePod(ns, nil, claims, false, "")
 		pod, err = c.CoreV1().Pods(ns).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)

 		err = framework.WaitForPodRunningInNamespace(c, pod)
 		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)
@@ -114,19 +114,19 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 	It("should create volume metrics with the correct PVC ref", func() {
 		var err error
 		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)
 		Expect(pvc).ToNot(Equal(nil))

 		claims := []*v1.PersistentVolumeClaim{pvc}
 		pod := framework.MakePod(ns, nil, claims, false, "")
 		pod, err = c.CoreV1().Pods(ns).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)

 		err = framework.WaitForPodRunningInNamespace(c, pod)
 		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)

 		pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)

 		// Verify volume stat metrics were collected for the referenced PVC
 		volumeStatKeys := []string{
@@ -156,7 +156,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 			}
 			return true, nil
 		})
-		Expect(waitErr).NotTo(HaveOccurred(), "Error finding volume metrics : %v", waitErr)
+		framework.ExpectNoError(waitErr, "Error finding volume metrics : %v", waitErr)

 		for _, key := range volumeStatKeys {
 			kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key)
@@ -171,19 +171,19 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 	It("should create metrics for total time taken in volume operations in P/V Controller", func() {
 		var err error
 		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)
 		Expect(pvc).ToNot(Equal(nil))

 		claims := []*v1.PersistentVolumeClaim{pvc}
 		pod := framework.MakePod(ns, nil, claims, false, "")
 		pod, err = c.CoreV1().Pods(ns).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)

 		err = framework.WaitForPodRunningInNamespace(c, pod)
 		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)

 		pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)

 		controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
 		if err != nil {
@@ -202,22 +202,22 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
 	It("should create volume metrics in Volume Manager", func() {
 		var err error
 		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)
 		Expect(pvc).ToNot(Equal(nil))

 		claims := []*v1.PersistentVolumeClaim{pvc}
 		pod := framework.MakePod(ns, nil, claims, false, "")
 		pod, err = c.CoreV1().Pods(ns).Create(pod)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)

 		err = framework.WaitForPodRunningInNamespace(c, pod)
 		framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name)

 		pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)

 		kubeMetrics, err := metricsGrabber.GrabFromKubelet(pod.Spec.NodeName)
-
Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) // Metrics should have dimensions plugin_name and state available totalVolumesKey := "volume_manager_total_volumes" @@ -232,7 +232,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { It("should create metrics for total number of volumes in A/D Controller", func() { var err error pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) Expect(pvc).ToNot(Equal(nil)) claims := []*v1.PersistentVolumeClaim{pvc} @@ -246,11 +246,11 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { // Create pod pod, err = c.CoreV1().Pods(ns).Create(pod) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) err = framework.WaitForPodRunningInNamespace(c, pod) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name) pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + framework.ExpectNoError(err) // Get updated metrics updatedControllerMetrics, err := metricsGrabber.GrabFromControllerManager() @@ -340,7 +340,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { "Wrong metric size: %d", len(metricValues)) controllerMetrics, err := metricsGrabber.GrabFromControllerManager() - Expect(err).NotTo(HaveOccurred(), "Error getting c-m metricValues: %v", err) + framework.ExpectNoError(err, "Error getting c-m metricValues: %v", err) for i, metric := range metrics { expectValues := metricValues[i] @@ -368,7 +368,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { // Initializes all original metric values. controllerMetrics, err := metricsGrabber.GrabFromControllerManager() - Expect(err).NotTo(HaveOccurred(), "Error getting c-m metricValues: %v", err) + framework.ExpectNoError(err, "Error getting c-m metricValues: %v", err) for _, metric := range metrics { originMetricValues = append(originMetricValues, getPVControllerMetrics(controllerMetrics, metric.name, metric.dimension)) @@ -395,7 +395,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { func() { var err error pv, err = framework.CreatePV(c, pv) - Expect(err).NotTo(HaveOccurred(), "Error creating pv: %v", err) + framework.ExpectNoError(err, "Error creating pv: %v", err) waitForPVControllerSync(metricsGrabber, unboundPVKey, classKey) validator([]map[string]int64{nil, {className: 1}, nil, nil}) }) @@ -404,7 +404,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { func() { var err error pvc, err = framework.CreatePVC(c, ns, pvc) - Expect(err).NotTo(HaveOccurred(), "Error creating pvc: %v", err) + framework.ExpectNoError(err, "Error creating pvc: %v", err) waitForPVControllerSync(metricsGrabber, unboundPVCKey, namespaceKey) validator([]map[string]int64{nil, nil, nil, {ns: 1}}) }) @@ -413,7 +413,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { func() { var err error pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, true) - Expect(err).NotTo(HaveOccurred(), "Error creating pv pvc: %v", err) + framework.ExpectNoError(err, "Error creating pv pvc: %v", err) waitForPVControllerSync(metricsGrabber, boundPVKey, classKey) waitForPVControllerSync(metricsGrabber, boundPVCKey, namespaceKey) validator([]map[string]int64{{className: 1}, nil, {ns: 1}, nil}) @@ -460,7 +460,7 @@ func waitForDetachAndGrabMetrics(oldMetrics map[string]int64, metricsGrabber *me } waitErr := wait.ExponentialBackoff(backoff, verifyMetricFunc) 
- Expect(waitErr).NotTo(HaveOccurred(), "Timeout error fetching storage c-m metrics : %v", waitErr) + framework.ExpectNoError(waitErr, "Timeout error fetching storage c-m metrics : %v", waitErr) return updatedStorageMetrics } @@ -542,7 +542,7 @@ func waitForPVControllerSync(metricsGrabber *metrics.Grabber, metricName, dimens return len(getPVControllerMetrics(updatedMetrics, metricName, dimension)) > 0, nil } waitErr := wait.ExponentialBackoff(backoff, verifyMetricFunc) - Expect(waitErr).NotTo(HaveOccurred(), + framework.ExpectNoError(waitErr, "Timeout error fetching pv controller metrics : %v", waitErr) } @@ -632,5 +632,5 @@ func waitForADControllerStatesMetrics(metricsGrabber *metrics.Grabber, metricNam return true, nil } waitErr := wait.ExponentialBackoff(backoff, verifyMetricFunc) - Expect(waitErr).NotTo(HaveOccurred(), "Timeout error fetching A/D controller metrics : %v", waitErr) + framework.ExpectNoError(waitErr, "Timeout error fetching A/D controller metrics : %v", waitErr) }