From 950f6e868caa0384c21b572961f6922817e31c9d Mon Sep 17 00:00:00 2001
From: draveness
Date: Thu, 9 May 2019 09:48:02 +0800
Subject: [PATCH] refactor: use framework.ExpectNoError instead

---
 test/e2e/common/pods.go                         |  6 +++---
 test/e2e/upgrades/apps/job.go                   |  7 +++----
 test/e2e/upgrades/apps/statefulset.go           |  5 ++---
 test/e2e/upgrades/storage/BUILD                 |  1 -
 test/e2e/upgrades/storage/persistent_volumes.go |  3 +--
 test/e2e/upgrades/storage/volume_mode.go        | 11 +++++------
 6 files changed, 14 insertions(+), 19 deletions(-)

diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go
index 32a20d19778..c2e7af9f452 100644
--- a/test/e2e/common/pods.go
+++ b/test/e2e/common/pods.go
@@ -814,19 +814,19 @@ var _ = framework.KubeDescribe("Pods", func() {
 
 		ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
 		_, err := podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status")
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		// Sleep for 10 seconds.
 		time.Sleep(maxReadyStatusUpdateTolerance)
 		gomega.Expect(podClient.PodIsReady(podName)).To(gomega.BeFalse(), "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True")
 
 		ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
 		_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status")
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		validatePodReadiness(true)
 
 		ginkgo.By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1))
 		_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status")
-		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+		framework.ExpectNoError(err)
 		validatePodReadiness(false)
 	})
 
diff --git a/test/e2e/upgrades/apps/job.go b/test/e2e/upgrades/apps/job.go
index cd836e75c05..dc955003f02 100644
--- a/test/e2e/upgrades/apps/job.go
+++ b/test/e2e/upgrades/apps/job.go
@@ -24,7 +24,6 @@ import (
 	"k8s.io/kubernetes/test/e2e/upgrades"
 
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )
 
 // JobUpgradeTest is a test harness for batch Jobs.
@@ -44,11 +43,11 @@ func (t *JobUpgradeTest) Setup(f *framework.Framework) {
 	t.job = jobutil.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6)
 	job, err := jobutil.CreateJob(f.ClientSet, t.namespace, t.job)
 	t.job = job
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 
 	ginkgo.By("Ensuring active pods == parallelism")
 	err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 }
 
 // Test verifies that the Jobs Pods are running after the an upgrade
@@ -56,7 +55,7 @@ func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgr
 	<-done
 	ginkgo.By("Ensuring active pods == parallelism")
 	err := jobutil.EnsureAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 }
 
 // Teardown cleans up any remaining resources.
diff --git a/test/e2e/upgrades/apps/statefulset.go b/test/e2e/upgrades/apps/statefulset.go
index bc476f9036a..7a9a11e9389 100644
--- a/test/e2e/upgrades/apps/statefulset.go
+++ b/test/e2e/upgrades/apps/statefulset.go
@@ -18,7 +18,6 @@ package upgrades
 
 import (
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 
 	apps "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
@@ -69,12 +68,12 @@ func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
 
 	ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
 	_, err := f.ClientSet.CoreV1().Services(ns).Create(t.service)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 
 	ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
 	*(t.set.Spec.Replicas) = 3
 	_, err = f.ClientSet.AppsV1().StatefulSets(ns).Create(t.set)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 
 	ginkgo.By("Saturating stateful set " + t.set.Name)
 	t.tester.Saturate(t.set)
diff --git a/test/e2e/upgrades/storage/BUILD b/test/e2e/upgrades/storage/BUILD
index 4b28af94fad..8044b3d8846 100644
--- a/test/e2e/upgrades/storage/BUILD
+++ b/test/e2e/upgrades/storage/BUILD
@@ -22,7 +22,6 @@ go_library(
         "//test/e2e/storage/utils:go_default_library",
         "//test/e2e/upgrades:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
-        "//vendor/github.com/onsi/gomega:go_default_library",
     ],
 )
 
diff --git a/test/e2e/upgrades/storage/persistent_volumes.go b/test/e2e/upgrades/storage/persistent_volumes.go
index dbcc331136d..9d12139680c 100644
--- a/test/e2e/upgrades/storage/persistent_volumes.go
+++ b/test/e2e/upgrades/storage/persistent_volumes.go
@@ -23,7 +23,6 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 	"k8s.io/kubernetes/test/e2e/upgrades"
 )
 
@@ -69,7 +68,7 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
 
 	ginkgo.By("Creating the PV and PVC")
 	t.pv, t.pvc, err = framework.CreatePVPVC(f.ClientSet, pvConfig, pvcConfig, ns, true)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 	framework.ExpectNoError(framework.WaitOnPVandPVC(f.ClientSet, ns, t.pv, t.pvc))
 
 	ginkgo.By("Consuming the PV before upgrade")
diff --git a/test/e2e/upgrades/storage/volume_mode.go b/test/e2e/upgrades/storage/volume_mode.go
index f77b2cef498..da4048bcd72 100644
--- a/test/e2e/upgrades/storage/volume_mode.go
+++ b/test/e2e/upgrades/storage/volume_mode.go
@@ -28,7 +28,6 @@ import (
 	"k8s.io/kubernetes/test/e2e/upgrades"
 
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )
 
 const devicePath = "/mnt/volume1"
@@ -82,20 +81,20 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
 	}
 	t.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns)
 	t.pvc, err = framework.CreatePVC(cs, ns, t.pvc)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 
 	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 
 	t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(t.pvc.Name, metav1.GetOptions{})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 
 	t.pv, err = cs.CoreV1().PersistentVolumes().Get(t.pvc.Spec.VolumeName, metav1.GetOptions{})
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 
 	ginkgo.By("Consuming the PVC before downgrade")
 	t.pod, err = framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	framework.ExpectNoError(err)
 
 	ginkgo.By("Checking if PV exists as expected volume mode")
 	utils.CheckVolumeModeOfPath(t.pod, block, devicePath)
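
For context on the change: framework.ExpectNoError keeps the same gomega-backed assertion behind a shorter call, which is why each call site above collapses to one line. The sketch below shows how an ExpectNoError-style helper can be built on gomega; it is an illustration only, not the exact test/e2e/framework implementation, which (as an assumption here) also logs the unexpected error before failing.

package framework

import "github.com/onsi/gomega"

// ExpectNoError fails the running spec when err is non-nil. The offset of 1
// makes the reported failure point at the caller rather than at this helper,
// and explain is passed through as the optional failure description.
// Sketch only; the real framework helper is assumed to add error logging.
func ExpectNoError(err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
}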