From ff09cc54050c266fce6f5e957d57c44bc2a0a9a3 Mon Sep 17 00:00:00 2001
From: draveness
Date: Thu, 4 Jul 2019 09:29:45 +0800
Subject: [PATCH] feat: use framework.ExpectEqual in upgrades and windows e2e test

---
 test/e2e/upgrades/apparmor.go         | 2 +-
 test/e2e/upgrades/apps/BUILD          | 1 -
 test/e2e/upgrades/apps/deployments.go | 9 ++++-----
 test/e2e/upgrades/cassandra.go        | 2 +-
 test/e2e/upgrades/etcd.go             | 2 +-
 test/e2e/upgrades/mysql.go            | 2 +-
 test/e2e/upgrades/sysctl.go           | 4 ++--
 test/e2e/windows/density.go           | 6 +++---
 test/e2e/windows/memory_limits.go     | 4 ++--
 test/e2e/windows/volumes.go           | 7 +++----
 10 files changed, 18 insertions(+), 21 deletions(-)

diff --git a/test/e2e/upgrades/apparmor.go b/test/e2e/upgrades/apparmor.go
index 706c4d465e8..d0fc4cb4e3d 100644
--- a/test/e2e/upgrades/apparmor.go
+++ b/test/e2e/upgrades/apparmor.go
@@ -87,7 +87,7 @@ func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
 	ginkgo.By("Verifying an AppArmor profile is continuously enforced for a pod")
 	pod, err := f.PodClient().Get(t.pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "Should be able to get pod")
-	gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning), "Pod should stay running")
+	framework.ExpectEqual(pod.Status.Phase, v1.PodRunning, "Pod should stay running")
 	gomega.Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(gomega.BeNil(), "Container should be running")
 	gomega.Expect(pod.Status.ContainerStatuses[0].RestartCount).To(gomega.BeZero(), "Container should not need to be restarted")
 }
diff --git a/test/e2e/upgrades/apps/BUILD b/test/e2e/upgrades/apps/BUILD
index 74e40d5c3ed..0926c210bc7 100644
--- a/test/e2e/upgrades/apps/BUILD
+++ b/test/e2e/upgrades/apps/BUILD
@@ -35,7 +35,6 @@ go_library(
         "//test/e2e/upgrades:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
-        "//vendor/github.com/onsi/gomega:go_default_library",
     ],
 )
 
diff --git a/test/e2e/upgrades/apps/deployments.go b/test/e2e/upgrades/apps/deployments.go
index 432b4aa5059..2eacc69057f 100644
--- a/test/e2e/upgrades/apps/deployments.go
+++ b/test/e2e/upgrades/apps/deployments.go
@@ -28,7 +28,6 @@ import (
 	"k8s.io/kubernetes/test/e2e/upgrades"
 
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -130,7 +129,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
 	framework.ExpectNoError(err)
 
 	ginkgo.By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName))
-	gomega.Expect(deployment.UID).To(gomega.Equal(t.oldDeploymentUID))
+	framework.ExpectEqual(deployment.UID, t.oldDeploymentUID)
 
 	ginkgo.By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName))
 	rsSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
@@ -144,15 +143,15 @@
 
 	switch t.oldRSUID {
 	case rss[0].UID:
-		gomega.Expect(rss[1].UID).To(gomega.Equal(t.newRSUID))
+		framework.ExpectEqual(rss[1].UID, t.newRSUID)
 	case rss[1].UID:
-		gomega.Expect(rss[0].UID).To(gomega.Equal(t.newRSUID))
+		framework.ExpectEqual(rss[0].UID, t.newRSUID)
 	default:
 		framework.ExpectNoError(fmt.Errorf("new replicasets are created during upgrade of deployment %q", deploymentName))
 	}
 
 	ginkgo.By(fmt.Sprintf("Verifying revision of the deployment %q is still 2", deploymentName))
-	gomega.Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).To(gomega.Equal("2"))
+	framework.ExpectEqual(deployment.Annotations[deploymentutil.RevisionAnnotation], "2")
 
 	ginkgo.By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
 	framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(c, deployment))
diff --git a/test/e2e/upgrades/cassandra.go b/test/e2e/upgrades/cassandra.go
index 7c0dfabad18..2acedb46160 100644
--- a/test/e2e/upgrades/cassandra.go
+++ b/test/e2e/upgrades/cassandra.go
@@ -108,7 +108,7 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
 	ginkgo.By("Verifying that the users exist")
 	users, err := t.listUsers()
 	framework.ExpectNoError(err)
-	gomega.Expect(len(users)).To(gomega.Equal(2))
+	framework.ExpectEqual(len(users), 2)
 }
 
 // listUsers gets a list of users from the db via the tester service.
diff --git a/test/e2e/upgrades/etcd.go b/test/e2e/upgrades/etcd.go
index 3a1b3f58905..ae7400e2cee 100644
--- a/test/e2e/upgrades/etcd.go
+++ b/test/e2e/upgrades/etcd.go
@@ -103,7 +103,7 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
 	ginkgo.By("Verifying that the users exist")
 	users, err := t.listUsers()
 	framework.ExpectNoError(err)
-	gomega.Expect(len(users)).To(gomega.Equal(2))
+	framework.ExpectEqual(len(users), 2)
 }
 
 func (t *EtcdUpgradeTest) listUsers() ([]string, error) {
diff --git a/test/e2e/upgrades/mysql.go b/test/e2e/upgrades/mysql.go
index 6af0b708f21..56744ec0200 100644
--- a/test/e2e/upgrades/mysql.go
+++ b/test/e2e/upgrades/mysql.go
@@ -117,7 +117,7 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
 	ginkgo.By("Verifying that the 2 names have been inserted")
 	count, err := t.countNames()
 	framework.ExpectNoError(err)
-	gomega.Expect(count).To(gomega.Equal(2))
+	framework.ExpectEqual(count, 2)
 }
 
 // Test continually polls the db using the read and write connections, inserting data, and checking
diff --git a/test/e2e/upgrades/sysctl.go b/test/e2e/upgrades/sysctl.go
index 65c4b7f5b4c..abe7eec1546 100644
--- a/test/e2e/upgrades/sysctl.go
+++ b/test/e2e/upgrades/sysctl.go
@@ -56,7 +56,7 @@ func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, u
 		ginkgo.By("Checking the safe sysctl pod keeps running on master upgrade")
 		pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
-		gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodRunning))
+		framework.ExpectEqual(pod.Status.Phase, v1.PodRunning)
 	}
 
 	ginkgo.By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade")
@@ -108,7 +108,7 @@ func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(f *framework.Framewor
 	if ev != nil && ev.Reason == sysctl.UnsupportedReason {
 		framework.Skipf("No sysctl support in Docker <1.12")
 	}
-	gomega.Expect(ev.Reason).To(gomega.Equal(sysctl.ForbiddenReason))
+	framework.ExpectEqual(ev.Reason, sysctl.ForbiddenReason)
 
 	return invalidPod
 }
diff --git a/test/e2e/windows/density.go b/test/e2e/windows/density.go
index 77baa3d995b..9132b55f4b7 100644
--- a/test/e2e/windows/density.go
+++ b/test/e2e/windows/density.go
@@ -132,7 +132,7 @@ func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Dura
 
 	for name, create := range createTimes {
 		watch, ok := watchTimes[name]
-		gomega.Expect(ok).To(gomega.Equal(true))
+		framework.ExpectEqual(ok, true)
 
 		e2eLags = append(e2eLags,
 			framework.PodLatencyData{Name: name, Latency: watch.Time.Sub(create.Time)})
@@ -202,12 +202,12 @@ func newInformerWatchPod(f *framework.Framework, mutex *sync.Mutex, watchTimes m
 		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
 				p, ok := obj.(*v1.Pod)
-				gomega.Expect(ok).To(gomega.Equal(true))
+				framework.ExpectEqual(ok, true)
 				go checkPodRunning(p)
 			},
 			UpdateFunc: func(oldObj, newObj interface{}) {
 				p, ok := newObj.(*v1.Pod)
-				gomega.Expect(ok).To(gomega.Equal(true))
+				framework.ExpectEqual(ok, true)
 				go checkPodRunning(p)
 			},
 		},
diff --git a/test/e2e/windows/memory_limits.go b/test/e2e/windows/memory_limits.go
index 9de6b2b8bd5..4331664edb5 100755
--- a/test/e2e/windows/memory_limits.go
+++ b/test/e2e/windows/memory_limits.go
@@ -97,7 +97,7 @@ func checkNodeAllocatableTest(f *framework.Framework) {
 	ginkgo.By(fmt.Sprintf("Checking stated allocatable memory %v against calculated allocatable memory %v", &nodeMem.allocatable, calculatedNodeAlloc))
 
 	// sanity check against stated allocatable
-	gomega.Expect(calculatedNodeAlloc.Cmp(nodeMem.allocatable)).To(gomega.Equal(0))
+	framework.ExpectEqual(calculatedNodeAlloc.Cmp(nodeMem.allocatable), 0)
 }
 
 // Deploys `allocatablePods + 1` pods, each with a memory limit of `1/allocatablePods` of the total allocatable
@@ -277,7 +277,7 @@ func pollConfigz(timeout time.Duration, pollInterval time.Duration, nodeName str
 	output := string(buf[:n])
 	proxyRegexp := regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
 	match := proxyRegexp.FindStringSubmatch(output)
-	gomega.Expect(len(match)).To(gomega.Equal(2))
+	framework.ExpectEqual(len(match), 2)
 	port, err := strconv.Atoi(match[1])
 	framework.ExpectNoError(err)
 	ginkgo.By("http requesting node kubelet /configz")
diff --git a/test/e2e/windows/volumes.go b/test/e2e/windows/volumes.go
index 21120bd1928..e8fe8814b74 100644
--- a/test/e2e/windows/volumes.go
+++ b/test/e2e/windows/volumes.go
@@ -26,7 +26,6 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 )
 
 const (
@@ -95,7 +94,7 @@ func doReadOnlyTest(f *framework.Framework, source v1.VolumeSource, volumePath s
 
 	_, stderr, _ := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
 
-	gomega.Expect(stderr).To(gomega.Equal("Access is denied."))
+	framework.ExpectEqual(stderr, "Access is denied.")
 
 }
 
@@ -128,12 +127,12 @@ func doReadWriteReadOnlyTest(f *framework.Framework, source v1.VolumeSource, vol
 	framework.ExpectNoError(errRW, msg)
 
 	_, stderr, _ := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
-	gomega.Expect(stderr).To(gomega.Equal("Access is denied."))
+	framework.ExpectEqual(stderr, "Access is denied.")
 
 	readcmd := []string{"cmd", "/c", "type", filePath}
 	readout, readerr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, readcmd...)
 	readmsg := fmt.Sprintf("cmd: %v, stdout: %q, stderr: %q", readcmd, readout, readerr)
-	gomega.Expect(readout).To(gomega.Equal("windows-volume-test"))
+	framework.ExpectEqual(readout, "windows-volume-test")
 	framework.ExpectNoError(err, readmsg)
 }
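
Note for reviewers (not part of the diff above): framework.ExpectEqual lives in test/e2e/framework and is a thin wrapper over the same gomega equality assertion these call sites previously spelled out, which is why the gomega imports can be dropped. A rough sketch of its shape, reproduced from memory rather than from this patch:

// Sketch only; see test/e2e/framework/expect.go for the authoritative definition.
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
	// Offset 1 makes a failure point at the calling test line instead of this helper.
	gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
}

The ExpectWithOffset(1, ...) indirection is what lets tests keep caller-accurate failure locations while standardizing on a single framework helper.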