From f786cb07b792590a69cf3e580e828370fe577f20 Mon Sep 17 00:00:00 2001
From: tanjunchen <2799194073@qq.com>
Date: Fri, 13 Dec 2019 09:58:42 +0800
Subject: [PATCH] test/e2e/: use framework.ExpectEqual() to replace
 gomega.Expect(...).To(gomega.BeTrue()|BeFalse())

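The e2e framework exposes ExpectEqual(actual, extra, explain...), which
asserts strict equality through gomega, so boolean assertions no longer
need the BeTrue()/BeFalse() matchers. As an illustrative sketch (not the
verbatim framework source), the helper behaves roughly like:

    func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
        gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
    }

so every assertion touched by this patch follows the pattern:

    // before
    gomega.Expect(podDeleted).To(gomega.BeTrue())
    // after
    framework.ExpectEqual(podDeleted, true)

Because gomega.Equal is type-strict, the rewritten assertions compare the
boolean results directly against the literals true and false.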
---
 .../apimachinery/crd_conversion_webhook.go    | 36 ++++++++++---------
 test/e2e/apimachinery/webhook.go              |  2 +-
 test/e2e/apps/cronjob.go                      |  4 +--
 test/e2e/apps/daemon_set.go                   |  4 +--
 test/e2e/apps/deployment.go                   |  2 +-
 test/e2e/apps/job.go                          |  4 +--
 test/e2e/common/container_probe.go            |  4 +--
 test/e2e/framework/providers/kubemark/BUILD   |  1 -
 .../framework/providers/kubemark/kubemark.go  |  4 +--
 test/e2e/kubectl/kubectl.go                   |  8 ++---
 test/e2e/network/service.go                   | 14 ++++----
 test/e2e/node/runtimeclass.go                 |  2 +-
 test/e2e/scheduling/preemption.go             |  4 +--
 test/e2e/scheduling/ubernetes_lite_volumes.go |  7 ++--
 14 files changed, 47 insertions(+), 49 deletions(-)

diff --git a/test/e2e/apimachinery/crd_conversion_webhook.go b/test/e2e/apimachinery/crd_conversion_webhook.go
index 7d62d72865b..441bea6998f 100644
--- a/test/e2e/apimachinery/crd_conversion_webhook.go
+++ b/test/e2e/apimachinery/crd_conversion_webhook.go
@@ -371,26 +371,28 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
 	framework.ExpectNoError(err, "waiting for service %s/%s have %d endpoint", namespace, serviceCRDName, 1)
 }
 
-func verifyV1Object(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, obj *unstructured.Unstructured) {
+func verifyV1Object(crd *apiextensionsv1.CustomResourceDefinition, obj *unstructured.Unstructured) {
 	gomega.Expect(obj.GetAPIVersion()).To(gomega.BeEquivalentTo(crd.Spec.Group + "/v1"))
 	hostPort, exists := obj.Object["hostPort"]
-	gomega.Expect(exists).To(gomega.BeTrue())
+	framework.ExpectEqual(exists, true)
+
 	gomega.Expect(hostPort).To(gomega.BeEquivalentTo("localhost:8080"))
 	_, hostExists := obj.Object["host"]
-	gomega.Expect(hostExists).To(gomega.BeFalse())
+	framework.ExpectEqual(hostExists, false)
 	_, portExists := obj.Object["port"]
-	gomega.Expect(portExists).To(gomega.BeFalse())
+	framework.ExpectEqual(portExists, false)
 }
 
-func verifyV2Object(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, obj *unstructured.Unstructured) {
+func verifyV2Object(crd *apiextensionsv1.CustomResourceDefinition, obj *unstructured.Unstructured) {
 	gomega.Expect(obj.GetAPIVersion()).To(gomega.BeEquivalentTo(crd.Spec.Group + "/v2"))
 	_, hostPortExists := obj.Object["hostPort"]
-	gomega.Expect(hostPortExists).To(gomega.BeFalse())
+	framework.ExpectEqual(hostPortExists, false)
+
 	host, hostExists := obj.Object["host"]
-	gomega.Expect(hostExists).To(gomega.BeTrue())
+	framework.ExpectEqual(hostExists, true)
 	gomega.Expect(host).To(gomega.BeEquivalentTo("localhost"))
 	port, portExists := obj.Object["port"]
-	gomega.Expect(portExists).To(gomega.BeTrue())
+	framework.ExpectEqual(portExists, true)
 	gomega.Expect(port).To(gomega.BeEquivalentTo("8080"))
 }
 
@@ -413,7 +415,7 @@ func testCustomResourceConversionWebhook(f *framework.Framework, crd *apiextensi
 	ginkgo.By("v2 custom resource should be converted")
 	v2crd, err := customResourceClients["v2"].Get(name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "Getting v2 of custom resource %s", name)
-	verifyV2Object(f, crd, v2crd)
+	verifyV2Object(crd, v2crd)
 }
 
 func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) {
@@ -473,19 +475,19 @@ func testCRListConversion(f *framework.Framework, testCrd *crd.TestCrd) {
 	list, err := customResourceClients["v1"].List(metav1.ListOptions{})
 	gomega.Expect(err).To(gomega.BeNil())
 	gomega.Expect(len(list.Items)).To(gomega.BeIdenticalTo(2))
-	gomega.Expect((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
-		(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1)).To(gomega.BeTrue())
-	verifyV1Object(f, crd, &list.Items[0])
-	verifyV1Object(f, crd, &list.Items[1])
+	framework.ExpectEqual((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
+		(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1), true)
+	verifyV1Object(crd, &list.Items[0])
+	verifyV1Object(crd, &list.Items[1])
 
 	ginkgo.By("List CRs in v2")
 	list, err = customResourceClients["v2"].List(metav1.ListOptions{})
 	gomega.Expect(err).To(gomega.BeNil())
 	gomega.Expect(len(list.Items)).To(gomega.BeIdenticalTo(2))
-	gomega.Expect((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
-		(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1)).To(gomega.BeTrue())
-	verifyV2Object(f, crd, &list.Items[0])
-	verifyV2Object(f, crd, &list.Items[1])
+	framework.ExpectEqual((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
+		(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1), true)
+	verifyV2Object(crd, &list.Items[0])
+	verifyV2Object(crd, &list.Items[1])
 }
 
 // waitWebhookConversionReady sends stub custom resource creation requests requiring conversion until one succeeds.
diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go
index 3b64949f678..ad37e9e137a 100644
--- a/test/e2e/apimachinery/webhook.go
+++ b/test/e2e/apimachinery/webhook.go
@@ -995,7 +995,7 @@ func testMutatingConfigMapWebhook(f *framework.Framework) {
 	client := f.ClientSet
 	configMap := toBeMutatedConfigMap(f)
 	mutatedConfigMap, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
-	gomega.Expect(err).To(gomega.BeNil())
+	framework.ExpectNoError(err)
 	expectedConfigMapData := map[string]string{
 		"mutation-start":   "yes",
 		"mutation-stage-1": "yes",
diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go
index d8948486f8c..1558135452f 100644
--- a/test/e2e/apps/cronjob.go
+++ b/test/e2e/apps/cronjob.go
@@ -25,7 +25,7 @@ import (
 
 	batchv1 "k8s.io/api/batch/v1"
 	batchv1beta1 "k8s.io/api/batch/v1beta1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -214,7 +214,7 @@ var _ = SIGDescribe("CronJob", func() {
 		ginkgo.By("Ensuring job was deleted")
 		_, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
 		framework.ExpectError(err)
-		gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
+		framework.ExpectEqual(errors.IsNotFound(err), true)
 
 		ginkgo.By("Ensuring the job is not in the cronjob active list")
 		err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name)
diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go
index f7528160962..3d9771cb0fe 100644
--- a/test/e2e/apps/daemon_set.go
+++ b/test/e2e/apps/daemon_set.go
@@ -23,7 +23,7 @@ import (
 	"time"
 
 	appsv1 "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -478,7 +478,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 			rollbackPods[pod.Name] = true
 		}
 		for _, pod := range existingPods {
-			gomega.Expect(rollbackPods[pod.Name]).To(gomega.BeTrue(), fmt.Sprintf("unexpected pod %s be restarted", pod.Name))
+			framework.ExpectEqual(rollbackPods[pod.Name], true, fmt.Sprintf("pod %s was unexpectedly restarted", pod.Name))
 		}
 	})
 })
diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go
index 26a06ba285f..38e5e2308b3 100644
--- a/test/e2e/apps/deployment.go
+++ b/test/e2e/apps/deployment.go
@@ -205,7 +205,7 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) {
 	framework.Logf("Ensuring deployment %s was deleted", deploymentName)
 	_, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
 	framework.ExpectError(err)
-	gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
+	framework.ExpectEqual(errors.IsNotFound(err), true)
 	framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
 	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
 	framework.ExpectNoError(err)
diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go
index ead1669792f..e3ee4497155 100644
--- a/test/e2e/apps/job.go
+++ b/test/e2e/apps/job.go
@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	batchinternal "k8s.io/kubernetes/pkg/apis/batch"
@@ -162,7 +162,7 @@ var _ = SIGDescribe("Job", func() {
 		ginkgo.By("Ensuring job was deleted")
 		_, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
 		framework.ExpectError(err, "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name)
-		gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
+		framework.ExpectEqual(errors.IsNotFound(err), true)
 	})
 
 	/*
diff --git a/test/e2e/common/container_probe.go b/test/e2e/common/container_probe.go
index e1b19ca38f0..5205b25fbab 100644
--- a/test/e2e/common/container_probe.go
+++ b/test/e2e/common/container_probe.go
@@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
 		}
 
 		restartCount := getRestartCount(p)
-		gomega.Expect(restartCount == 0).To(gomega.BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
+		framework.ExpectEqual(restartCount, 0, "pod should have a restart count of 0 but got %v", restartCount)
 	})
 
 	/*
@@ -108,7 +108,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
 		framework.ExpectNotEqual(isReady, true, "pod should be not ready")
 
 		restartCount := getRestartCount(p)
-		gomega.Expect(restartCount == 0).To(gomega.BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
+		framework.ExpectEqual(restartCount, 0, "pod should have a restart count of 0 but got %v", restartCount)
 	})
 
 	/*
diff --git a/test/e2e/framework/providers/kubemark/BUILD b/test/e2e/framework/providers/kubemark/BUILD
index 118a61d6fb3..a905b20c520 100644
--- a/test/e2e/framework/providers/kubemark/BUILD
+++ b/test/e2e/framework/providers/kubemark/BUILD
@@ -11,7 +11,6 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//vendor/github.com/onsi/gomega:go_default_library",
     ],
 )
 
diff --git a/test/e2e/framework/providers/kubemark/kubemark.go b/test/e2e/framework/providers/kubemark/kubemark.go
index 52dde1e5ae7..cd5b79e99ff 100644
--- a/test/e2e/framework/providers/kubemark/kubemark.go
+++ b/test/e2e/framework/providers/kubemark/kubemark.go
@@ -25,8 +25,6 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
"k8s.io/kubernetes/pkg/kubemark" "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/gomega" ) var ( @@ -77,7 +75,7 @@ func (p *Provider) FrameworkBeforeEach(f *framework.Framework) { p.controller, err = kubemark.NewKubemarkController(externalClient, externalInformerFactory, f.ClientSet, kubemarkNodeInformer) framework.ExpectNoError(err) externalInformerFactory.Start(p.closeChannel) - gomega.Expect(p.controller.WaitForCacheSync(p.closeChannel)).To(gomega.BeTrue()) + framework.ExpectEqual(p.controller.WaitForCacheSync(p.closeChannel), true) go p.controller.Run(p.closeChannel) } } diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index b378d985f78..e2599c13f5d 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -527,7 +527,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml)) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in"))) framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) - gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) + framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout), true) }) ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, simplePodSelector) @@ -1385,7 +1385,7 @@ metadata: podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in"))) nsFlag = fmt.Sprintf("--namespace=%v", ns) framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-", nsFlag) - gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) + framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout), true) }) ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, pausePodSelector) @@ -1426,7 +1426,7 @@ metadata: nsFlag = fmt.Sprintf("--namespace=%v", ns) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml"))) framework.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-", nsFlag) - gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) + framework.ExpectEqual(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout), true) }) ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, busyboxPodSelector) @@ -1969,7 +1969,7 @@ metadata: ginkgo.By("verifying the job " + jobName + " was deleted") _, err = c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) framework.ExpectError(err) - gomega.Expect(apierrs.IsNotFound(err)).To(gomega.BeTrue()) + framework.ExpectEqual(apierrs.IsNotFound(err), true) }) }) diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index e1a4052003c..6a96ac82d36 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -2198,7 +2198,7 @@ var _ = SIGDescribe("Services", func() { lbIngress := &svc.Status.LoadBalancer.Ingress[0] svcPort := int(svc.Spec.Ports[0].Port) // should have an internal IP. - gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeTrue()) + framework.ExpectEqual(isInternalEndpoint(lbIngress), true) // ILBs are not accessible from the test orchestrator, so it's necessary to use // a pod to test the service. 
@@ -3060,19 +3060,19 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
 	framework.ExpectNoError(err)
 
 	if !isTransitionTest {
-		gomega.Expect(checkAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
+		framework.ExpectEqual(checkAffinity(execPod, svcIP, servicePort, true), true)
 	}
 	if isTransitionTest {
 		_, err = jig.UpdateService(func(svc *v1.Service) {
 			svc.Spec.SessionAffinity = v1.ServiceAffinityNone
 		})
 		framework.ExpectNoError(err)
-		gomega.Expect(checkAffinity(execPod, svcIP, servicePort, false)).To(gomega.BeTrue())
+		framework.ExpectEqual(checkAffinity(execPod, svcIP, servicePort, false), true)
 		_, err = jig.UpdateService(func(svc *v1.Service) {
 			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
 		})
 		framework.ExpectNoError(err)
-		gomega.Expect(checkAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
+		framework.ExpectEqual(checkAffinity(execPod, svcIP, servicePort, true), true)
 	}
 }
 
@@ -3110,19 +3110,19 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework,
 	port := int(svc.Spec.Ports[0].Port)
 
 	if !isTransitionTest {
-		gomega.Expect(checkAffinity(nil, ingressIP, port, true)).To(gomega.BeTrue())
+		framework.ExpectEqual(checkAffinity(nil, ingressIP, port, true), true)
 	}
 	if isTransitionTest {
 		svc, err = jig.UpdateService(func(svc *v1.Service) {
 			svc.Spec.SessionAffinity = v1.ServiceAffinityNone
 		})
 		framework.ExpectNoError(err)
-		gomega.Expect(checkAffinity(nil, ingressIP, port, false)).To(gomega.BeTrue())
+		framework.ExpectEqual(checkAffinity(nil, ingressIP, port, false), true)
 		svc, err = jig.UpdateService(func(svc *v1.Service) {
 			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
 		})
 		framework.ExpectNoError(err)
-		gomega.Expect(checkAffinity(nil, ingressIP, port, true)).To(gomega.BeTrue())
+		framework.ExpectEqual(checkAffinity(nil, ingressIP, port, true), true)
 	}
 }
 
diff --git a/test/e2e/node/runtimeclass.go b/test/e2e/node/runtimeclass.go
index fb782d8a4ae..0224bb3be9c 100644
--- a/test/e2e/node/runtimeclass.go
+++ b/test/e2e/node/runtimeclass.go
@@ -55,7 +55,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() {
 		}
 		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectError(err, "should be forbidden")
-		gomega.Expect(apierrs.IsForbidden(err)).To(gomega.BeTrue(), "should be forbidden error")
+		framework.ExpectEqual(apierrs.IsForbidden(err), true, "should be forbidden error")
 	})
 
 	ginkgo.It("should run a Pod requesting a RuntimeClass with scheduling [NodeFeature:RuntimeHandler] [Disruptive] ", func() {
diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go
index 106e92e4a5d..5cbdca2c773 100644
--- a/test/e2e/scheduling/preemption.go
+++ b/test/e2e/scheduling/preemption.go
@@ -145,7 +145,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 		preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
 		podDeleted := (err != nil && errors.IsNotFound(err)) ||
 			(err == nil && preemptedPod.DeletionTimestamp != nil)
-		gomega.Expect(podDeleted).To(gomega.BeTrue())
+		framework.ExpectEqual(podDeleted, true)
 		// Other pods (mid priority ones) should be present.
 		for i := 1; i < len(pods); i++ {
 			livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
@@ -214,7 +214,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 		preemptedPod, err := cs.CoreV1().Pods(pods[0].Namespace).Get(pods[0].Name, metav1.GetOptions{})
 		podDeleted := (err != nil && errors.IsNotFound(err)) ||
 			(err == nil && preemptedPod.DeletionTimestamp != nil)
-		gomega.Expect(podDeleted).To(gomega.BeTrue())
+		framework.ExpectEqual(podDeleted, true)
 		// Other pods (mid priority ones) should be present.
 		for i := 1; i < len(pods); i++ {
 			livePod, err := cs.CoreV1().Pods(pods[i].Namespace).Get(pods[i].Name, metav1.GetOptions{})
diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go
index 86c05124a27..141ce8f1c22 100644
--- a/test/e2e/scheduling/ubernetes_lite_volumes.go
+++ b/test/e2e/scheduling/ubernetes_lite_volumes.go
@@ -21,9 +21,8 @@ import (
 	"strconv"
 
 	"github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -168,10 +167,10 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
 		framework.ExpectNoError(err)
 
 		pvZone, ok := pv.ObjectMeta.Labels[v1.LabelZoneFailureDomain]
-		gomega.Expect(ok).To(gomega.BeTrue(), "PV has no LabelZone to be found")
+		framework.ExpectEqual(ok, true, "PV has no zone label")
 		pvZones.Insert(pvZone)
 	}
-	gomega.Expect(pvZones.Equal(expectedZones)).To(gomega.BeTrue(), fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones))
+	framework.ExpectEqual(pvZones.Equal(expectedZones), true, fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones))
 }
 
 type staticPVTestConfig struct {