From c0db5b2562f3dd088e102f267d7f3349aec2928f Mon Sep 17 00:00:00 2001
From: Xuewei Zhang
Date: Wed, 4 Dec 2019 19:01:32 -0800
Subject: [PATCH] Convert ExpectEqual(err, nil) to ExpectNoError(err)

---
 test/e2e/cloud/gcp/node_lease.go             | 12 ++++++------
 test/e2e/cloud/nodes.go                      |  2 +-
 test/e2e_node/container_manager_test.go      |  6 +++---
 test/e2e_node/device_plugin_test.go          |  2 +-
 test/e2e_node/node_problem_detector_linux.go |  2 +-
 5 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/test/e2e/cloud/gcp/node_lease.go b/test/e2e/cloud/gcp/node_lease.go
index 09eb2ecccbb..e9d36ab6b16 100644
--- a/test/e2e/cloud/gcp/node_lease.go
+++ b/test/e2e/cloud/gcp/node_lease.go
@@ -43,7 +43,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		c = f.ClientSet
 		ns = f.Namespace.Name
 		systemPods, err := e2epod.GetPodsInNamespace(c, ns, map[string]string{})
-		framework.ExpectEqual(err, nil)
+		framework.ExpectNoError(err)
 		systemPodsNo = int32(len(systemPods))
 		if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
 			framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
@@ -94,13 +94,13 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		// the cluster is restored to health.
 		ginkgo.By("waiting for system pods to successfully restart")
 		err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
-		framework.ExpectEqual(err, nil)
+		framework.ExpectNoError(err)
 	})
 
 	ginkgo.It("node lease should be deleted when corresponding node is deleted", func() {
 		leaseClient := c.CoordinationV1().Leases(v1.NamespaceNodeLease)
 		err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute)
-		framework.ExpectEqual(err, nil)
+		framework.ExpectNoError(err)
 
 		ginkgo.By("verify node lease exists for every nodes")
 		originalNodes, err := e2enode.GetReadySchedulableNodes(c)
@@ -124,11 +124,11 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)
 		ginkgo.By(fmt.Sprintf("decreasing cluster size to %d", targetNumNodes))
 		err = framework.ResizeGroup(group, targetNumNodes)
-		framework.ExpectEqual(err, nil)
+		framework.ExpectNoError(err)
 		err = framework.WaitForGroupSize(group, targetNumNodes)
-		framework.ExpectEqual(err, nil)
+		framework.ExpectNoError(err)
 		err = e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute)
-		framework.ExpectEqual(err, nil)
+		framework.ExpectNoError(err)
 		targetNodes, err := e2enode.GetReadySchedulableNodes(c)
 		framework.ExpectNoError(err)
 		framework.ExpectEqual(len(targetNodes.Items), int(targetNumNodes))
diff --git a/test/e2e/cloud/nodes.go b/test/e2e/cloud/nodes.go
index d18bd1caf03..76b9bb331c0 100644
--- a/test/e2e/cloud/nodes.go
+++ b/test/e2e/cloud/nodes.go
@@ -61,7 +61,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
 		}
 
 		newNodes, err := e2enode.CheckReady(c, len(origNodes.Items)-1, 5*time.Minute)
-		framework.ExpectEqual(err, nil)
+		framework.ExpectNoError(err)
 		framework.ExpectEqual(len(newNodes), len(origNodes.Items)-1)
 
 		_, err = c.CoreV1().Nodes().Get(nodeToDelete.Name, metav1.GetOptions{})
diff --git a/test/e2e_node/container_manager_test.go b/test/e2e_node/container_manager_test.go
index 3cec8138657..afaf3bf0234 100644
--- a/test/e2e_node/container_manager_test.go
+++ b/test/e2e_node/container_manager_test.go
@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
framework.KubeDescribe("Container Manager Misc [Serial]", func() { ginkgo.Context("once the node is setup", func() { ginkgo.It("container runtime's oom-score-adj should be -999", func() { runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile) - gomega.Expect(err).To(gomega.BeNil(), "failed to get list of container runtime pids") + framework.ExpectNoError(err, "failed to get list of container runtime pids") for _, pid := range runtimePids { gomega.Eventually(func() error { return validateOOMScoreAdjSetting(pid, -999) @@ -88,7 +88,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() { }) ginkgo.It("Kubelet's oom-score-adj should be -999", func() { kubeletPids, err := getPidsForProcess(kubeletProcessName, "") - gomega.Expect(err).To(gomega.BeNil(), "failed to get list of kubelet pids") + framework.ExpectNoError(err, "failed to get list of kubelet pids") framework.ExpectEqual(len(kubeletPids), 1, "expected only one kubelet process; found %d", len(kubeletPids)) gomega.Eventually(func() error { return validateOOMScoreAdjSetting(kubeletPids[0], -999) @@ -100,7 +100,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() { // created before this test, and may not be infra // containers. They should be excluded from the test. existingPausePIDs, err := getPidsForProcess("pause", "") - gomega.Expect(err).To(gomega.BeNil(), "failed to list all pause processes on the node") + framework.ExpectNoError(err, "failed to list all pause processes on the node") existingPausePIDSet := sets.NewInt(existingPausePIDs...) podClient := f.PodClient() diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go index 7cf4a934023..03a87f27adb 100644 --- a/test/e2e_node/device_plugin_test.go +++ b/test/e2e_node/device_plugin_test.go @@ -102,7 +102,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { podResources, err := getNodeDevices() var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources framework.Logf("pod resources %v", podResources) - gomega.Expect(err).To(gomega.BeNil()) + framework.ExpectNoError(err) framework.ExpectEqual(len(podResources.PodResources), 2) for _, res := range podResources.GetPodResources() { if res.Name == pod1.Name { diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index 5c84248e385..c6a02fd1e3f 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete nodeTime = time.Now() bootTime, err = util.GetBootTime() - gomega.Expect(err).To(gomega.BeNil()) + framework.ExpectNoError(err) // Set lookback duration longer than node up time. // Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds.