From d51b72d9e9bcacd3041db3a167c9c3100abea887 Mon Sep 17 00:00:00 2001
From: tanjunchen <2799194073@qq.com>
Date: Thu, 5 Dec 2019 13:58:28 +0800
Subject: [PATCH] fix staticcheck in test/e2e/network/

---
 hack/.staticcheck_failures          |  1 -
 test/e2e/network/dns.go             |  2 +-
 test/e2e/network/dns_common.go      | 14 +-------------
 test/e2e/network/ingress.go         |  2 +-
 test/e2e/network/kube_proxy.go      |  2 ++
 test/e2e/network/network_tiers.go   |  2 +-
 test/e2e/network/no_snat.go         |  4 ++--
 test/e2e/network/scale/ingress.go   | 14 +++++++-------
 test/e2e/network/service.go         | 14 +++++++-------
 test/e2e/network/service_latency.go |  9 ++++-----
 10 files changed, 26 insertions(+), 38 deletions(-)

diff --git a/hack/.staticcheck_failures b/hack/.staticcheck_failures
index b850e0913ac..eeeeada27e3 100644
--- a/hack/.staticcheck_failures
+++ b/hack/.staticcheck_failures
@@ -69,7 +69,6 @@ test/e2e/autoscaling
 test/e2e/instrumentation/logging/stackdriver
 test/e2e/instrumentation/monitoring
 test/e2e/manifest
-test/e2e/network
 test/e2e/storage
 test/e2e/storage/drivers
 test/e2e/storage/testsuites
diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go
index 54a06219699..e650ff50a6f 100644
--- a/test/e2e/network/dns.go
+++ b/test/e2e/network/dns.go
@@ -212,7 +212,7 @@ var _ = SIGDescribe("DNS", func() {
 		// All the names we need to be able to resolve.
 		// for headless service.
 		namesToResolve := []string{
-			fmt.Sprintf("%s", headlessService.Name),
+			headlessService.Name,
 			fmt.Sprintf("%s.%s", headlessService.Name, f.Namespace.Name),
 			fmt.Sprintf("%s.%s.svc", headlessService.Name, f.Namespace.Name),
 			fmt.Sprintf("_http._tcp.%s.%s.svc", headlessService.Name, f.Namespace.Name),
diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go
index fae706edefb..6a1e629bcaa 100644
--- a/test/e2e/network/dns_common.go
+++ b/test/e2e/network/dns_common.go
@@ -81,10 +81,6 @@ func (t *dnsTestCommon) init() {
 	}
 }
 
-func (t *dnsTestCommon) checkDNSRecord(name string, predicate func([]string) bool, timeout time.Duration) {
-	t.checkDNSRecordFrom(name, predicate, "kube-dns", timeout)
-}
-
 func (t *dnsTestCommon) checkDNSRecordFrom(name string, predicate func([]string) bool, target string, timeout time.Duration) {
 	var actual []string
 
@@ -118,7 +114,6 @@ func (t *dnsTestCommon) runDig(dnsName, target string) []string {
 	case "cluster-dns":
 	case "cluster-dns-ipv6":
 		cmd = append(cmd, "AAAA")
-		break
 	default:
 		panic(fmt.Errorf("invalid target: " + target))
 	}
@@ -269,6 +264,7 @@ func (t *dnsTestCommon) deleteCoreDNSPods() {
 	options := metav1.ListOptions{LabelSelector: label.String()}
 
 	pods, err := t.f.ClientSet.CoreV1().Pods("kube-system").List(options)
+	framework.ExpectNoError(err, "failed to list pods of kube-system with label %q", label.String())
 	podClient := t.c.CoreV1().Pods(metav1.NamespaceSystem)
 
 	for _, pod := range pods.Items {
@@ -614,14 +610,6 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
 	framework.Logf("DNS probes using %s succeeded\n", pod.Name)
 }
 
-func reverseArray(arr []string) []string {
-	for i := 0; i < len(arr)/2; i++ {
-		j := len(arr) - i - 1
-		arr[i], arr[j] = arr[j], arr[i]
-	}
-	return arr
-}
-
 func generateDNSUtilsPod() *v1.Pod {
 	return &v1.Pod{
 		TypeMeta: metav1.TypeMeta{
diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go
index e9ae7b3c3bd..93131e99b5a 100644
--- a/test/e2e/network/ingress.go
+++ b/test/e2e/network/ingress.go
@@ -181,7 +181,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 			return true, nil
 		})
 		if pollErr != nil {
-			framework.ExpectNoError(fmt.Errorf("Timed out waiting for ingress %s to get %s annotation", name, instanceGroupAnnotation))
+			framework.ExpectNoError(fmt.Errorf("timed out waiting for ingress %s to get %s annotation", name, instanceGroupAnnotation))
 		}
 
 		// Verify that the ingress does not get other annotations like url-map, target-proxy, backends, etc.
diff --git a/test/e2e/network/kube_proxy.go b/test/e2e/network/kube_proxy.go
index 118caa691c9..10d2cb86e69 100644
--- a/test/e2e/network/kube_proxy.go
+++ b/test/e2e/network/kube_proxy.go
@@ -172,6 +172,8 @@ var _ = SIGDescribe("Network", func() {
 		}
 
 		jsonBytes, err := json.Marshal(options)
+		framework.ExpectNoError(err, "could not marshal")
+
 		cmd := fmt.Sprintf(
 			`curl -X POST http://localhost:%v/run/nat-closewait-client -d `+
 				`'%v' 2>/dev/null`,
diff --git a/test/e2e/network/network_tiers.go b/test/e2e/network/network_tiers.go
index d1386410076..5edda5e5fa6 100644
--- a/test/e2e/network/network_tiers.go
+++ b/test/e2e/network/network_tiers.go
@@ -134,7 +134,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
 			framework.ExpectEqual(svcTier, cloud.NetworkTierStandard)
 
 			// Wait until the ingress IP changes and verifies the LB.
-			ingressIP = waitAndVerifyLBWithTier(jig, ingressIP, createTimeout, lagTimeout)
+			waitAndVerifyLBWithTier(jig, ingressIP, createTimeout, lagTimeout)
 		})
 	})
 
diff --git a/test/e2e/network/no_snat.go b/test/e2e/network/no_snat.go
index 4874bb1774c..7a61c1f21f9 100644
--- a/test/e2e/network/no_snat.go
+++ b/test/e2e/network/no_snat.go
@@ -24,7 +24,7 @@ import (
 	"strings"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -117,7 +117,7 @@ func getIP(iptype v1.NodeAddressType, node *v1.Node) (string, error) {
 
 func getSchedulable(nodes []v1.Node) (*v1.Node, error) {
 	for _, node := range nodes {
-		if node.Spec.Unschedulable == false {
+		if !node.Spec.Unschedulable {
 			return &node, nil
 		}
 	}
diff --git a/test/e2e/network/scale/ingress.go b/test/e2e/network/scale/ingress.go
index 45c526f5cb9..78f6f6cebbe 100644
--- a/test/e2e/network/scale/ingress.go
+++ b/test/e2e/network/scale/ingress.go
@@ -23,7 +23,7 @@ import (
 	"time"
 
 	appsv1 "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	networkingv1beta1 "k8s.io/api/networking/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
@@ -118,7 +118,7 @@ func (f *IngressScaleFramework) PrepareScaleTest() error {
 		Cloud: f.CloudConfig,
 	}
 	if err := f.GCEController.Init(); err != nil {
-		return fmt.Errorf("Failed to initialize GCE controller: %v", err)
+		return fmt.Errorf("failed to initialize GCE controller: %v", err)
 	}
 
 	f.ScaleTestSvcs = []*v1.Service{}
@@ -135,7 +135,7 @@ func (f *IngressScaleFramework) CleanupScaleTest() []error {
 	for _, ing := range f.ScaleTestIngs {
 		if ing != nil {
 			if err := f.Clientset.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil); err != nil {
-				errs = append(errs, fmt.Errorf("Error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err))
+				errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err))
 			}
 		}
 	}
@@ -143,14 +143,14 @@
 	for _, svc := range f.ScaleTestSvcs {
 		if svc != nil {
 			if err := f.Clientset.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil); err != nil {
-				errs = append(errs, fmt.Errorf("Error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err))
%v", svc.Namespace, svc.Name, err)) + errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err)) } } } if f.ScaleTestDeploy != nil { f.Logger.Infof("Cleaning up deployment %s...", f.ScaleTestDeploy.Name) if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(f.ScaleTestDeploy.Name, nil); err != nil { - errs = append(errs, fmt.Errorf("Error while delting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err)) + errs = append(errs, fmt.Errorf("error while delting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err)) } } @@ -170,7 +170,7 @@ func (f *IngressScaleFramework) RunScaleTest() []error { f.Logger.Infof("Creating deployment %s...", testDeploy.Name) testDeploy, err := f.Jig.Client.AppsV1().Deployments(f.Namespace).Create(testDeploy) if err != nil { - errs = append(errs, fmt.Errorf("Failed to create deployment %s: %v", testDeploy.Name, err)) + errs = append(errs, fmt.Errorf("failed to create deployment %s: %v", testDeploy.Name, err)) return errs } f.ScaleTestDeploy = testDeploy @@ -178,7 +178,7 @@ func (f *IngressScaleFramework) RunScaleTest() []error { if f.EnableTLS { f.Logger.Infof("Ensuring TLS secret %s...", scaleTestSecretName) if err := f.Jig.PrepareTLSSecret(f.Namespace, scaleTestSecretName, scaleTestHostname); err != nil { - errs = append(errs, fmt.Errorf("Failed to prepare TLS secret %s: %v", scaleTestSecretName, err)) + errs = append(errs, fmt.Errorf("failed to prepare TLS secret %s: %v", scaleTestSecretName, err)) return errs } } diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index d6d93dc700a..94a6079c518 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -933,24 +933,24 @@ var _ = SIGDescribe("Services", func() { // Change the services back to ClusterIP. 
ginkgo.By("changing TCP service back to type=ClusterIP") - tcpService, err = tcpJig.UpdateService(func(s *v1.Service) { + _, err = tcpJig.UpdateService(func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.Ports[0].NodePort = 0 }) framework.ExpectNoError(err) // Wait for the load balancer to be destroyed asynchronously - tcpService, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout) + _, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout) framework.ExpectNoError(err) ginkgo.By("changing UDP service back to type=ClusterIP") - udpService, err = udpJig.UpdateService(func(s *v1.Service) { + _, err = udpJig.UpdateService(func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.Ports[0].NodePort = 0 }) framework.ExpectNoError(err) if loadBalancerSupportsUDP { // Wait for the load balancer to be destroyed asynchronously - udpService, err = udpJig.WaitForLoadBalancerDestroy(udpIngressIP, svcPort, loadBalancerCreateTimeout) + _, err = udpJig.WaitForLoadBalancerDestroy(udpIngressIP, svcPort, loadBalancerCreateTimeout) framework.ExpectNoError(err) } @@ -1381,7 +1381,7 @@ var _ = SIGDescribe("Services", func() { service = t.BuildServiceSpec() service.Spec.Type = v1.ServiceTypeNodePort service.Spec.Ports[0].NodePort = nodePort - service, err = t.CreateService(service) + _, err = t.CreateService(service) framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) }) @@ -2516,12 +2516,12 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue()) } if isTransitionTest { - svc, err = jig.UpdateService(func(svc *v1.Service) { + _, err = jig.UpdateService(func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityNone }) framework.ExpectNoError(err) gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, false)).To(gomega.BeTrue()) - svc, err = jig.UpdateService(func(svc *v1.Service) { + _, err = jig.UpdateService(func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP }) framework.ExpectNoError(err) diff --git a/test/e2e/network/service_latency.go b/test/e2e/network/service_latency.go index b0ef767344e..741dbae4fca 100644 --- a/test/e2e/network/service_latency.go +++ b/test/e2e/network/service_latency.go @@ -258,11 +258,10 @@ func (eq *endpointQueries) join() { delete(eq.requests, got.Name) req.endpoints = got close(req.result) - } else { - // We've already recorded a result, but - // haven't gotten the request yet. Only - // keep the first result. } + // We've already recorded a result, but + // haven't gotten the request yet. Only + // keep the first result. } else { // We haven't gotten the corresponding request // yet, save this result. @@ -352,7 +351,7 @@ func singleServiceLatency(f *framework.Framework, name string, q *endpointQuerie framework.Logf("Created: %v", gotSvc.Name) if e := q.request(gotSvc.Name); e == nil { - return 0, fmt.Errorf("Never got a result for endpoint %v", gotSvc.Name) + return 0, fmt.Errorf("never got a result for endpoint %v", gotSvc.Name) } stopTime := time.Now() d := stopTime.Sub(startTime)