Mirror of https://github.com/k3s-io/kubernetes.git
commit d51b72d9e9 (parent b1766b707a)

    fix staticcheck in test/e2e/network/
@@ -69,7 +69,6 @@ test/e2e/autoscaling
 test/e2e/instrumentation/logging/stackdriver
 test/e2e/instrumentation/monitoring
 test/e2e/manifest
-test/e2e/network
 test/e2e/storage
 test/e2e/storage/drivers
 test/e2e/storage/testsuites
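This first hunk drops test/e2e/network from the staticcheck failures allowlist (presumably hack/.staticcheck_failures in this tree), so the package is now linted in CI. Every hunk that follows clears one of the violations staticcheck reports for the package: unnecessary fmt.Sprintf calls (S1025), unused code (U1000), redundant control flow (S1023), capitalized error strings (ST1005), comparisons against boolean constants (S1002), assignments that are never read (SA4006), and an empty branch (SA9003).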
@ -212,7 +212,7 @@ var _ = SIGDescribe("DNS", func() {
|
||||
// All the names we need to be able to resolve.
|
||||
// for headless service.
|
||||
namesToResolve := []string{
|
||||
fmt.Sprintf("%s", headlessService.Name),
|
||||
headlessService.Name,
|
||||
fmt.Sprintf("%s.%s", headlessService.Name, f.Namespace.Name),
|
||||
fmt.Sprintf("%s.%s.svc", headlessService.Name, f.Namespace.Name),
|
||||
fmt.Sprintf("_http._tcp.%s.%s.svc", headlessService.Name, f.Namespace.Name),
|
||||
|
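fmt.Sprintf("%s", x) is a no-op when x is already a string, which staticcheck reports as S1025. A minimal, self-contained sketch of the before/after (the variable name is illustrative):

    package main

    import "fmt"

    func main() {
        name := "headless-svc"

        verbose := fmt.Sprintf("%s", name) // flagged by staticcheck S1025
        direct := name                     // the form the diff switches to

        fmt.Println(verbose == direct) // true
    }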
@@ -81,10 +81,6 @@ func (t *dnsTestCommon) init() {
     }
 }
 
-func (t *dnsTestCommon) checkDNSRecord(name string, predicate func([]string) bool, timeout time.Duration) {
-    t.checkDNSRecordFrom(name, predicate, "kube-dns", timeout)
-}
-
 func (t *dnsTestCommon) checkDNSRecordFrom(name string, predicate func([]string) bool, target string, timeout time.Duration) {
     var actual []string
 
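checkDNSRecord was a thin wrapper over checkDNSRecordFrom with no remaining callers, so it is deleted outright; staticcheck reports dead functions like this as U1000 (unused). The reverseArray helper removed further down in this file falls to the same check.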
@@ -118,7 +114,6 @@ func (t *dnsTestCommon) runDig(dnsName, target string) []string {
     case "cluster-dns":
     case "cluster-dns-ipv6":
         cmd = append(cmd, "AAAA")
-        break
     default:
         panic(fmt.Errorf("invalid target: " + target))
     }
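Go switch cases never fall through implicitly, so a break as the last statement of a case is dead code; staticcheck calls this out as redundant control flow (S1023). A runnable sketch of the pattern:

    package main

    import "fmt"

    func main() {
        target := "cluster-dns-ipv6"
        cmd := []string{"dig"}

        switch target {
        case "cluster-dns":
        case "cluster-dns-ipv6":
            cmd = append(cmd, "AAAA")
            // an explicit `break` here would be redundant (S1023):
            // control leaves the switch anyway
        default:
            panic(fmt.Errorf("invalid target: %s", target))
        }

        fmt.Println(cmd) // [dig AAAA]
    }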
@@ -269,6 +264,7 @@ func (t *dnsTestCommon) deleteCoreDNSPods() {
     options := metav1.ListOptions{LabelSelector: label.String()}
 
     pods, err := t.f.ClientSet.CoreV1().Pods("kube-system").List(options)
     framework.ExpectNoError(err, "failed to list pods of kube-system with label %q", label.String())
+    podClient := t.c.CoreV1().Pods(metav1.NamespaceSystem)
 
     for _, pod := range pods.Items {
@@ -614,14 +610,6 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
     framework.Logf("DNS probes using %s succeeded\n", pod.Name)
 }
 
-func reverseArray(arr []string) []string {
-    for i := 0; i < len(arr)/2; i++ {
-        j := len(arr) - i - 1
-        arr[i], arr[j] = arr[j], arr[i]
-    }
-    return arr
-}
-
 func generateDNSUtilsPod() *v1.Pod {
     return &v1.Pod{
         TypeMeta: metav1.TypeMeta{
@ -181,7 +181,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
|
||||
return true, nil
|
||||
})
|
||||
if pollErr != nil {
|
||||
framework.ExpectNoError(fmt.Errorf("Timed out waiting for ingress %s to get %s annotation", name, instanceGroupAnnotation))
|
||||
framework.ExpectNoError(fmt.Errorf("timed out waiting for ingress %s to get %s annotation", name, instanceGroupAnnotation))
|
||||
}
|
||||
|
||||
// Verify that the ingress does not get other annotations like url-map, target-proxy, backends, etc.
|
||||
|
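The only change here is the error string's leading case: staticcheck's ST1005 requires error strings to start lowercase (and carry no trailing punctuation), because callers routinely wrap them mid-sentence. The same fix recurs in the ingress-scale and service-latency hunks below. Sketch:

    package main

    import "fmt"

    func main() {
        name := "my-ingress"

        // Wrapped, a capitalized message reads badly:
        //   "checking ingress: Timed out waiting for my-ingress"
        err := fmt.Errorf("timed out waiting for %s", name) // ST1005-clean

        fmt.Println(fmt.Errorf("checking ingress: %v", err))
    }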
@ -172,6 +172,8 @@ var _ = SIGDescribe("Network", func() {
|
||||
}
|
||||
|
||||
jsonBytes, err := json.Marshal(options)
|
||||
framework.ExpectNoError(err, "could not marshal")
|
||||
|
||||
cmd := fmt.Sprintf(
|
||||
`curl -X POST http://localhost:%v/run/nat-closewait-client -d `+
|
||||
`'%v' 2>/dev/null`,
|
||||
|
@ -134,7 +134,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
|
||||
framework.ExpectEqual(svcTier, cloud.NetworkTierStandard)
|
||||
|
||||
// Wait until the ingress IP changes and verifies the LB.
|
||||
ingressIP = waitAndVerifyLBWithTier(jig, ingressIP, createTimeout, lagTimeout)
|
||||
waitAndVerifyLBWithTier(jig, ingressIP, createTimeout, lagTimeout)
|
||||
})
|
||||
})
|
||||
|
||||
|
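ingressIP was reassigned here but never read again before the closure ends, which staticcheck reports as SA4006 (a value assigned is never read); the fix keeps the call for its side effects and drops the assignment. A condensed illustration, with a hypothetical stand-in for waitAndVerifyLBWithTier:

    package main

    import "fmt"

    // verifyLB stands in for waitAndVerifyLBWithTier: it checks the load
    // balancer and returns the (possibly updated) ingress IP.
    func verifyLB(ip string) string {
        fmt.Println("verifying LB at", ip)
        return ip
    }

    func main() {
        ingressIP := "203.0.113.10"

        // SA4006: `ingressIP = verifyLB(ingressIP)` would write a value
        // that is never read again, so the result is simply discarded.
        verifyLB(ingressIP)
    }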
@@ -24,7 +24,7 @@ import (
     "strings"
     "time"
 
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/framework"
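The core/v1 import gains an explicit v1 alias. The package's name is already v1, so the compiled program is identical; the alias only makes the binding explicit, matching what goimports emits and what the surrounding aliased imports (metav1, appsv1) already do. The same change appears in the import block below.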
@@ -117,7 +117,7 @@ func getIP(iptype v1.NodeAddressType, node *v1.Node) (string, error) {
 
 func getSchedulable(nodes []v1.Node) (*v1.Node, error) {
     for _, node := range nodes {
-        if node.Spec.Unschedulable == false {
+        if !node.Spec.Unschedulable {
             return &node, nil
         }
     }
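Comparing a bool against a constant (x == false) is flagged by staticcheck as S1002; the idiomatic spelling is negation. Sketch with a minimal stand-in for the node type:

    package main

    import "fmt"

    type nodeSpec struct{ Unschedulable bool }

    func main() {
        spec := nodeSpec{}

        // S1002: `if spec.Unschedulable == false` — omit the comparison
        // to the bool constant and negate instead:
        if !spec.Unschedulable {
            fmt.Println("node is schedulable")
        }
    }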
@@ -23,7 +23,7 @@ import (
     "time"
 
     appsv1 "k8s.io/api/apps/v1"
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     networkingv1beta1 "k8s.io/api/networking/v1beta1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
@@ -118,7 +118,7 @@ func (f *IngressScaleFramework) PrepareScaleTest() error {
         Cloud: f.CloudConfig,
     }
     if err := f.GCEController.Init(); err != nil {
-        return fmt.Errorf("Failed to initialize GCE controller: %v", err)
+        return fmt.Errorf("failed to initialize GCE controller: %v", err)
     }
 
     f.ScaleTestSvcs = []*v1.Service{}
@@ -135,7 +135,7 @@ func (f *IngressScaleFramework) CleanupScaleTest() []error {
     for _, ing := range f.ScaleTestIngs {
         if ing != nil {
             if err := f.Clientset.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil); err != nil {
-                errs = append(errs, fmt.Errorf("Error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err))
+                errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err))
             }
         }
     }
@@ -143,14 +143,14 @@ func (f *IngressScaleFramework) CleanupScaleTest() []error {
     for _, svc := range f.ScaleTestSvcs {
         if svc != nil {
             if err := f.Clientset.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil); err != nil {
-                errs = append(errs, fmt.Errorf("Error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err))
+                errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err))
             }
         }
     }
     if f.ScaleTestDeploy != nil {
         f.Logger.Infof("Cleaning up deployment %s...", f.ScaleTestDeploy.Name)
         if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(f.ScaleTestDeploy.Name, nil); err != nil {
-            errs = append(errs, fmt.Errorf("Error while delting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err))
+            errs = append(errs, fmt.Errorf("error while delting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err))
         }
     }
 
@@ -170,7 +170,7 @@ func (f *IngressScaleFramework) RunScaleTest() []error {
     f.Logger.Infof("Creating deployment %s...", testDeploy.Name)
     testDeploy, err := f.Jig.Client.AppsV1().Deployments(f.Namespace).Create(testDeploy)
     if err != nil {
-        errs = append(errs, fmt.Errorf("Failed to create deployment %s: %v", testDeploy.Name, err))
+        errs = append(errs, fmt.Errorf("failed to create deployment %s: %v", testDeploy.Name, err))
         return errs
     }
     f.ScaleTestDeploy = testDeploy
@@ -178,7 +178,7 @@ func (f *IngressScaleFramework) RunScaleTest() []error {
     if f.EnableTLS {
         f.Logger.Infof("Ensuring TLS secret %s...", scaleTestSecretName)
         if err := f.Jig.PrepareTLSSecret(f.Namespace, scaleTestSecretName, scaleTestHostname); err != nil {
-            errs = append(errs, fmt.Errorf("Failed to prepare TLS secret %s: %v", scaleTestSecretName, err))
+            errs = append(errs, fmt.Errorf("failed to prepare TLS secret %s: %v", scaleTestSecretName, err))
             return errs
         }
     }
@ -933,24 +933,24 @@ var _ = SIGDescribe("Services", func() {
|
||||
// Change the services back to ClusterIP.
|
||||
|
||||
ginkgo.By("changing TCP service back to type=ClusterIP")
|
||||
tcpService, err = tcpJig.UpdateService(func(s *v1.Service) {
|
||||
_, err = tcpJig.UpdateService(func(s *v1.Service) {
|
||||
s.Spec.Type = v1.ServiceTypeClusterIP
|
||||
s.Spec.Ports[0].NodePort = 0
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
// Wait for the load balancer to be destroyed asynchronously
|
||||
tcpService, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
|
||||
_, err = tcpJig.WaitForLoadBalancerDestroy(tcpIngressIP, svcPort, loadBalancerCreateTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("changing UDP service back to type=ClusterIP")
|
||||
udpService, err = udpJig.UpdateService(func(s *v1.Service) {
|
||||
_, err = udpJig.UpdateService(func(s *v1.Service) {
|
||||
s.Spec.Type = v1.ServiceTypeClusterIP
|
||||
s.Spec.Ports[0].NodePort = 0
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
if loadBalancerSupportsUDP {
|
||||
// Wait for the load balancer to be destroyed asynchronously
|
||||
udpService, err = udpJig.WaitForLoadBalancerDestroy(udpIngressIP, svcPort, loadBalancerCreateTimeout)
|
||||
_, err = udpJig.WaitForLoadBalancerDestroy(udpIngressIP, svcPort, loadBalancerCreateTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
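The same SA4006 pattern repeats through the Services tests: tcpService, udpService, and (in the next two hunks) service and svc were reassigned after their last read, so each result is now discarded with the blank identifier while err is still checked.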
@ -1381,7 +1381,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
service = t.BuildServiceSpec()
|
||||
service.Spec.Type = v1.ServiceTypeNodePort
|
||||
service.Spec.Ports[0].NodePort = nodePort
|
||||
service, err = t.CreateService(service)
|
||||
_, err = t.CreateService(service)
|
||||
framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
|
||||
})
|
||||
|
||||
@@ -2516,12 +2516,12 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
         gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, true)).To(gomega.BeTrue())
     }
     if isTransitionTest {
-        svc, err = jig.UpdateService(func(svc *v1.Service) {
+        _, err = jig.UpdateService(func(svc *v1.Service) {
             svc.Spec.SessionAffinity = v1.ServiceAffinityNone
         })
         framework.ExpectNoError(err)
         gomega.Expect(e2eservice.CheckAffinity(execPod, svcIP, servicePort, false)).To(gomega.BeTrue())
-        svc, err = jig.UpdateService(func(svc *v1.Service) {
+        _, err = jig.UpdateService(func(svc *v1.Service) {
             svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
         })
         framework.ExpectNoError(err)
@@ -258,11 +258,10 @@ func (eq *endpointQueries) join() {
             delete(eq.requests, got.Name)
             req.endpoints = got
             close(req.result)
-        } else {
-            // We've already recorded a result, but
-            // haven't gotten the request yet. Only
-            // keep the first result.
         }
+        // We've already recorded a result, but
+        // haven't gotten the request yet. Only
+        // keep the first result.
     } else {
         // We haven't gotten the corresponding request
         // yet, save this result.
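The else branch here contained nothing but a comment, which staticcheck reports as an empty branch (SA9003); the fix hoists the comment out and deletes the branch. A small sketch of the before/after shape:

    package main

    import "fmt"

    func main() {
        requests := map[string]bool{"svc-a": true}
        name := "svc-b"

        // Before: an `else` holding only a comment — an empty branch,
        // flagged by staticcheck (SA9003). After: the comment moves
        // below the `if` and the branch disappears.
        if requests[name] {
            delete(requests, name)
            fmt.Println("matched request", name)
        }
        // We've already recorded a result, but haven't gotten the
        // request yet. Only keep the first result.
    }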
@@ -352,7 +351,7 @@ func singleServiceLatency(f *framework.Framework, name string, q *endpointQuerie
     framework.Logf("Created: %v", gotSvc.Name)
 
     if e := q.request(gotSvc.Name); e == nil {
-        return 0, fmt.Errorf("Never got a result for endpoint %v", gotSvc.Name)
+        return 0, fmt.Errorf("never got a result for endpoint %v", gotSvc.Name)
     }
     stopTime := time.Now()
     d := stopTime.Sub(startTime)