diff --git a/hack/.staticcheck_failures b/hack/.staticcheck_failures
index f4036458c03..2b469c9979f 100644
--- a/hack/.staticcheck_failures
+++ b/hack/.staticcheck_failures
@@ -131,17 +131,6 @@ test/e2e/apps
 test/e2e/auth
 test/e2e/autoscaling
 test/e2e/common
-test/e2e/framework
-test/e2e/framework/ingress
-test/e2e/framework/kubelet
-test/e2e/framework/node
-test/e2e/framework/pod
-test/e2e/framework/podlogs
-test/e2e/framework/providers/aws
-test/e2e/framework/providers/gce
-test/e2e/framework/psp
-test/e2e/framework/service
-test/e2e/framework/volume
 test/e2e/instrumentation/logging/stackdriver
 test/e2e/instrumentation/monitoring
 test/e2e/lifecycle
diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go
index c265a2e885e..806fff70f41 100644
--- a/test/e2e/framework/ingress/ingress_utils.go
+++ b/test/e2e/framework/ingress/ingress_utils.go
@@ -95,12 +95,6 @@ const (
 	// IngressReqTimeout is the timeout on a single http request.
 	IngressReqTimeout = 10 * time.Second
 
-	// healthz port used to verify glbc restarted correctly on the master.
-	glbcHealthzPort = 8086
-
-	// General cloud resource poll timeout (eg: create static ip, firewall etc)
-	cloudResourcePollTimeout = 5 * time.Minute
-
 	// NEGAnnotation is NEG annotation.
 	NEGAnnotation = "cloud.google.com/neg"
 
diff --git a/test/e2e/framework/kubelet/stats.go b/test/e2e/framework/kubelet/stats.go
index 0f7461f0971..f3171a1915c 100644
--- a/test/e2e/framework/kubelet/stats.go
+++ b/test/e2e/framework/kubelet/stats.go
@@ -304,14 +304,12 @@ func GetOneTimeResourceUsageOnNode(
 	// Process container infos that are relevant to us.
 	containers := containerNames()
 	usageMap := make(ResourceUsagePerContainer, len(containers))
-	observedContainers := []string{}
 	for _, pod := range summary.Pods {
 		for _, container := range pod.Containers {
 			isInteresting := false
 			for _, interestingContainerName := range containers {
 				if container.Name == interestingContainerName {
 					isInteresting = true
-					observedContainers = append(observedContainers, container.Name)
 					break
 				}
 			}
diff --git a/test/e2e/framework/networking_utils.go b/test/e2e/framework/networking_utils.go
index e8aaec3269b..b8c37d30a06 100644
--- a/test/e2e/framework/networking_utils.go
+++ b/test/e2e/framework/networking_utils.go
@@ -621,18 +621,6 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
 	}
 }
 
-func (config *NetworkingTestConfig) cleanup() {
-	nsClient := config.getNamespacesClient()
-	nsList, err := nsClient.List(metav1.ListOptions{})
-	if err == nil {
-		for _, ns := range nsList.Items {
-			if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.Namespace {
-				nsClient.Delete(ns.Name, nil)
-			}
-		}
-	}
-}
-
 // shuffleNodes copies nodes from the specified slice into a copy in random
 // order. It returns a new slice.
 func shuffleNodes(nodes []v1.Node) []v1.Node {
@@ -713,10 +701,6 @@ func (config *NetworkingTestConfig) getServiceClient() coreclientset.ServiceInte
 	return config.f.ClientSet.CoreV1().Services(config.Namespace)
 }
 
-func (config *NetworkingTestConfig) getNamespacesClient() coreclientset.NamespaceInterface {
-	return config.f.ClientSet.CoreV1().Namespaces()
-}
-
 // CheckReachabilityFromPod checks reachability from the specified pod.
 func CheckReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, namespace, pod, target string) {
 	cmd := fmt.Sprintf("wget -T 5 -qO- %q", target)
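
[review note] The deletions in these four files are dead-code removals: judging by the shrinking hack/.staticcheck_failures list, they clear staticcheck's unused check (U1000), which flags unexported identifiers that nothing in the package references (cleanup, getNamespacesClient, glbcHealthzPort, cloudResourcePollTimeout, observedContainers). Removing packages from the failures list re-enables enforcement for them. A minimal sketch of the class of finding, in a toy package that is not part of this patch:

	package demo

	// unusedHelper is never referenced inside the package, so staticcheck
	// reports it under U1000; deleting it is the usual fix.
	func unusedHelper() {}

	// Exported names are not flagged, since other packages may rely on them.
	func UsedElsewhere() {}
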
diff --git a/test/e2e/framework/node/resource.go b/test/e2e/framework/node/resource.go
index c15111b7980..88328dbf325 100644
--- a/test/e2e/framework/node/resource.go
+++ b/test/e2e/framework/node/resource.go
@@ -96,9 +96,10 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
 		if !hasNodeControllerTaints {
 			msg = fmt.Sprintf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
 				conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
+		} else {
+			msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
+				conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
 		}
-		msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
-			conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
 		if !silent {
 			e2elog.Logf(msg)
 		}
diff --git a/test/e2e/framework/pod/resource.go b/test/e2e/framework/pod/resource.go
index 87f86386979..0d3b4b274c1 100644
--- a/test/e2e/framework/pod/resource.go
+++ b/test/e2e/framework/pod/resource.go
@@ -58,20 +58,6 @@ func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
 	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
 }
 
-// TODO: Move to its own subpkg.
-// expectNoErrorWithRetries checks if an error occurs with the given retry count.
-func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
-	var err error
-	for i := 0; i < maxRetries; i++ {
-		err = fn()
-		if err == nil {
-			return
-		}
-		e2elog.Logf("(Attempt %d of %d) Unexpected error occurred: %v", i+1, maxRetries, err)
-	}
-	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
-}
-
 func isElementOf(podUID types.UID, pods *v1.PodList) bool {
 	for _, pod := range pods.Items {
 		if pod.UID == podUID {
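
[review note] The node/resource.go hunk fixes a dead store (staticcheck SA4006, "value never read"): the taint message used to sit outside the if, so it unconditionally overwrote the first Sprintf and the condition-mismatch message could never be logged. A stripped-down, runnable illustration with toy values of our own, not from this patch:

	package main

	import "fmt"

	// message mirrors the fixed control flow: exactly one branch assigns
	// msg, so neither assignment is dead.
	func message(tainted bool) string {
		var msg string
		if !tainted {
			msg = "condition mismatch"
		} else {
			// Before the fix this assignment ran unconditionally, so the
			// branch above could never influence the result.
			msg = "node tainted"
		}
		return msg
	}

	func main() {
		fmt.Println(message(false)) // prints "condition mismatch"
	}
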
diff --git a/test/e2e/framework/podlogs/podlogs.go b/test/e2e/framework/podlogs/podlogs.go
index 90911e885fd..cfa7c3e80a2 100644
--- a/test/e2e/framework/podlogs/podlogs.go
+++ b/test/e2e/framework/podlogs/podlogs.go
@@ -251,10 +251,10 @@ func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Wri
 				)
 			} else if cst.State.Running != nil {
 				fmt.Fprintf(buffer, "RUNNING")
-			} else if cst.State.Waiting != nil {
+			} else if cst.State.Terminated != nil {
 				fmt.Fprintf(buffer, "TERMINATED: %s - %s",
-					cst.State.Waiting.Reason,
-					cst.State.Waiting.Message,
+					cst.State.Terminated.Reason,
+					cst.State.Terminated.Message,
 				)
 			}
 			fmt.Fprintf(buffer, "\n")
diff --git a/test/e2e/framework/providers/aws/aws.go b/test/e2e/framework/providers/aws/aws.go
index 73f7b450ee7..3ed2335507b 100644
--- a/test/e2e/framework/providers/aws/aws.go
+++ b/test/e2e/framework/providers/aws/aws.go
@@ -50,13 +50,21 @@ type Provider struct {
 
 // ResizeGroup resizes an instance group
 func (p *Provider) ResizeGroup(group string, size int32) error {
-	client := autoscaling.New(session.New())
+	awsSession, err := session.NewSession()
+	if err != nil {
+		return err
+	}
+	client := autoscaling.New(awsSession)
 	return awscloud.ResizeInstanceGroup(client, group, int(size))
 }
 
 // GroupSize returns the size of an instance group
 func (p *Provider) GroupSize(group string) (int, error) {
-	client := autoscaling.New(session.New())
+	awsSession, err := session.NewSession()
+	if err != nil {
+		return -1, err
+	}
+	client := autoscaling.New(awsSession)
 	instanceGroup, err := awscloud.DescribeInstanceGroup(client, group)
 	if err != nil {
 		return -1, fmt.Errorf("error describing instance group: %v", err)
@@ -151,5 +159,9 @@ func newAWSClient(zone string) *ec2.EC2 {
 		region := zone[:len(zone)-1]
 		cfg = &aws.Config{Region: aws.String(region)}
 	}
-	return ec2.New(session.New(), cfg)
+	awsSession, err := session.NewSession()
+	if err != nil {
+		e2elog.Logf("Warning: failed to create aws session: %v", err)
+	}
+	return ec2.New(awsSession, cfg)
 }
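
[review note] session.New is deprecated in aws-sdk-go v1 (staticcheck SA1019) precisely because it has no way to report configuration errors; session.NewSession returns (*session.Session, error), which is what lets ResizeGroup and GroupSize propagate the failure. The podlogs.go hunk just above fixes a copy-paste slip: the branch guarded on State.Waiting but printed TERMINATED with Waiting's fields; it now inspects State.Terminated, the state that actually carries those details. A minimal sketch of the session pattern, assuming the standard aws-sdk-go v1 packages:

	package demo

	import (
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/ec2"
	)

	// newEC2 surfaces session-construction errors to its caller instead of
	// swallowing them the way the deprecated session.New did.
	func newEC2() (*ec2.EC2, error) {
		sess, err := session.NewSession()
		if err != nil {
			return nil, err
		}
		return ec2.New(sess), nil
	}

In newAWSClient the failure can only be logged, since the function's signature returns no error; including err in the log line keeps the failure diagnosable, and naming the variable awsSession avoids shadowing the session package.
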
diff --git a/test/e2e/framework/providers/gce/ingress.go b/test/e2e/framework/providers/gce/ingress.go
index 2da229ef7b9..50f5173f766 100644
--- a/test/e2e/framework/providers/gce/ingress.go
+++ b/test/e2e/framework/providers/gce/ingress.go
@@ -70,11 +70,8 @@ type backendType string
 // IngressController manages implementation details of Ingress on GCE/GKE.
 type IngressController struct {
 	Ns           string
-	rcPath       string
 	UID          string
 	staticIPName string
-	rc           *v1.ReplicationController
-	svc          *v1.Service
 	Client       clientset.Interface
 	Cloud        framework.CloudConfig
 }
diff --git a/test/e2e/framework/providers/gce/recreate_node.go b/test/e2e/framework/providers/gce/recreate_node.go
index 1ac18ab7811..ba77d71a2d5 100644
--- a/test/e2e/framework/providers/gce/recreate_node.go
+++ b/test/e2e/framework/providers/gce/recreate_node.go
@@ -58,6 +58,7 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
 		e2elog.Logf("Got the following nodes before recreate %v", nodeNames(originalNodes))
 
 		ps, err = testutils.NewPodStore(f.ClientSet, systemNamespace, labels.Everything(), fields.Everything())
+		framework.ExpectNoError(err)
 		allPods := ps.List()
 		originalPods := e2epod.FilterNonRestartablePods(allPods)
 		originalPodNames = make([]string, len(originalPods))
diff --git a/test/e2e/framework/psp/psp.go b/test/e2e/framework/psp/psp.go
index 18c9b192e8f..e3312b497cc 100644
--- a/test/e2e/framework/psp/psp.go
+++ b/test/e2e/framework/psp/psp.go
@@ -118,7 +118,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
 	}
 
 	psp := privilegedPSP(podSecurityPolicyPrivileged)
-	psp, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(psp)
+	_, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(psp)
 	if !apierrs.IsAlreadyExists(err) {
 		ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
 	}
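
[review note] Two related error-handling cleanups above: recreate_node.go now checks err before calling ps.List(), since a nil PodStore would otherwise panic far from the root cause, and psp.go assigns the unused Create result to the blank identifier so only the error, the part the code actually inspects, is kept. A runnable toy of the check-before-use pattern; the types and names here are ours, not from the patch:

	package main

	import (
		"errors"
		"fmt"
	)

	type podStore struct{ pods []string }

	func newPodStore(healthy bool) (*podStore, error) {
		if !healthy {
			return nil, errors.New("list/watch failed")
		}
		return &podStore{pods: []string{"kube-proxy-x1"}}, nil
	}

	func main() {
		ps, err := newPodStore(true)
		if err != nil { // check before use: ps is nil whenever err != nil
			fmt.Println("fatal:", err)
			return
		}
		fmt.Println(ps.pods) // safe only because err was checked first
	}
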
diff --git a/test/e2e/framework/service/jig.go b/test/e2e/framework/service/jig.go
index 238bb3f9e66..ffcb3239dc9 100644
--- a/test/e2e/framework/service/jig.go
+++ b/test/e2e/framework/service/jig.go
@@ -229,7 +229,7 @@ func (j *TestJig) CreateOnlyLocalNodePortService(namespace, serviceName string,
 func (j *TestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName string, timeout time.Duration, createPod bool,
 	tweak func(svc *v1.Service)) *v1.Service {
 	ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer and ExternalTrafficPolicy=Local")
-	svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
+	j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
 		svc.Spec.Type = v1.ServiceTypeLoadBalancer
 		// We need to turn affinity off for our LB distribution tests
 		svc.Spec.SessionAffinity = v1.ServiceAffinityNone
@@ -244,7 +244,7 @@ func (j *TestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName stri
 		j.RunOrFail(namespace, nil)
 	}
 	ginkgo.By("waiting for loadbalancer for service " + namespace + "/" + serviceName)
-	svc = j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
+	svc := j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
 	j.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
 	return svc
 }
@@ -253,7 +253,7 @@ func (j *TestJig) CreateOnlyLocalLoadBalancerService(namespace, serviceName stri
 // for it to acquire an ingress IP.
 func (j *TestJig) CreateLoadBalancerService(namespace, serviceName string, timeout time.Duration, tweak func(svc *v1.Service)) *v1.Service {
 	ginkgo.By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer")
-	svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
+	j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
 		svc.Spec.Type = v1.ServiceTypeLoadBalancer
 		// We need to turn affinity off for our LB distribution tests
 		svc.Spec.SessionAffinity = v1.ServiceAffinityNone
@@ -263,7 +263,7 @@ func (j *TestJig) CreateLoadBalancerService(namespace, serviceName string, timeo
 	})
 
 	ginkgo.By("waiting for loadbalancer for service " + namespace + "/" + serviceName)
-	svc = j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
+	svc := j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
 	j.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
 	return svc
 }
diff --git a/test/e2e/framework/suites.go b/test/e2e/framework/suites.go
index 97ef26fc2d7..083c129d9c7 100644
--- a/test/e2e/framework/suites.go
+++ b/test/e2e/framework/suites.go
@@ -31,10 +31,6 @@ import (
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
 
-var (
-	cloudConfig = &TestContext.CloudConfig
-)
-
 // SetupSuite is the boilerplate that can be used to setup ginkgo test suites, on the SynchronizedBeforeSuite step.
 // There are certain operations we only want to run once per overall test invocation
 // (such as deleting old namespaces, or verifying that all system pods are running.
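
[review note] In both jig.go functions, the value returned by CreateTCPServiceOrFail was immediately overwritten by WaitForLoadBalancerOrFail, so the first assignment was a dead store; dropping it and declaring svc at the wait call leaves one meaningful binding. The suites.go hunk removes an unused package-level var for the same reason. A compressed illustration with toy functions of our own:

	package main

	import "fmt"

	func create() string { return "pending" }

	func wait() string { return "ready" }

	func main() {
		// Before: svc := create(); svc = wait()
		// create's result was never read, only its side effects mattered.
		create()
		svc := wait() // the first and only binding of svc
		fmt.Println(svc)
	}
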
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index eec98bf2790..defad96dc53 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -37,7 +37,6 @@ import (
 	"strings"
 	"sync"
 	"syscall"
-	"text/tabwriter"
 	"time"
 
 	"golang.org/x/net/websocket"
@@ -120,9 +119,6 @@ const (
 	// failures caused by leaked resources from a previous test run.
 	NamespaceCleanupTimeout = 15 * time.Minute
 
-	// Some pods can take much longer to get ready due to volume attach/detach latency.
-	slowPodStartTimeout = 15 * time.Minute
-
 	// ServiceStartTimeout is how long to wait for a service endpoint to be resolvable.
 	ServiceStartTimeout = 3 * time.Minute
 
@@ -149,10 +145,6 @@ const (
 	// PodReadyBeforeTimeout is how long pods have to be "ready" when a test begins.
 	PodReadyBeforeTimeout = 5 * time.Minute
 
-	// How long pods have to become scheduled onto nodes
-	podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second)
-
-	podRespondingTimeout = 15 * time.Minute
 	// ClaimProvisionTimeout is how long claims have to become dynamically provisioned.
 	ClaimProvisionTimeout = 5 * time.Minute
 
@@ -214,13 +206,6 @@ var (
 	// For parsing Kubectl version for version-skewed testing.
 	gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")
 
-	// Slice of regexps for names of pods that have to be running to consider a Node "healthy"
-	requiredPerNodePods = []*regexp.Regexp{
-		regexp.MustCompile(".*kube-proxy.*"),
-		regexp.MustCompile(".*fluentd-elasticsearch.*"),
-		regexp.MustCompile(".*node-problem-detector.*"),
-	}
-
 	// ServeHostnameImage is a serve hostname image name.
 	ServeHostnameImage = imageutils.GetE2EImage(imageutils.Agnhost)
 )
@@ -438,7 +423,7 @@ func getDefaultClusterIPFamily(c clientset.Interface) string {
 // ProviderIs returns true if the provider is included is the providers. Otherwise false.
 func ProviderIs(providers ...string) bool {
 	for _, provider := range providers {
-		if strings.ToLower(provider) == strings.ToLower(TestContext.Provider) {
+		if strings.EqualFold(provider, TestContext.Provider) {
 			return true
 		}
 	}
@@ -448,7 +433,7 @@ func ProviderIs(providers ...string) bool {
 // MasterOSDistroIs returns true if the master OS distro is included in the supportedMasterOsDistros. Otherwise false.
 func MasterOSDistroIs(supportedMasterOsDistros ...string) bool {
 	for _, distro := range supportedMasterOsDistros {
-		if strings.ToLower(distro) == strings.ToLower(TestContext.MasterOSDistro) {
+		if strings.EqualFold(distro, TestContext.MasterOSDistro) {
 			return true
 		}
 	}
@@ -458,7 +443,7 @@ func MasterOSDistroIs(supportedMasterOsDistros ...string) bool {
 // NodeOSDistroIs returns true if the node OS distro is included in the supportedNodeOsDistros. Otherwise false.
 func NodeOSDistroIs(supportedNodeOsDistros ...string) bool {
 	for _, distro := range supportedNodeOsDistros {
-		if strings.ToLower(distro) == strings.ToLower(TestContext.NodeOSDistro) {
+		if strings.EqualFold(distro, TestContext.NodeOSDistro) {
 			return true
 		}
 	}
@@ -522,32 +507,6 @@ func SkipIfMissingResource(dynamicClient dynamic.Interface, gvr schema.GroupVers
 // ProvidersWithSSH are those providers where each node is accessible with SSH
 var ProvidersWithSSH = []string{"gce", "gke", "aws", "local"}
 
-type podCondition func(pod *v1.Pod) (bool, error)
-
-// errorBadPodsStates create error message of basic info of bad pods for debugging.
-func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
-	errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
-	// Print bad pods info only if there are fewer than 10 bad pods
-	if len(badPods) > 10 {
-		return errStr + "There are too many bad pods. Please check log for details."
-	}
-
-	buf := bytes.NewBuffer(nil)
-	w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
-	fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
-	for _, badPod := range badPods {
-		grace := ""
-		if badPod.DeletionGracePeriodSeconds != nil {
-			grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
-		}
-		podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v",
-			badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
-		fmt.Fprintln(w, podInfo)
-	}
-	w.Flush()
-	return errStr + buf.String()
-}
-
 // WaitForDaemonSets for all daemonsets in the given namespace to be ready
 // (defined as all but 'allowedNotReadyNodes' pods associated with that
 // daemonset are ready).
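
[review note] The three ToLower comparisons above are staticcheck SA6005: strings.ToLower(a) == strings.ToLower(b) allocates two throwaway strings on every call, while strings.EqualFold compares with Unicode case folding and no allocation. (The errorBadPodsStates removal also explains the dropped text/tabwriter import in the first hunk.) Quick sketch:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// Equivalent to strings.ToLower("GCE") == strings.ToLower("gce"),
		// but with no temporary strings and full Unicode case folding.
		fmt.Println(strings.EqualFold("GCE", "gce")) // true
	}
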
@@ -1557,14 +1516,14 @@ func (b KubectlBuilder) ExecOrDie() string {
 
 func isTimeout(err error) bool {
 	switch err := err.(type) {
-	case net.Error:
-		if err.Timeout() {
-			return true
-		}
 	case *url.Error:
 		if err, ok := err.Err.(net.Error); ok && err.Timeout() {
 			return true
 		}
+	case net.Error:
+		if err.Timeout() {
+			return true
+		}
 	}
 	return false
 }
@@ -2472,6 +2431,9 @@ func RestartKubelet(host string) error {
 		sudoPresent = true
 	}
 	sshResult, err = e2essh.SSH("systemctl --version", host, TestContext.Provider)
+	if err != nil {
+		return fmt.Errorf("Failed to execute command 'systemctl' on host %s with error %v", host, err)
+	}
 	if !strings.Contains(sshResult.Stderr, "command not found") {
 		cmd = "systemctl restart kubelet"
 	} else {
diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go
index d86ea312da5..fde1c8243fd 100644
--- a/test/e2e/framework/volume/fixtures.go
+++ b/test/e2e/framework/volume/fixtures.go
@@ -185,7 +185,7 @@ func NewGlusterfsServer(cs clientset.Interface, namespace string) (config TestCo
 			},
 		},
 	}
-	endpoints, err := cs.CoreV1().Endpoints(namespace).Create(endpoints)
+	_, err := cs.CoreV1().Endpoints(namespace).Create(endpoints)
 	framework.ExpectNoError(err, "failed to create endpoints for Gluster server")
 	return config, pod, ip
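
[review note] The isTimeout reordering in util.go matters because type-switch cases are tested top to bottom, and *url.Error itself satisfies the net.Error interface: with net.Error listed first, the *url.Error case was unreachable (staticcheck SA4020), so wrapped errors were never unwrapped. A condensed, runnable sketch of the fixed ordering:

	package main

	import (
		"fmt"
		"net"
		"net/url"
	)

	// isTimeout keeps the concrete *url.Error case before the net.Error
	// interface case; otherwise the concrete case can never match.
	func isTimeout(err error) bool {
		switch err := err.(type) {
		case *url.Error:
			// Unwrap and re-check the inner error.
			if nerr, ok := err.Err.(net.Error); ok && nerr.Timeout() {
				return true
			}
		case net.Error:
			return err.Timeout()
		}
		return false
	}

	func main() {
		wrapped := &url.Error{Op: "Get", URL: "http://example.invalid", Err: fmt.Errorf("boom")}
		fmt.Println(isTimeout(wrapped)) // false: the inner error is not a net.Error timeout
	}
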