fix golint errors in e2e/node

This commit is contained in:
danielqsj 2019-05-10 12:37:01 +08:00
parent 73c2daeeea
commit 124efde4f8
5 changed files with 9 additions and 9 deletions

View File

@ -606,7 +606,6 @@ test/e2e/chaosmonkey
test/e2e/common test/e2e/common
test/e2e/framework test/e2e/framework
test/e2e/lifecycle/bootstrap test/e2e/lifecycle/bootstrap
test/e2e/node
test/e2e/scalability test/e2e/scalability
test/e2e/scheduling test/e2e/scheduling
test/e2e/storage/drivers test/e2e/storage/drivers

View File

@ -85,18 +85,18 @@ var _ = SIGDescribe("Events", func() {
gomega.Expect(len(pods.Items)).To(gomega.Equal(1)) gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
ginkgo.By("retrieving the pod") ginkgo.By("retrieving the pod")
podWithUid, err := podClient.Get(pod.Name, metav1.GetOptions{}) podWithUID, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to get pod: %v", err) framework.Failf("Failed to get pod: %v", err)
} }
e2elog.Logf("%+v\n", podWithUid) e2elog.Logf("%+v\n", podWithUID)
var events *v1.EventList var events *v1.EventList
// Check for scheduler event about the pod. // Check for scheduler event about the pod.
ginkgo.By("checking for scheduler event about the pod") ginkgo.By("checking for scheduler event about the pod")
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
selector := fields.Set{ selector := fields.Set{
"involvedObject.kind": "Pod", "involvedObject.kind": "Pod",
"involvedObject.uid": string(podWithUid.UID), "involvedObject.uid": string(podWithUID.UID),
"involvedObject.namespace": f.Namespace.Name, "involvedObject.namespace": f.Namespace.Name,
"source": v1.DefaultSchedulerName, "source": v1.DefaultSchedulerName,
}.AsSelector().String() }.AsSelector().String()
@ -115,7 +115,7 @@ var _ = SIGDescribe("Events", func() {
ginkgo.By("checking for kubelet event about the pod") ginkgo.By("checking for kubelet event about the pod")
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
selector := fields.Set{ selector := fields.Set{
"involvedObject.uid": string(podWithUid.UID), "involvedObject.uid": string(podWithUID.UID),
"involvedObject.kind": "Pod", "involvedObject.kind": "Pod",
"involvedObject.namespace": f.Namespace.Name, "involvedObject.namespace": f.Namespace.Name,
"source": "kubelet", "source": "kubelet",

View File

@ -18,6 +18,7 @@ package node
import "k8s.io/kubernetes/test/e2e/framework" import "k8s.io/kubernetes/test/e2e/framework"
// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool { func SIGDescribe(text string, body func()) bool {
return framework.KubeDescribe("[sig-node] "+text, body) return framework.KubeDescribe("[sig-node] "+text, body)
} }

View File

@ -102,7 +102,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
gomega.Expect(result.Stdout).NotTo(gomega.ContainSubstring("node-problem-detector.service: Failed")) gomega.Expect(result.Stdout).NotTo(gomega.ContainSubstring("node-problem-detector.service: Failed"))
if isStandaloneMode[host] { if isStandaloneMode[host] {
cpuUsage, uptime := getCpuStat(f, host) cpuUsage, uptime := getCPUStat(f, host)
cpuUsageStats[host] = append(cpuUsageStats[host], cpuUsage) cpuUsageStats[host] = append(cpuUsageStats[host], cpuUsage)
uptimeStats[host] = append(uptimeStats[host], uptime) uptimeStats[host] = append(uptimeStats[host], uptime)
} }
@ -138,7 +138,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
rssStats[host] = append(rssStats[host], rss) rssStats[host] = append(rssStats[host], rss)
workingSetStats[host] = append(workingSetStats[host], workingSet) workingSetStats[host] = append(workingSetStats[host], workingSet)
if i == numIterations { if i == numIterations {
cpuUsage, uptime := getCpuStat(f, host) cpuUsage, uptime := getCPUStat(f, host)
cpuUsageStats[host] = append(cpuUsageStats[host], cpuUsage) cpuUsageStats[host] = append(cpuUsageStats[host], cpuUsage)
uptimeStats[host] = append(uptimeStats[host], uptime) uptimeStats[host] = append(uptimeStats[host], uptime)
} }
@ -249,7 +249,7 @@ func getMemoryStat(f *framework.Framework, host string) (rss, workingSet float64
return return
} }
func getCpuStat(f *framework.Framework, host string) (usage, uptime float64) { func getCPUStat(f *framework.Framework, host string) (usage, uptime float64) {
cpuCmd := "cat /sys/fs/cgroup/cpu/system.slice/node-problem-detector.service/cpuacct.usage && cat /proc/uptime | awk '{print $1}'" cpuCmd := "cat /sys/fs/cgroup/cpu/system.slice/node-problem-detector.service/cpuacct.usage && cat /proc/uptime | awk '{print $1}'"
result, err := e2essh.SSH(cpuCmd, host, framework.TestContext.Provider) result, err := e2essh.SSH(cpuCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err) framework.ExpectNoError(err)

View File

@ -36,7 +36,7 @@ import (
"github.com/onsi/gomega" "github.com/onsi/gomega"
) )
// partially cloned from webserver.go // State partially cloned from webserver.go
type State struct { type State struct {
Received map[string]int Received map[string]int
} }