Merge pull request #81695 from oomichi/ExpectEqual-node

Use framework functions under test/e2e/node/
Kubernetes Prow Robot 2019-08-22 22:03:58 -07:00 committed by GitHub
commit 4a4d5f47d5
6 changed files with 20 additions and 24 deletions
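For readers unfamiliar with the helpers this PR switches to: the change replaces inline gomega assertions with the assertion wrappers exported by k8s.io/kubernetes/test/e2e/framework. Below is a minimal sketch of what those wrappers typically look like; the signatures are assumptions modeled on the common framework pattern, not the verbatim upstream source, which lives in the framework package itself.

// Sketch of the assertion helpers the node e2e tests are converted to use.
// Assumed signatures based on the test/e2e/framework pattern.
package framework

import "github.com/onsi/gomega"

// ExpectEqual fails the current test if actual does not equal extra.
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
}

// ExpectNotEqual fails the current test if actual equals extra.
func ExpectNotEqual(actual interface{}, extra interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).NotTo(gomega.Equal(extra), explain...)
}

// ExpectNoError fails the current test if err is non-nil.
func ExpectNoError(err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
}

The net effect is that call sites read as one-line equality/error checks, for example framework.ExpectEqual(result.Code, 0) instead of gomega.Expect(result.Code).To(gomega.BeZero()), while the offset of 1 keeps failure messages pointing at the test line rather than the helper.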


@@ -37,7 +37,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
@@ -273,7 +272,7 @@ var _ = SIGDescribe("kubelet", func() {
nodeLabels["kubelet_cleanup"] = "true"
nodes := framework.GetReadySchedulableNodesOrDie(c)
numNodes = len(nodes.Items)
gomega.Expect(numNodes).NotTo(gomega.BeZero())
framework.ExpectNotEqual(numNodes, 0)
nodeNames = sets.NewString()
// If there are a lot of nodes, we don't want to use all of them
// (if there are 1000 nodes in the cluster, starting 10 pods/node


@@ -28,7 +28,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
func preparePod(name string, node *v1.Node, propagation *v1.MountPropagationMode, hostDir string) *v1.Pod {
@@ -88,7 +87,7 @@ var _ = SIGDescribe("Mount propagation", func() {
// Pick a node where all pods will run.
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero(), "No available nodes for scheduling")
framework.ExpectNotEqual(len(nodes.Items), 0, "No available nodes for scheduling")
node := &nodes.Items[0]
// Fail the test if the namespace is not set. We expect that the


@@ -60,7 +60,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
ginkgo.By("Getting all nodes and their SSH-able IP addresses")
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero())
framework.ExpectNotEqual(len(nodes.Items), 0)
hosts := []string{}
for _, node := range nodes.Items {
for _, addr := range node.Status.Addresses {
@@ -95,14 +95,14 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
psCmd := "ps aux | grep [n]ode-problem-detector"
result, err = e2essh.SSH(psCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
gomega.Expect(result.Code).To(gomega.BeZero())
framework.ExpectEqual(result.Code, 0)
gomega.Expect(result.Stdout).To(gomega.ContainSubstring("node-problem-detector"))
ginkgo.By(fmt.Sprintf("Check node-problem-detector is running fine on node %q", host))
journalctlCmd := "sudo journalctl -u node-problem-detector"
result, err = e2essh.SSH(journalctlCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
gomega.Expect(result.Code).To(gomega.BeZero())
framework.ExpectEqual(result.Code, 0)
gomega.Expect(result.Stdout).NotTo(gomega.ContainSubstring("node-problem-detector.service: Failed"))
if isStandaloneMode[host] {
@@ -116,7 +116,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
injectLogCmd := "sudo sh -c \"echo 'kernel: " + log + "' >> /dev/kmsg\""
_, err = e2essh.SSH(injectLogCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
gomega.Expect(result.Code).To(gomega.BeZero())
framework.ExpectEqual(result.Code, 0)
}
ginkgo.By("Check node-problem-detector can post conditions and events to API server")
@@ -221,22 +221,22 @@ func getMemoryStat(f *framework.Framework, host string) (rss, workingSet float64
memCmd := "cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.usage_in_bytes && cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.stat"
result, err := e2essh.SSH(memCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
gomega.Expect(result.Code).To(gomega.BeZero())
framework.ExpectEqual(result.Code, 0)
lines := strings.Split(result.Stdout, "\n")
memoryUsage, err := strconv.ParseFloat(lines[0], 64)
gomega.Expect(err).To(gomega.BeNil())
framework.ExpectNoError(err)
var totalInactiveFile float64
for _, line := range lines[1:] {
tokens := strings.Split(line, " ")
if tokens[0] == "total_rss" {
rss, err = strconv.ParseFloat(tokens[1], 64)
gomega.Expect(err).To(gomega.BeNil())
framework.ExpectNoError(err)
}
if tokens[0] == "total_inactive_file" {
totalInactiveFile, err = strconv.ParseFloat(tokens[1], 64)
gomega.Expect(err).To(gomega.BeNil())
framework.ExpectNoError(err)
}
}
@@ -257,7 +257,7 @@ func getCPUStat(f *framework.Framework, host string) (usage, uptime float64) {
cpuCmd := "cat /sys/fs/cgroup/cpu/system.slice/node-problem-detector.service/cpuacct.usage && cat /proc/uptime | awk '{print $1}'"
result, err := e2essh.SSH(cpuCmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err)
gomega.Expect(result.Code).To(gomega.BeZero())
framework.ExpectEqual(result.Code, 0)
lines := strings.Split(result.Stdout, "\n")
usage, err = strconv.ParseFloat(lines[0], 64)
@@ -283,6 +283,6 @@ func getNpdPodStat(f *framework.Framework, nodeName string) (cpuUsage, rss, work
hasNpdPod = true
break
}
gomega.Expect(hasNpdPod).To(gomega.BeTrue())
framework.ExpectEqual(hasNpdPod, true)
return
}


@@ -36,7 +36,6 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -163,8 +162,8 @@ var _ = SIGDescribe("Pods Extended", func() {
})
framework.ExpectNoError(err, "kubelet never observed the termination notice")
gomega.Expect(lastPod.DeletionTimestamp).ToNot(gomega.BeNil())
gomega.Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(gomega.BeZero())
framework.ExpectNotEqual(lastPod.DeletionTimestamp, nil)
framework.ExpectNotEqual(lastPod.Spec.TerminationGracePeriodSeconds, 0)
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
@@ -222,7 +221,7 @@ var _ = SIGDescribe("Pods Extended", func() {
ginkgo.By("verifying QOS class is set on the pod")
pod, err := podClient.Get(name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to query for pod")
gomega.Expect(pod.Status.QOSClass == v1.PodQOSGuaranteed)
framework.ExpectEqual(pod.Status.QOSClass, v1.PodQOSGuaranteed)
})
})
})


@@ -213,9 +213,9 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
testContent := "hello"
testFilePath := mountPath + "/TEST"
err = f.WriteFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath, testContent)
gomega.Expect(err).To(gomega.BeNil())
framework.ExpectNoError(err)
content, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath)
gomega.Expect(err).To(gomega.BeNil())
framework.ExpectNoError(err)
gomega.Expect(content).To(gomega.ContainSubstring(testContent))
foundPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})


@@ -28,7 +28,6 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const dummyFinalizer = "k8s.io/dummy-finalizer"
@@ -88,11 +87,11 @@ func testFinishedJob(f *framework.Framework) {
framework.ExpectNoError(err)
finishTime := jobutil.FinishTime(job)
finishTimeUTC := finishTime.UTC()
gomega.Expect(finishTime.IsZero()).NotTo(gomega.BeTrue())
framework.ExpectNotEqual(finishTime.IsZero(), true)
deleteAtUTC := job.ObjectMeta.DeletionTimestamp.UTC()
gomega.Expect(deleteAtUTC).NotTo(gomega.BeNil())
framework.ExpectNotEqual(deleteAtUTC, nil)
expireAtUTC := finishTimeUTC.Add(time.Duration(ttl) * time.Second)
gomega.Expect(deleteAtUTC.Before(expireAtUTC)).To(gomega.BeFalse())
framework.ExpectEqual(deleteAtUTC.Before(expireAtUTC), false)
}