Merge pull request #123930 from Nordix/esotsal/fix_123928

Move DockerHung test to the end
This commit is contained in:
Kubernetes Prow Robot 2024-04-24 14:29:38 -07:00 committed by GitHub
commit 0a6101636d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -161,34 +161,6 @@ var _ = SIGDescribe("NodeProblemDetector", nodefeature.NodeProblemDetector, func
gomega.Expect(result.Code).To(gomega.Equal(0))
}
ginkgo.By("Check node-problem-detector can post conditions and events to API server")
for _, node := range nodes {
ginkgo.By(fmt.Sprintf("Check node-problem-detector posted KernelDeadlock condition on node %q", node.Name))
gomega.Eventually(ctx, func() error {
return verifyNodeCondition(ctx, f, "KernelDeadlock", v1.ConditionTrue, "DockerHung", node.Name)
}, pollTimeout, pollInterval).Should(gomega.Succeed())
ginkgo.By(fmt.Sprintf("Check node-problem-detector posted DockerHung event on node %q", node.Name))
eventListOptions := metav1.ListOptions{FieldSelector: fields.Set{"involvedObject.kind": "Node"}.AsSelector().String()}
gomega.Eventually(ctx, func(ctx context.Context) error {
return verifyEvents(ctx, f, eventListOptions, 1, "DockerHung", node.Name)
}, pollTimeout, pollInterval).Should(gomega.Succeed())
if checkForKubeletStart {
// Node problem detector reports kubelet start events automatically starting from NPD v0.7.0+.
// Since Kubelet may be restarted for a few times after node is booted. We just check the event
// is detected, but do not check how many times Kubelet is started.
//
// Some test suites run for hours and KubeletStart event will already be cleaned up
ginkgo.By(fmt.Sprintf("Check node-problem-detector posted KubeletStart event on node %q", node.Name))
gomega.Eventually(ctx, func(ctx context.Context) error {
return verifyEventExists(ctx, f, eventListOptions, "KubeletStart", node.Name)
}, pollTimeout, pollInterval).Should(gomega.Succeed())
} else {
ginkgo.By("KubeletStart event will NOT be checked")
}
}
ginkgo.By("Gather node-problem-detector cpu and memory stats")
numIterations := 60
for i := 1; i <= numIterations; i++ {
@ -237,6 +209,35 @@ var _ = SIGDescribe("NodeProblemDetector", nodefeature.NodeProblemDetector, func
workingSetStats[host][0], workingSetStats[host][len(workingSetStats[host])/2], workingSetStats[host][len(workingSetStats[host])-1])
}
framework.Logf("Node-Problem-Detector CPU and Memory Stats:\n\t%s\n\t%s\n\t%s", cpuStatsMsg, rssStatsMsg, workingSetStatsMsg)
ginkgo.By("Check node-problem-detector can post conditions and events to API server")
for _, node := range nodes {
ginkgo.By(fmt.Sprintf("Check node-problem-detector posted KernelDeadlock condition on node %q", node.Name))
gomega.Eventually(ctx, func() error {
return verifyNodeCondition(ctx, f, "KernelDeadlock", v1.ConditionTrue, "DockerHung", node.Name)
}, pollTimeout, pollInterval).Should(gomega.Succeed())
ginkgo.By(fmt.Sprintf("Check node-problem-detector posted DockerHung event on node %q", node.Name))
eventListOptions := metav1.ListOptions{FieldSelector: fields.Set{"involvedObject.kind": "Node"}.AsSelector().String()}
gomega.Eventually(ctx, func(ctx context.Context) error {
return verifyEvents(ctx, f, eventListOptions, 1, "DockerHung", node.Name)
}, pollTimeout, pollInterval).Should(gomega.Succeed())
if checkForKubeletStart {
// Node problem detector reports kubelet start events automatically starting from NPD v0.7.0+.
// Since Kubelet may be restarted for a few times after node is booted. We just check the event
// is detected, but do not check how many times Kubelet is started.
//
// Some test suites run for hours and KubeletStart event will already be cleaned up
ginkgo.By(fmt.Sprintf("Check node-problem-detector posted KubeletStart event on node %q", node.Name))
gomega.Eventually(ctx, func(ctx context.Context) error {
return verifyEventExists(ctx, f, eventListOptions, "KubeletStart", node.Name)
}, pollTimeout, pollInterval).Should(gomega.Succeed())
} else {
ginkgo.By("KubeletStart event will NOT be checked")
}
}
})
})