Merge pull request #84886 from YangLu1031/daemonRestartTest
Verify kubelet & kube-proxy recovery on Windows nodes
commit 78c56e6b4c
@@ -92,9 +92,12 @@ func (r *RestartDaemonConfig) String() string {
 // waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout
 func (r *RestartDaemonConfig) waitUp() {
 	framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
+	nullDev := "/dev/null"
+	if framework.NodeOSDistroIs("windows") {
+		nullDev = "NUL"
+	}
 	healthzCheck := fmt.Sprintf(
-		"curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort)
-
+		"curl -s -o %v -I -w \"%%{http_code}\" http://localhost:%v/healthz", nullDev, r.healthzPort)
 	err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) {
 		result, err := e2essh.NodeExec(r.nodeName, healthzCheck, framework.TestContext.Provider)
 		framework.ExpectNoError(err)
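Note (not part of the diff): the hunks appear to come from the e2e daemon-restart test (test/e2e/apps/daemon_restart.go). Below is a minimal, self-contained sketch of the command selection introduced above, with a plain isWindows flag standing in for framework.NodeOSDistroIs("windows") and 10256 (the kube-proxy healthz port) used only as an example value for r.healthzPort:

package main

import "fmt"

// healthzCheckCmd mirrors the selection in the patched waitUp: curl discards
// its output to the OS null device (/dev/null on Linux nodes, NUL on Windows
// nodes) and prints only the HTTP status code of GET /healthz.
func healthzCheckCmd(isWindows bool, healthzPort int) string {
	nullDev := "/dev/null"
	if isWindows {
		nullDev = "NUL"
	}
	return fmt.Sprintf(
		"curl -s -o %v -I -w \"%%{http_code}\" http://localhost:%v/healthz", nullDev, healthzPort)
}

func main() {
	fmt.Println(healthzCheckCmd(false, 10256)) // Linux node
	fmt.Println(healthzCheckCmd(true, 10256))  // Windows node
}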
@@ -115,8 +118,12 @@ func (r *RestartDaemonConfig) waitUp() {
 
 // kill sends a SIGTERM to the daemon
 func (r *RestartDaemonConfig) kill() {
+	killCmd := fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName)
+	if framework.NodeOSDistroIs("windows") {
+		killCmd = fmt.Sprintf("taskkill /im %v.exe /f", r.daemonName)
+	}
 	framework.Logf("Killing %v", r)
-	_, err := e2essh.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName), framework.TestContext.Provider)
+	_, err := e2essh.NodeExec(r.nodeName, killCmd, framework.TestContext.Provider)
 	framework.ExpectNoError(err)
 }
 
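Again outside the diff: the kill-command selection in isolation. Note that taskkill /f force-terminates the process rather than sending SIGTERM, so the "sends a SIGTERM" comment above is only accurate for the Linux path. isWindows again stands in for framework.NodeOSDistroIs("windows"):

package main

import "fmt"

// killCmdFor mirrors the patched kill(): pgrep piped to sudo kill (SIGTERM) on
// Linux nodes, a forced taskkill on Windows nodes, where the daemon runs as
// <daemonName>.exe.
func killCmdFor(isWindows bool, daemonName string) string {
	killCmd := fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", daemonName)
	if isWindows {
		killCmd = fmt.Sprintf("taskkill /im %v.exe /f", daemonName)
	}
	return killCmd
}

func main() {
	fmt.Println(killCmdFor(false, "kube-proxy")) // pgrep kube-proxy | xargs -I {} sudo kill {}
	fmt.Println(killCmdFor(true, "kube-proxy"))  // taskkill /im kube-proxy.exe /f
}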
@@ -319,4 +326,18 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 			framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
 		}
 	})
+
+	ginkgo.It("Kube-proxy should recover after being killed accidentally", func() {
+		nodeIPs, err := e2enode.GetPublicIps(f.ClientSet)
+		if err != nil {
+			framework.Logf("Unexpected error occurred: %v", err)
+		}
+		for _, ip := range nodeIPs {
+			restarter := NewRestartConfig(
+				ip, "kube-proxy", ports.ProxyHealthzPort, restartPollInterval, restartTimeout)
+			// restart method will kill the kube-proxy process and wait for recovery,
+			// if not able to recover, will throw test failure.
+			restarter.restart()
+		}
+	})
 })
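The new ginkgo.It block leans on the existing restart() helper, which this diff does not touch. Going by the in-test comment ("kill the kube-proxy process and wait for recovery"), it is essentially kill() followed by waitUp(); a sketch of that flow, assuming the method names used elsewhere in this file:

// Sketch only, not part of this change: restart() as the new test uses it,
// i.e. kill the daemon on the node, then poll its /healthz endpoint via
// waitUp() until it reports healthy again or the poll times out.
func (r *RestartDaemonConfig) restart() {
	r.kill()
	r.waitUp()
}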