Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 03:41:45 +00:00
Merge pull request #45423 from jeffvance/e2e-nodeExec
Automatic merge from submit-queue

move from daemon_restart.go to framework/util.go

**What this PR does / why we need it**: Moves the func `nodeExec` from daemon_restart.go to framework/util.go. This is the correct file for this func and a more intuitive pkg for other callers to use. It is a small step in the larger effort of restructuring the e2e tests to be more logically organized and easier for newcomers to understand.

```release-note
NONE
```

cc @timothysc @copejon
Commit 5c23dc7897
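For callers, the practical effect is that the helper is now exported from the framework package. A hypothetical caller sketch (the `restartKubelet` name and the systemctl command are illustrative, not from this PR; `NodeExec` and `LogSSHResult` are the framework helpers shown in the diff below):

```go
package e2e

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// restartKubelet is a hypothetical example of a test helper that can
// now reuse the exported framework.NodeExec instead of a private copy.
func restartKubelet(nodeName string) error {
	// NodeExec runs the command on the node over SSH and returns an
	// SSHResult plus any SSH-level error; callers handle the error.
	result, err := framework.NodeExec(nodeName, "sudo systemctl restart kubelet")
	if err != nil {
		return err
	}
	framework.LogSSHResult(result)
	return nil
}
```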
In daemon_restart.go, the private helper is removed:

```diff
@@ -56,15 +56,6 @@ const (
 	UPDATE = "UPDATE"
 )
 
-// nodeExec execs the given cmd on node via SSH. Note that the nodeName is an sshable name,
-// eg: the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
-// cloud providers since it involves ssh.
-func nodeExec(nodeName, cmd string) (framework.SSHResult, error) {
-	result, err := framework.SSH(cmd, fmt.Sprintf("%v:%v", nodeName, sshPort), framework.TestContext.Provider)
-	Expect(err).NotTo(HaveOccurred())
-	return result, err
-}
-
 // restartDaemonConfig is a config to restart a running daemon on a node, and wait till
 // it comes back up. It uses ssh to send a SIGTERM to the daemon.
 type restartDaemonConfig struct {
```
```diff
@@ -100,7 +91,7 @@ func (r *restartDaemonConfig) waitUp() {
 		"curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort)
 
 	err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) {
-		result, err := nodeExec(r.nodeName, healthzCheck)
+		result, err := framework.NodeExec(r.nodeName, healthzCheck)
 		framework.ExpectNoError(err)
 		if result.Code == 0 {
 			httpCode, err := strconv.Atoi(result.Stdout)
```
```diff
@@ -120,7 +111,8 @@ func (r *restartDaemonConfig) waitUp() {
 // kill sends a SIGTERM to the daemon
 func (r *restartDaemonConfig) kill() {
 	framework.Logf("Killing %v", r)
-	nodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName))
+	_, err := framework.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName))
+	Expect(err).NotTo(HaveOccurred())
 }
 
 // Restart checks if the daemon is up, kills it, and waits till it comes back up
```
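The waitUp half of restartDaemonConfig is a standard poll-until-healthy loop: run a curl against /healthz over SSH and treat anything but a 200 as "keep waiting". A minimal standalone sketch of that polling shape, assuming only `wait.Poll` from k8s.io/apimachinery (`probeHealthz` is a hypothetical stand-in for the SSH-backed curl in the real test):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// probeHealthz is a hypothetical stand-in for running the curl-based
// healthz check on the node via framework.NodeExec.
func probeHealthz() (httpCode int, err error) {
	return 200, nil
}

func main() {
	// Poll every second, for up to a minute, until /healthz returns 200.
	err := wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
		code, err := probeHealthz()
		if err != nil {
			// Treat SSH hiccups as transient: keep polling.
			return false, nil
		}
		return code == 200, nil
	})
	if err != nil {
		fmt.Println("daemon did not come back up:", err)
	}
}
```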
In framework/util.go, the port constant and the exported helper are added:

```diff
@@ -180,6 +180,8 @@ const (
 
 	// Serve hostname image name
 	ServeHostnameImage = "gcr.io/google_containers/serve_hostname:v1.4"
+	// ssh port
+	sshPort = "22"
 )
 
 var (
```
```diff
@@ -3781,7 +3783,7 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
 
 	sshHosts := make([]string, 0, len(hosts))
 	for _, h := range hosts {
-		sshHosts = append(sshHosts, net.JoinHostPort(h, "22"))
+		sshHosts = append(sshHosts, net.JoinHostPort(h, sshPort))
 	}
 	return sshHosts, nil
 }
```
```diff
@@ -3795,6 +3797,13 @@ type SSHResult struct {
 	Code int
 }
 
+// NodeExec execs the given cmd on node via SSH. Note that the nodeName is an sshable name,
+// eg: the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
+// cloud providers since it involves ssh.
+func NodeExec(nodeName, cmd string) (SSHResult, error) {
+	return SSH(cmd, net.JoinHostPort(nodeName, sshPort), TestContext.Provider)
+}
+
 // SSH synchronously SSHs to a node running on provider and runs cmd. If there
 // is no error performing the SSH, the stdout, stderr, and exit code are
 // returned.
```
```diff
@@ -3835,7 +3844,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult, error) {
 	host := ""
 	for _, a := range node.Status.Addresses {
 		if a.Type == v1.NodeExternalIP {
-			host = a.Address + ":22"
+			host = net.JoinHostPort(a.Address, sshPort)
 			break
 		}
 	}
```
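The repeated `a.Address + ":22"` → `net.JoinHostPort(a.Address, sshPort)` swap is more than cosmetic: `net.JoinHostPort` brackets IPv6 literals, which naive concatenation would leave ambiguous. A quick stdlib-only illustration:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	const sshPort = "22"

	// IPv4: concatenation and JoinHostPort agree.
	fmt.Println("10.0.0.1" + ":" + sshPort)            // 10.0.0.1:22
	fmt.Println(net.JoinHostPort("10.0.0.1", sshPort)) // 10.0.0.1:22

	// IPv6: concatenation yields an ambiguous address, while
	// JoinHostPort adds the brackets a dialer needs.
	fmt.Println("fe80::1" + ":" + sshPort)            // fe80::1:22 (ambiguous)
	fmt.Println(net.JoinHostPort("fe80::1", sshPort)) // [fe80::1]:22
}
```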
```diff
@@ -4413,7 +4422,7 @@ func sshRestartMaster() error {
 		command = "sudo /etc/init.d/kube-apiserver restart"
 	}
 	Logf("Restarting master via ssh, running: %v", command)
-	result, err := SSH(command, GetMasterHost()+":22", TestContext.Provider)
+	result, err := SSH(command, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
 	if err != nil || result.Code != 0 {
 		LogSSHResult(result)
 		return fmt.Errorf("couldn't restart apiserver: %v", err)
```
```diff
@@ -5381,7 +5390,7 @@ func GetNodeExternalIP(node *v1.Node) string {
 	host := ""
 	for _, a := range node.Status.Addresses {
 		if a.Type == v1.NodeExternalIP {
-			host = a.Address + ":22"
+			host = net.JoinHostPort(a.Address, sshPort)
 			break
 		}
 	}
```
And in a caller elsewhere in the e2e tests (checkPodCleanup):

```diff
@@ -261,7 +261,8 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 	for _, test := range tests {
 		framework.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
 		err = wait.Poll(poll, timeout, func() (bool, error) {
-			result, _ := nodeExec(nodeIP, test.cmd)
+			result, err := framework.NodeExec(nodeIP, test.cmd)
+			Expect(err).NotTo(HaveOccurred())
 			framework.LogSSHResult(result)
 			ok := (result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0)
 			if expectClean && ok { // keep trying
```