Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-26 05:03:09 +00:00

commit 7bd48eb3f6: Merge pull request #89784 from oomichi/sshPort

Add common SSHPort on e2essh
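The commit removes the per-package `sshPort = "22"` constants scattered across the e2e tests and has every call site use a single exported `SSHPort` from the e2essh package (k8s.io/kubernetes/test/e2e/framework/ssh). A minimal sketch of the call pattern the diff converges on, assuming the framework packages are on the import path; the helper name restartControllerManager and the host/provider arguments are placeholders, while the command string and the e2essh calls are taken from the hunks below.

package example

import (
	"fmt"
	"net"

	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)

// restartControllerManager is a placeholder helper showing the pattern the
// diff converges on: join the host with the shared e2essh.SSHPort instead of
// a locally defined sshPort constant, then run the command over SSH.
func restartControllerManager(host, provider string) error {
	cmd := "pidof kube-controller-manager | xargs sudo kill"
	result, err := e2essh.SSH(cmd, net.JoinHostPort(host, e2essh.SSHPort), provider)
	if err != nil || result.Code != 0 {
		e2essh.LogResult(result)
		return fmt.Errorf("couldn't restart controller-manager: %v", err)
	}
	return nil
}

The same substitution repeats at every call site below; only the origin of the port constant changes.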
@@ -26,11 +26,6 @@ import (
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )
 
-const (
-	// ssh port
-	sshPort = "22"
-)
-
 // RestartControllerManager restarts the kube-controller-manager.
 func RestartControllerManager() error {
 	// TODO: Make it work for all providers and distros.
@@ -42,7 +37,7 @@ func RestartControllerManager() error {
 	}
 	cmd := "pidof kube-controller-manager | xargs sudo kill"
 	framework.Logf("Restarting controller-manager via ssh, running: %v", cmd)
-	result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.GetMasterHost(), sshPort), framework.TestContext.Provider)
+	result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.GetMasterHost(), e2essh.SSHPort), framework.TestContext.Provider)
 	if err != nil || result.Code != 0 {
 		e2essh.LogResult(result)
 		return fmt.Errorf("couldn't restart controller-manager: %v", err)
@@ -54,7 +49,7 @@ func RestartControllerManager() error {
 func WaitForControllerManagerUp() error {
 	cmd := "curl http://localhost:" + strconv.Itoa(framework.InsecureKubeControllerManagerPort) + "/healthz"
 	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
-		result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.GetMasterHost(), sshPort), framework.TestContext.Provider)
+		result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.GetMasterHost(), e2essh.SSHPort), framework.TestContext.Provider)
 		if err != nil || result.Code != 0 {
 			e2essh.LogResult(result)
 		}
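Note that these call sites build the address with net.JoinHostPort rather than concatenating host + ":" + port; JoinHostPort brackets IPv6 literals so the resulting address stays valid. A small standalone illustration of that standard-library behavior (not part of this commit):

package main

import (
	"fmt"
	"net"
)

func main() {
	// JoinHostPort adds brackets only when the host is an IPv6 literal.
	fmt.Println(net.JoinHostPort("10.0.0.1", "22"))    // 10.0.0.1:22
	fmt.Println(net.JoinHostPort("2001:db8::1", "22")) // [2001:db8::1]:22
}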
@@ -40,8 +40,8 @@ import (
 )
 
 const (
-	// ssh port
-	sshPort = "22"
+	// SSHPort is tcp port number of SSH
+	SSHPort = "22"
 
 	// pollNodeInterval is how often to Poll pods.
 	pollNodeInterval = 2 * time.Second
@@ -136,7 +136,7 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
 
 	sshHosts := make([]string, 0, len(hosts))
 	for _, h := range hosts {
-		sshHosts = append(sshHosts, net.JoinHostPort(h, sshPort))
+		sshHosts = append(sshHosts, net.JoinHostPort(h, SSHPort))
 	}
 	return sshHosts, nil
 }
@@ -155,7 +155,7 @@ type Result struct {
 // eg: the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
 // cloud providers since it involves ssh.
 func NodeExec(nodeName, cmd, provider string) (Result, error) {
-	return SSH(cmd, net.JoinHostPort(nodeName, sshPort), provider)
+	return SSH(cmd, net.JoinHostPort(nodeName, SSHPort), provider)
 }
 
 // SSH synchronously SSHs to a node running on provider and runs cmd. If there
@@ -330,7 +330,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er
 	host := ""
 	for _, a := range node.Status.Addresses {
 		if a.Type == v1.NodeExternalIP && a.Address != "" {
-			host = net.JoinHostPort(a.Address, sshPort)
+			host = net.JoinHostPort(a.Address, SSHPort)
 			break
 		}
 	}
@@ -339,7 +339,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er
 	// No external IPs were found, let's try to use internal as plan B
 	for _, a := range node.Status.Addresses {
 		if a.Type == v1.NodeInternalIP && a.Address != "" {
-			host = net.JoinHostPort(a.Address, sshPort)
+			host = net.JoinHostPort(a.Address, SSHPort)
 			break
 		}
 	}
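With SSHPort now exported, packages outside e2essh can either delegate address assembly to NodeExec or join the port themselves before calling SSH. A hedged sketch of both entry points as they appear in this diff; the node address, command, and provider values are placeholders:

package example

import (
	"net"

	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)

// nodeUptime delegates host:port assembly to NodeExec, which joins the node
// address with the exported SSHPort internally.
func nodeUptime(nodeAddr, provider string) (e2essh.Result, error) {
	return e2essh.NodeExec(nodeAddr, "uptime", provider)
}

// sshTarget shows how a caller that previously kept its own sshPort constant
// now builds the same address from the shared one.
func sshTarget(host string) string {
	return net.JoinHostPort(host, e2essh.SSHPort)
}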
@@ -77,9 +77,6 @@ const (
 	// AffinityConfirmCount is the number of needed continuous requests to confirm that
 	// affinity is enabled.
 	AffinityConfirmCount = 15
-
-	// ssh port
-	sshPort = "22"
 )
 
 var (
@@ -3579,7 +3576,7 @@ func sshRestartMaster() error {
 		command = "sudo /etc/init.d/kube-apiserver restart"
 	}
 	framework.Logf("Restarting master via ssh, running: %v", command)
-	result, err := e2essh.SSH(command, net.JoinHostPort(framework.GetMasterHost(), sshPort), framework.TestContext.Provider)
+	result, err := e2essh.SSH(command, net.JoinHostPort(framework.GetMasterHost(), e2essh.SSHPort), framework.TestContext.Provider)
 	if err != nil || result.Code != 0 {
 		e2essh.LogResult(result)
 		return fmt.Errorf("couldn't restart apiserver: %v", err)
@@ -38,7 +38,6 @@ import (
 )
 
 const (
-	sshPort                = "22"
 	driverDir              = "test/e2e/testing-manifests/flexvolume/"
 	defaultVolumePluginDir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec"
 	// TODO: change this and config-test.sh when default flex volume install path is changed for GCI
@@ -84,7 +83,7 @@ func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath
 	} else {
 		masterHostWithPort := framework.GetMasterHost()
 		hostName := getHostFromHostPort(masterHostWithPort)
-		host = net.JoinHostPort(hostName, sshPort)
+		host = net.JoinHostPort(hostName, e2essh.SSHPort)
 	}
 
 	framework.ExpectNoError(err)
@@ -113,7 +112,7 @@ func uninstallFlex(c clientset.Interface, node *v1.Node, vendor, driver string)
 	} else {
 		masterHostWithPort := framework.GetMasterHost()
 		hostName := getHostFromHostPort(masterHostWithPort)
-		host = net.JoinHostPort(hostName, sshPort)
+		host = net.JoinHostPort(hostName, e2essh.SSHPort)
 	}
 
 	if host == "" {
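In the flexvolume test, framework.GetMasterHost() may already carry a port (the variable is named masterHostWithPort), so the test strips it with its getHostFromHostPort helper before re-joining with the SSH port. A minimal sketch of that kind of split-then-rejoin step using only the standard library; this is an illustration, not the test's actual helper:

package example

import "net"

// hostOnly drops an optional ":port" suffix, returning the input unchanged
// when no port is present (net.SplitHostPort errors in that case).
func hostOnly(addr string) string {
	if host, _, err := net.SplitHostPort(addr); err == nil {
		return host
	}
	return addr
}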
@@ -52,7 +52,7 @@ func checkForControllerManagerHealthy(duration time.Duration) error {
 	var PID string
 	cmd := "pidof kube-controller-manager"
 	for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) {
-		result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.GetMasterHost(), sshPort), framework.TestContext.Provider)
+		result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.GetMasterHost(), e2essh.SSHPort), framework.TestContext.Provider)
 		if err != nil {
 			// We don't necessarily know that it crashed, pipe could just be broken
 			e2essh.LogResult(result)