Merge pull request #95166 from vishsangale/master-to-control-plane-test-e2e-GetMasterHost

Updated symbol 'framework.GetMasterHost' to not use the word 'master'
This commit is contained in:
Kubernetes Prow Robot 2020-10-01 11:49:19 -07:00 committed by GitHub
commit 05b3e8911a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 27 additions and 27 deletions

View File

@ -99,7 +99,7 @@ func doEtcdFailure(failCommand, fixCommand string) {
}
func masterExec(cmd string) {
host := framework.GetMasterHost() + ":22"
host := framework.APIAddress() + ":22"
result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
framework.ExpectNoError(err, "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
if result.Code != 0 {

View File

@ -263,7 +263,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
// Requires master ssh access.
e2eskipper.SkipUnlessProviderIs("gce", "aws")
restarter := NewRestartConfig(
framework.GetMasterHost(), "kube-controller", ports.InsecureKubeControllerManagerPort, restartPollInterval, restartTimeout)
framework.APIAddress(), "kube-controller", ports.InsecureKubeControllerManagerPort, restartPollInterval, restartTimeout)
restarter.restart()
// The intent is to ensure the replication controller manager has observed and reported status of
@ -294,7 +294,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
// Requires master ssh access.
e2eskipper.SkipUnlessProviderIs("gce", "aws")
restarter := NewRestartConfig(
framework.GetMasterHost(), "kube-scheduler", kubeschedulerconfig.DefaultInsecureSchedulerPort, restartPollInterval, restartTimeout)
framework.APIAddress(), "kube-scheduler", kubeschedulerconfig.DefaultInsecureSchedulerPort, restartPollInterval, restartTimeout)
// Create pods while the scheduler is down and make sure the scheduler picks them up by
// scaling the rc to the same size.

View File

@ -449,7 +449,7 @@ func getMasterSSHClient() (*ssh.Client, error) {
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
host := framework.GetMasterHost() + ":22"
host := framework.APIAddress() + ":22"
client, err := ssh.Dial("tcp", host, config)
if err != nil {
return nil, fmt.Errorf("error getting SSH client to host %s: '%v'", host, err)

View File

@ -585,7 +585,7 @@ func traceRouteToMaster() {
framework.Logf("Could not find traceroute program")
return
}
cmd := exec.Command(traceroute, "-I", framework.GetMasterHost())
cmd := exec.Command(traceroute, "-I", framework.APIAddress())
out, err := cmd.Output()
if len(out) != 0 {
framework.Logf(string(out))

View File

@ -37,7 +37,7 @@ func RestartControllerManager() error {
}
cmd := "pidof kube-controller-manager | xargs sudo kill"
framework.Logf("Restarting controller-manager via ssh, running: %v", cmd)
result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.GetMasterHost(), e2essh.SSHPort), framework.TestContext.Provider)
result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider)
if err != nil || result.Code != 0 {
e2essh.LogResult(result)
return fmt.Errorf("couldn't restart controller-manager: %v", err)
@ -49,7 +49,7 @@ func RestartControllerManager() error {
func WaitForControllerManagerUp() error {
cmd := "curl http://localhost:" + strconv.Itoa(framework.InsecureKubeControllerManagerPort) + "/healthz"
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.GetMasterHost(), e2essh.SSHPort), framework.TestContext.Provider)
result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider)
if err != nil || result.Code != 0 {
e2essh.LogResult(result)
}

View File

@ -159,7 +159,7 @@ func (d *LogsSizeData) addNewData(ip, path string, timestamp time.Time, size int
func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
nodeAddresses, err := e2essh.NodeSSHHosts(c)
ExpectNoError(err)
masterAddress := GetMasterHost() + ":22"
instanceAddress := APIAddress() + ":22"
workChannel := make(chan WorkItem, len(nodeAddresses)+1)
workers := make([]*LogSizeGatherer, workersNo)
@ -167,8 +167,8 @@ func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVeri
verifier := &LogsSizeVerifier{
client: c,
stopChannel: stopChannel,
data: prepareData(masterAddress, nodeAddresses),
masterAddress: masterAddress,
data: prepareData(instanceAddress, nodeAddresses),
masterAddress: instanceAddress,
nodeAddresses: nodeAddresses,
wg: sync.WaitGroup{},
workChannel: workChannel,

View File

@ -604,7 +604,7 @@ type kubemarkResourceUsage struct {
}
func getMasterUsageByPrefix(prefix string) (string, error) {
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), GetMasterHost()+":22", TestContext.Provider)
sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), APIAddress()+":22", TestContext.Provider)
if err != nil {
return "", err
}

View File

@ -166,11 +166,11 @@ var RunID = uuid.NewUUID()
// CreateTestingNSFn is a func that is responsible for creating namespace used for executing e2e tests.
type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error)
// GetMasterHost returns a hostname of a master.
func GetMasterHost() string {
masterURL, err := url.Parse(TestContext.Host)
// APIAddress returns an address of an instance.
func APIAddress() string {
instanceURL, err := url.Parse(TestContext.Host)
ExpectNoError(err)
return masterURL.Hostname()
return instanceURL.Hostname()
}
// ProviderIs returns true if the provider is included is the providers. Otherwise false.

View File

@ -3769,7 +3769,7 @@ func sshRestartMaster() error {
command = "sudo /etc/init.d/kube-apiserver restart"
}
framework.Logf("Restarting master via ssh, running: %v", command)
result, err := e2essh.SSH(command, net.JoinHostPort(framework.GetMasterHost(), e2essh.SSHPort), framework.TestContext.Provider)
result, err := e2essh.SSH(command, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider)
if err != nil || result.Code != 0 {
e2essh.LogResult(result)
return fmt.Errorf("couldn't restart apiserver: %v", err)

View File

@ -81,8 +81,8 @@ func installFlex(c clientset.Interface, node *v1.Node, vendor, driver, filePath
host, err = e2enode.GetInternalIP(node)
}
} else {
masterHostWithPort := framework.GetMasterHost()
hostName := getHostFromHostPort(masterHostWithPort)
instanceWithPort := framework.APIAddress()
hostName := getHostFromHostPort(instanceWithPort)
host = net.JoinHostPort(hostName, e2essh.SSHPort)
}
@ -113,8 +113,8 @@ func uninstallFlex(c clientset.Interface, node *v1.Node, vendor, driver string)
host, err = e2enode.GetInternalIP(node)
}
} else {
masterHostWithPort := framework.GetMasterHost()
hostName := getHostFromHostPort(masterHostWithPort)
instanceWithPort := framework.APIAddress()
hostName := getHostFromHostPort(instanceWithPort)
host = net.JoinHostPort(hostName, e2essh.SSHPort)
}

View File

@ -52,7 +52,7 @@ func checkForControllerManagerHealthy(duration time.Duration) error {
var PID string
cmd := "pidof kube-controller-manager"
for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) {
result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.GetMasterHost(), e2essh.SSHPort), framework.TestContext.Provider)
result, err := e2essh.SSH(cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider)
if err != nil {
// We don't necessarily know that it crashed, pipe could just be broken
e2essh.LogResult(result)

View File

@ -167,13 +167,13 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
expectVolumeToBeAttached(nodeName, volumePath)
}
ginkgo.By("Restarting kubelet on master node")
masterAddress := framework.GetMasterHost() + ":22"
err := restartKubelet(masterAddress)
framework.ExpectNoError(err, "Unable to restart kubelet on master node")
ginkgo.By("Restarting kubelet on instance node")
instanceAddress := framework.APIAddress() + ":22"
err := restartKubelet(instanceAddress)
framework.ExpectNoError(err, "Unable to restart kubelet on instance node")
ginkgo.By("Verifying the kubelet on master node is up")
err = waitForKubeletUp(masterAddress)
ginkgo.By("Verifying the kubelet on instance node is up")
err = waitForKubeletUp(instanceAddress)
framework.ExpectNoError(err)
for i, pod := range pods {