Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 19:56:01 +00:00)
Merge pull request #77336 from johnSchnake/refactorFrameworkSSH

Move ssh code to new package

Commit b3875556b0
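This PR moves the SSH helpers out of test/e2e/framework into a dedicated test/e2e/framework/ssh package (imported throughout the diff as e2essh), renames SSHResult/LogSSHResult to Result/LogResult, and makes NodeExec take the provider explicitly. A minimal sketch of a migrated call site, assuming the import alias used in the hunks below; the masterUptime wrapper and the uptime command are illustrative, not from this PR:

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)

// masterUptime runs a command on the master over SSH using the new package.
func masterUptime() (string, error) {
	host := framework.GetMasterHost() + ":22"
	// framework.SSH(...) becomes e2essh.SSH(...).
	result, err := e2essh.SSH("uptime", host, framework.TestContext.Provider)
	if err != nil || result.Code != 0 {
		// framework.LogSSHResult(...) becomes e2essh.LogResult(...).
		e2essh.LogResult(result)
	}
	return result.Stdout, err
}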
@@ -82,6 +82,7 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/deployment:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
+        "//test/e2e/framework/ssh:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/crd:go_default_library",
         "//test/utils/image:go_default_library",
@@ -25,6 +25,7 @@ import (
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/test/e2e/apps"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -95,10 +96,10 @@ func doEtcdFailure(failCommand, fixCommand string) {

 func masterExec(cmd string) {
 	host := framework.GetMasterHost() + ":22"
-	result, err := framework.SSH(cmd, host, framework.TestContext.Provider)
+	result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
 	if result.Code != 0 {
-		framework.LogSSHResult(result)
+		e2essh.LogResult(result)
 		framework.Failf("master exec command returned non-zero")
 	}
 }
@@ -64,6 +64,7 @@ go_library(
         "//test/e2e/framework/deployment:go_default_library",
         "//test/e2e/framework/job:go_default_library",
         "//test/e2e/framework/replicaset:go_default_library",
+        "//test/e2e/framework/ssh:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/davecgh/go-spew/spew:go_default_library",
@@ -21,7 +21,7 @@ import (
 	"strconv"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -33,6 +33,7 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/kubernetes/pkg/master/ports"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"

@@ -93,7 +94,7 @@ func (r *RestartDaemonConfig) waitUp() {
 		"curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort)

 	err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) {
-		result, err := framework.NodeExec(r.nodeName, healthzCheck)
+		result, err := e2essh.NodeExec(r.nodeName, healthzCheck, framework.TestContext.Provider)
 		framework.ExpectNoError(err)
 		if result.Code == 0 {
 			httpCode, err := strconv.Atoi(result.Stdout)
@@ -113,7 +114,7 @@ func (r *RestartDaemonConfig) waitUp() {
 // kill sends a SIGTERM to the daemon
 func (r *RestartDaemonConfig) kill() {
 	framework.Logf("Killing %v", r)
-	_, err := framework.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName))
+	_, err := e2essh.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName), framework.TestContext.Provider)
 	framework.ExpectNoError(err)
 }

@@ -27,7 +27,6 @@ go_library(
         "resource_usage_gatherer.go",
         "service_util.go",
         "size.go",
-        "ssh.go",
         "statefulset_utils.go",
         "test_context.go",
         "util.go",
@@ -59,7 +58,6 @@ go_library(
         "//pkg/scheduler/metrics:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/security/podsecuritypolicy/seccomp:go_default_library",
-        "//pkg/ssh:go_default_library",
         "//pkg/util/system:go_default_library",
         "//pkg/util/taints:go_default_library",
         "//pkg/volume/util:go_default_library",
@@ -113,6 +111,7 @@ go_library(
         "//test/e2e/framework/auth:go_default_library",
         "//test/e2e/framework/ginkgowrapper:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
+        "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/framework/testfiles:go_default_library",
         "//test/e2e/manifest:go_default_library",
         "//test/e2e/perftype:go_default_library",
@@ -125,7 +124,6 @@ go_library(
         "//vendor/github.com/pkg/errors:go_default_library",
         "//vendor/github.com/prometheus/common/expfmt:go_default_library",
         "//vendor/github.com/prometheus/common/model:go_default_library",
-        "//vendor/golang.org/x/crypto/ssh:go_default_library",
         "//vendor/golang.org/x/net/websocket:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
@@ -162,6 +160,7 @@ filegroup(
         "//test/e2e/framework/providers/openstack:all-srcs",
         "//test/e2e/framework/providers/vsphere:all-srcs",
         "//test/e2e/framework/replicaset:all-srcs",
+        "//test/e2e/framework/ssh:all-srcs",
         "//test/e2e/framework/testfiles:all-srcs",
         "//test/e2e/framework/timer:all-srcs",
         "//test/e2e/framework/viperconfig:all-srcs",
@@ -20,6 +20,8 @@ import (
 	"bufio"
 	"fmt"
 	"strings"
+
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )

 // KubemarkResourceUsage is a struct for tracking the resource usage of kubemark.
@@ -30,7 +32,7 @@ type KubemarkResourceUsage struct {
 }

 func getMasterUsageByPrefix(prefix string) (string, error) {
-	sshResult, err := SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), GetMasterHost()+":22", TestContext.Provider)
+	sshResult, err := e2essh.SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), GetMasterHost()+":22", TestContext.Provider)
 	if err != nil {
 		return "", err
 	}
@@ -26,6 +26,7 @@ import (
 	"time"

 	clientset "k8s.io/client-go/kubernetes"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )

 const (
@@ -154,7 +155,7 @@ func (d *LogsSizeData) addNewData(ip, path string, timestamp time.Time, size int

 // NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
 func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
-	nodeAddresses, err := NodeSSHHosts(c)
+	nodeAddresses, err := e2essh.NodeSSHHosts(c)
 	ExpectNoError(err)
 	masterAddress := GetMasterHost() + ":22"

@@ -250,7 +251,7 @@ func (g *LogSizeGatherer) Work() bool {
 		return false
 	case workItem = <-g.workChannel:
 	}
-	sshResult, err := SSH(
+	sshResult, err := e2essh.SSH(
 		fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")),
 		workItem.ip,
 		TestContext.Provider,
@@ -37,6 +37,7 @@ import (
 	schedulermetric "k8s.io/kubernetes/pkg/scheduler/metrics"
 	"k8s.io/kubernetes/pkg/util/system"
 	"k8s.io/kubernetes/test/e2e/framework/metrics"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"

 	"github.com/prometheus/common/expfmt"
 	"github.com/prometheus/common/model"
@@ -329,7 +330,7 @@ func getEtcdMetrics() ([]*model.Sample, error) {
 	}

 	cmd := "curl http://localhost:2379/metrics"
-	sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
+	sshResult, err := e2essh.SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
 	if err != nil || sshResult.Code != 0 {
 		return nil, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
 	}
@@ -656,7 +657,7 @@ func sendRestRequestToScheduler(c clientset.Interface, op string) (string, error
 	}

 	cmd := "curl -X " + opUpper + " http://localhost:10251/metrics"
-	sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
+	sshResult, err := e2essh.SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
 	if err != nil || sshResult.Code != 0 {
 		return "", fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
 	}
@@ -25,9 +25,10 @@ import (
 	"sync"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )

 // EtcdUpgrade upgrades etcd on GCE.
@@ -351,7 +352,7 @@ func (k *NodeKiller) kill(nodes []v1.Node) {
 			defer wg.Done()

 			Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
-			err := IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
+			err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
 			if err != nil {
 				Logf("ERROR while stopping node %q: %v", node.Name, err)
 				return
@@ -360,7 +361,7 @@ func (k *NodeKiller) kill(nodes []v1.Node) {
 			time.Sleep(k.config.SimulatedDowntime)

 			Logf("Rebooting %q to repair the node", node.Name)
-			err = IssueSSHCommand("sudo reboot", k.provider, &node)
+			err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node)
 			if err != nil {
 				Logf("ERROR while rebooting node %q: %v", node.Name, err)
 				return
@@ -25,6 +25,8 @@ import (
 	"strings"
 	"sync"
 	"time"
+
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )

 const (
@@ -93,7 +95,7 @@ func gatherProfile(componentName, profileBaseName, profileKind string) error {

 	// Get the profile data over SSH.
 	getCommand := fmt.Sprintf("curl -s localhost:%v/debug/pprof/%s", profilePort, profileKind)
-	sshResult, err := SSH(getCommand, GetMasterHost()+":22", TestContext.Provider)
+	sshResult, err := e2essh.SSH(getCommand, GetMasterHost()+":22", TestContext.Provider)
 	if err != nil {
 		return fmt.Errorf("Failed to execute curl command on master through SSH: %v", err)
 	}
@@ -40,6 +40,7 @@ import (
 	"k8s.io/client-go/util/retry"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"

@@ -1379,9 +1380,9 @@ func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expect
 		func() string {
 			cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -")
 			Logf("Executing cmd %q on host %v", cmd, host)
-			result, err := SSH(cmd, host, TestContext.Provider)
+			result, err := e2essh.SSH(cmd, host, TestContext.Provider)
 			if err != nil || result.Code != 0 {
-				LogSSHResult(result)
+				e2essh.LogResult(result)
 				Logf("error while SSH-ing to node: %v", err)
 			}
 			return result.Stdout
@@ -1447,9 +1448,9 @@ func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceI
 		"curl -g -s --connect-timeout 2 http://%s && exit 99", ipPort)

 	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
-		result, err := SSH(command, host, TestContext.Provider)
+		result, err := e2essh.SSH(command, host, TestContext.Provider)
 		if err != nil {
-			LogSSHResult(result)
+			e2essh.LogResult(result)
 			Logf("error while SSH-ing to node: %v", err)
 		}
 		if result.Code != 99 {
test/e2e/framework/ssh/BUILD (new file, 34 lines)
@@ -0,0 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["ssh.go"],
+    importpath = "k8s.io/kubernetes/test/e2e/framework/ssh",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/ssh:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//test/e2e/framework/log:go_default_library",
+        "//test/utils:go_default_library",
+        "//vendor/github.com/onsi/gomega:go_default_library",
+        "//vendor/golang.org/x/crypto/ssh:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package framework
+package ssh

 import (
 	"bytes"
@@ -24,11 +24,29 @@ import (
 	"path/filepath"
 	"time"

+	"github.com/onsi/gomega"
+
 	"golang.org/x/crypto/ssh"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/util/wait"
+	clientset "k8s.io/client-go/kubernetes"
 	sshutil "k8s.io/kubernetes/pkg/ssh"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	testutils "k8s.io/kubernetes/test/utils"
 )

 const (
 	// ssh port
 	sshPort = "22"
+
+	// pollNodeInterval is how often to Poll pods.
+	pollNodeInterval = 2 * time.Second
+
+	// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
+	// transient failures from failing tests.
+	// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
+	singleCallTimeout = 5 * time.Minute
 )

 // GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be
@@ -86,15 +104,15 @@ func GetSigner(provider string) (ssh.Signer, error) {
 func NodeSSHHosts(c clientset.Interface) ([]string, error) {
 	nodelist := waitListSchedulableNodesOrDie(c)

-	hosts := NodeAddresses(nodelist, v1.NodeExternalIP)
+	hosts := nodeAddresses(nodelist, v1.NodeExternalIP)
 	// If ExternalIPs aren't set, assume the test programs can reach the
 	// InternalIP. Simplified exception logic here assumes that the hosts will
 	// either all have ExternalIP or none will. Simplifies handling here and
 	// should be adequate since the setting of the external IPs is provider
 	// specific: they should either all have them or none of them will.
 	if len(hosts) == 0 {
-		Logf("No external IP address on nodes, falling back to internal IPs")
-		hosts = NodeAddresses(nodelist, v1.NodeInternalIP)
+		e2elog.Logf("No external IP address on nodes, falling back to internal IPs")
+		hosts = nodeAddresses(nodelist, v1.NodeInternalIP)
 	}

 	// Error if any node didn't have an external/internal IP.
@@ -111,8 +129,8 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
 	return sshHosts, nil
 }

-// SSHResult holds the execution result of SSH command
-type SSHResult struct {
+// Result holds the execution result of SSH command
+type Result struct {
 	User string
 	Host string
 	Cmd  string
@@ -124,15 +142,15 @@ type SSHResult struct {
 // NodeExec execs the given cmd on node via SSH. Note that the nodeName is an sshable name,
 // eg: the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
 // cloud providers since it involves ssh.
-func NodeExec(nodeName, cmd string) (SSHResult, error) {
-	return SSH(cmd, net.JoinHostPort(nodeName, sshPort), TestContext.Provider)
+func NodeExec(nodeName, cmd, provider string) (Result, error) {
+	return SSH(cmd, net.JoinHostPort(nodeName, sshPort), provider)
 }

 // SSH synchronously SSHs to a node running on provider and runs cmd. If there
 // is no error performing the SSH, the stdout, stderr, and exit code are
 // returned.
-func SSH(cmd, host, provider string) (SSHResult, error) {
-	result := SSHResult{Host: host, Cmd: cmd}
+func SSH(cmd, host, provider string) (Result, error) {
+	result := Result{Host: host, Cmd: cmd}

 	// Get a signer for the provider.
 	signer, err := GetSigner(provider)
@@ -231,18 +249,18 @@ func RunSSHCommandViaBastion(cmd, user, bastion, host string, signer ssh.Signer)
 	return bout.String(), berr.String(), code, err
 }

-// LogSSHResult records SSHResult log
-func LogSSHResult(result SSHResult) {
+// LogResult records result log
+func LogResult(result Result) {
 	remote := fmt.Sprintf("%s@%s", result.User, result.Host)
-	Logf("ssh %s: command: %s", remote, result.Cmd)
-	Logf("ssh %s: stdout: %q", remote, result.Stdout)
-	Logf("ssh %s: stderr: %q", remote, result.Stderr)
-	Logf("ssh %s: exit code: %d", remote, result.Code)
+	e2elog.Logf("ssh %s: command: %s", remote, result.Cmd)
+	e2elog.Logf("ssh %s: stdout: %q", remote, result.Stdout)
+	e2elog.Logf("ssh %s: stderr: %q", remote, result.Stderr)
+	e2elog.Logf("ssh %s: exit code: %d", remote, result.Code)
 }

 // IssueSSHCommandWithResult tries to execute a SSH command and returns the execution result
-func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult, error) {
-	Logf("Getting external IP address for %s", node.Name)
+func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, error) {
+	e2elog.Logf("Getting external IP address for %s", node.Name)
 	host := ""
 	for _, a := range node.Status.Addresses {
 		if a.Type == v1.NodeExternalIP && a.Address != "" {
@@ -265,9 +283,9 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult,
 		return nil, fmt.Errorf("couldn't find any IP address for node %s", node.Name)
 	}

-	Logf("SSH %q on %s(%s)", cmd, node.Name, host)
+	e2elog.Logf("SSH %q on %s(%s)", cmd, node.Name, host)
 	result, err := SSH(cmd, host, provider)
-	LogSSHResult(result)
+	LogResult(result)

 	if result.Code != 0 || err != nil {
 		return nil, fmt.Errorf("failed running %q: %v (exit code %d, stderr %v)",
@@ -285,3 +303,61 @@ func IssueSSHCommand(cmd, provider string, node *v1.Node) error {
 	}
 	return nil
 }
+
+// nodeAddresses returns the first address of the given type of each node.
+func nodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
+	hosts := []string{}
+	for _, n := range nodelist.Items {
+		for _, addr := range n.Status.Addresses {
+			if addr.Type == addrType && addr.Address != "" {
+				hosts = append(hosts, addr.Address)
+				break
+			}
+		}
+	}
+	return hosts
+}
+
+// waitListSchedulableNodes is a wrapper around listing nodes supporting retries.
+func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
+	var nodes *v1.NodeList
+	var err error
+	if wait.PollImmediate(pollNodeInterval, singleCallTimeout, func() (bool, error) {
+		nodes, err = c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
+			"spec.unschedulable": "false",
+		}.AsSelector().String()})
+		if err != nil {
+			if testutils.IsRetryableAPIError(err) {
+				return false, nil
+			}
+			return false, err
+		}
+		return true, nil
+	}) != nil {
+		return nodes, err
+	}
+	return nodes, nil
+}
+
+// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
+func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList {
+	nodes, err := waitListSchedulableNodes(c)
+	if err != nil {
+		expectNoError(err, "Non-retryable failure or timed out while listing nodes for e2e cluster.")
+	}
+	return nodes
+}
+
+// expectNoError checks if "err" is set, and if so, fails assertion while logging the error.
+func expectNoError(err error, explain ...interface{}) {
+	expectNoErrorWithOffset(1, err, explain...)
+}
+
+// expectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller
+// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f").
+func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
+	if err != nil {
+		e2elog.Logf("Unexpected error occurred: %v", err)
+	}
+	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
+}
@@ -93,6 +93,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/system"
 	taintutils "k8s.io/kubernetes/pkg/util/taints"
 	"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	uexec "k8s.io/utils/exec"
@@ -321,7 +322,7 @@ func SkipUnlessLocalEphemeralStorageEnabled() {

 // SkipUnlessSSHKeyPresent skips if no SSH key is found.
 func SkipUnlessSSHKeyPresent() {
-	if _, err := GetSigner(TestContext.Provider); err != nil {
+	if _, err := e2essh.GetSigner(TestContext.Provider); err != nil {
 		skipInternalf(1, "No SSH Key for provider %s: '%v'", TestContext.Provider, err)
 	}
 }
@@ -3744,21 +3745,21 @@ func RestartKubeProxy(host string) error {
 	}
 	// kubelet will restart the kube-proxy since it's running in a static pod
 	Logf("Killing kube-proxy on node %v", host)
-	result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider)
+	result, err := e2essh.SSH("sudo pkill kube-proxy", host, TestContext.Provider)
 	if err != nil || result.Code != 0 {
-		LogSSHResult(result)
+		e2essh.LogResult(result)
 		return fmt.Errorf("couldn't restart kube-proxy: %v", err)
 	}
 	// wait for kube-proxy to come back up
 	sshCmd := "sudo /bin/sh -c 'pgrep kube-proxy | wc -l'"
 	err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
 		Logf("Waiting for kubeproxy to come back up with %v on %v", sshCmd, host)
-		result, err := SSH(sshCmd, host, TestContext.Provider)
+		result, err := e2essh.SSH(sshCmd, host, TestContext.Provider)
 		if err != nil {
 			return false, err
 		}
 		if result.Code != 0 {
-			LogSSHResult(result)
+			e2essh.LogResult(result)
 			return false, fmt.Errorf("failed to run command, exited %d", result.Code)
 		}
 		if result.Stdout == "0\n" {
@@ -3789,14 +3790,14 @@ func RestartKubelet(host string) error {
 		cmd = "sudo /etc/init.d/kubelet restart"
 	} else if ProviderIs("vsphere") {
 		var sudoPresent bool
-		sshResult, err := SSH("sudo --version", host, TestContext.Provider)
+		sshResult, err := e2essh.SSH("sudo --version", host, TestContext.Provider)
 		if err != nil {
 			return fmt.Errorf("Unable to ssh to host %s with error %v", host, err)
 		}
 		if !strings.Contains(sshResult.Stderr, "command not found") {
 			sudoPresent = true
 		}
-		sshResult, err = SSH("systemctl --version", host, TestContext.Provider)
+		sshResult, err = e2essh.SSH("systemctl --version", host, TestContext.Provider)
 		if !strings.Contains(sshResult.Stderr, "command not found") {
 			cmd = "systemctl restart kubelet"
 		} else {
@@ -3809,9 +3810,9 @@ func RestartKubelet(host string) error {
 		cmd = "sudo systemctl restart kubelet"
 	}
 	Logf("Restarting kubelet via ssh on host %s with command %s", host, cmd)
-	result, err := SSH(cmd, host, TestContext.Provider)
+	result, err := e2essh.SSH(cmd, host, TestContext.Provider)
 	if err != nil || result.Code != 0 {
-		LogSSHResult(result)
+		e2essh.LogResult(result)
 		return fmt.Errorf("couldn't restart kubelet: %v", err)
 	}
 	return nil
@@ -3821,9 +3822,9 @@ func RestartKubelet(host string) error {
 func WaitForKubeletUp(host string) error {
 	cmd := "curl http://localhost:" + strconv.Itoa(ports.KubeletReadOnlyPort) + "/healthz"
 	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
-		result, err := SSH(cmd, host, TestContext.Provider)
+		result, err := e2essh.SSH(cmd, host, TestContext.Provider)
 		if err != nil || result.Code != 0 {
-			LogSSHResult(result)
+			e2essh.LogResult(result)
 		}
 		if result.Stdout == "ok" {
 			return nil
@@ -3868,9 +3869,9 @@ func sshRestartMaster() error {
 		command = "sudo /etc/init.d/kube-apiserver restart"
 	}
 	Logf("Restarting master via ssh, running: %v", command)
-	result, err := SSH(command, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
+	result, err := e2essh.SSH(command, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
 	if err != nil || result.Code != 0 {
-		LogSSHResult(result)
+		e2essh.LogResult(result)
 		return fmt.Errorf("couldn't restart apiserver: %v", err)
 	}
 	return nil
@@ -3934,9 +3935,9 @@ func RestartControllerManager() error {
 	}
 	cmd := "pidof kube-controller-manager | xargs sudo kill"
 	Logf("Restarting controller-manager via ssh, running: %v", cmd)
-	result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
+	result, err := e2essh.SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
 	if err != nil || result.Code != 0 {
-		LogSSHResult(result)
+		e2essh.LogResult(result)
 		return fmt.Errorf("couldn't restart controller-manager: %v", err)
 	}
 	return nil
@@ -3946,9 +3947,9 @@ func RestartControllerManager() error {
 func WaitForControllerManagerUp() error {
 	cmd := "curl http://localhost:" + strconv.Itoa(ports.InsecureKubeControllerManagerPort) + "/healthz"
 	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
-		result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
+		result, err := e2essh.SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
 		if err != nil || result.Code != 0 {
-			LogSSHResult(result)
+			e2essh.LogResult(result)
 		}
 		if result.Stdout == "ok" {
 			return nil
@@ -3962,13 +3963,13 @@ func CheckForControllerManagerHealthy(duration time.Duration) error {
 	var PID string
 	cmd := "pidof kube-controller-manager"
 	for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) {
-		result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
+		result, err := e2essh.SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
 		if err != nil {
 			// We don't necessarily know that it crashed, pipe could just be broken
-			LogSSHResult(result)
+			e2essh.LogResult(result)
 			return fmt.Errorf("master unreachable after %v", time.Since(start))
 		} else if result.Code != 0 {
-			LogSSHResult(result)
+			e2essh.LogResult(result)
 			return fmt.Errorf("SSH result code not 0. actually: %v after %v", result.Code, time.Since(start))
 		} else if result.Stdout != PID {
 			if PID == "" {
@@ -4311,8 +4312,8 @@ func BlockNetwork(from string, to string) {
 	Logf("block network traffic from %s to %s", from, to)
 	iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
 	dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule)
-	if result, err := SSH(dropCmd, from, TestContext.Provider); result.Code != 0 || err != nil {
-		LogSSHResult(result)
+	if result, err := e2essh.SSH(dropCmd, from, TestContext.Provider); result.Code != 0 || err != nil {
+		e2essh.LogResult(result)
 		Failf("Unexpected error: %v", err)
 	}
 }
@@ -4329,11 +4330,11 @@ func UnblockNetwork(from string, to string) {
 	// may fail). Manual intervention is required in such case (recreating the
 	// cluster solves the problem too).
 	err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) {
-		result, err := SSH(undropCmd, from, TestContext.Provider)
+		result, err := e2essh.SSH(undropCmd, from, TestContext.Provider)
 		if result.Code == 0 && err == nil {
 			return true, nil
 		}
-		LogSSHResult(result)
+		e2essh.LogResult(result)
 		if err != nil {
 			Logf("Unexpected error: %v", err)
 		}
@@ -36,6 +36,7 @@ go_library(
         "//test/e2e/framework/ginkgowrapper:go_default_library",
         "//test/e2e/framework/lifecycle:go_default_library",
         "//test/e2e/framework/log:go_default_library",
+        "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/upgrades:go_default_library",
         "//test/e2e/upgrades/apps:go_default_library",
         "//test/e2e/upgrades/storage:go_default_library",
@@ -30,6 +30,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"

 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
@@ -356,11 +357,11 @@ func waitForReplicationControllerwithSelectorInAddonTest(c clientset.Interface,
 		addonTestPollTimeout))
 }

-// TODO use the framework.SSH code, either adding an SCP to it or copying files
+// TODO use the ssh.SSH code, either adding an SCP to it or copying files
 // differently.
 func getMasterSSHClient() (*ssh.Client, error) {
 	// Get a signer for the provider.
-	signer, err := framework.GetSigner(framework.TestContext.Provider)
+	signer, err := e2essh.GetSigner(framework.TestContext.Provider)
 	if err != nil {
 		return nil, fmt.Errorf("error getting signer for provider %s: '%v'", framework.TestContext.Provider, err)
 	}
@@ -31,6 +31,7 @@ import (
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	testutils "k8s.io/kubernetes/test/utils"

 	"github.com/onsi/ginkgo"
@@ -56,7 +57,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {

 	ginkgo.BeforeEach(func() {
 		// These tests requires SSH to nodes, so the provider check should be identical to there
-		// (the limiting factor is the implementation of util.go's framework.GetSigner(...)).
+		// (the limiting factor is the implementation of util.go's e2essh.GetSigner(...)).

 		// Cluster must support node reboot
 		framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
@@ -266,7 +267,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 	}

 	// Reboot the node.
-	if err = framework.IssueSSHCommand(rebootCmd, provider, node); err != nil {
+	if err = e2essh.IssueSSHCommand(rebootCmd, provider, node); err != nil {
 		e2elog.Logf("Error while issuing ssh command: %v", err)
 		return false
 	}
@@ -299,7 +300,7 @@ func catLogHook(logPath string) terminationHook {
 	return func(provider string, nodes *v1.NodeList) {
 		for _, n := range nodes.Items {
 			cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath)
-			if _, err := framework.IssueSSHCommandWithResult(cmd, provider, &n); err != nil {
+			if _, err := e2essh.IssueSSHCommandWithResult(cmd, provider, &n); err != nil {
 				e2elog.Logf("Error while issuing ssh command: %v", err)
 			}
 		}
@@ -63,6 +63,7 @@ go_library(
         "//test/e2e/framework/ingress:go_default_library",
         "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/providers/gce:go_default_library",
+        "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/network/scale:go_default_library",
         "//test/images/net/nat:go_default_library",
         "//test/utils:go_default_library",
@@ -29,6 +29,7 @@ import (

 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	"k8s.io/kubernetes/test/images/net/nat"
 	imageutils "k8s.io/kubernetes/test/utils/image"

@@ -79,7 +80,7 @@ var _ = SIGDescribe("Network", func() {
 		zero := int64(0)

 		// Some distributions (Ubuntu 16.04 etc.) don't support the proc file.
-		_, err := framework.IssueSSHCommandWithResult(
+		_, err := e2essh.IssueSSHCommandWithResult(
 			"ls /proc/net/nf_conntrack",
 			framework.TestContext.Provider,
 			clientNodeInfo.node)
@@ -181,7 +182,7 @@ var _ = SIGDescribe("Network", func() {
 		By("Checking /proc/net/nf_conntrack for the timeout")
 		// If test flakes occur here, then this check should be performed
 		// in a loop as there may be a race with the client connecting.
-		framework.IssueSSHCommandWithResult(
+		e2essh.IssueSSHCommandWithResult(
 			fmt.Sprintf("sudo cat /proc/net/nf_conntrack | grep 'dport=%v'",
 				testDaemonTCPPort),
 			framework.TestContext.Provider,
@@ -189,7 +190,7 @@ var _ = SIGDescribe("Network", func() {

 		// Timeout in seconds is available as the fifth column from
 		// /proc/net/nf_conntrack.
-		result, err := framework.IssueSSHCommandWithResult(
+		result, err := e2essh.IssueSSHCommandWithResult(
 			fmt.Sprintf(
 				"sudo cat /proc/net/nf_conntrack "+
 					"| grep 'CLOSE_WAIT.*dst=%v.*dport=%v' "+
@@ -39,6 +39,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	gcecloud "k8s.io/legacy-cloud-providers/gce"

@@ -311,7 +312,7 @@ var _ = SIGDescribe("Services", func() {

 	It("should be able to up and down services", func() {
 		// TODO: use the ServiceTestJig here
-		// this test uses framework.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
+		// this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
 		framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
 		// this test does not work if the Node does not support SSH Key
 		framework.SkipUnlessSSHKeyPresent()
@@ -326,7 +327,7 @@ var _ = SIGDescribe("Services", func() {
 		podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service2"), ns, numPods)
 		Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns)

-		hosts, err := framework.NodeSSHHosts(cs)
+		hosts, err := e2essh.NodeSSHHosts(cs)
 		Expect(err).NotTo(HaveOccurred(), "failed to find external/internal IPs for every node")
 		if len(hosts) == 0 {
 			framework.Failf("No ssh-able nodes")
@@ -390,7 +391,7 @@ var _ = SIGDescribe("Services", func() {
 			framework.Failf("VIPs conflict: %v", svc1IP)
 		}

-		hosts, err := framework.NodeSSHHosts(cs)
+		hosts, err := e2essh.NodeSSHHosts(cs)
 		Expect(err).NotTo(HaveOccurred(), "failed to find external/internal IPs for every node")
 		if len(hosts) == 0 {
 			framework.Failf("No ssh-able nodes")
@@ -408,12 +409,12 @@ var _ = SIGDescribe("Services", func() {
 		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))

 		By("Removing iptable rules")
-		result, err := framework.SSH(`
+		result, err := e2essh.SSH(`
 			sudo iptables -t nat -F KUBE-SERVICES || true;
 			sudo iptables -t nat -F KUBE-PORTALS-HOST || true;
 			sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, framework.TestContext.Provider)
 		if err != nil || result.Code != 0 {
-			framework.LogSSHResult(result)
+			e2essh.LogResult(result)
 			framework.Failf("couldn't remove iptable rules: %v", err)
 		}
 		framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
@@ -433,7 +434,7 @@ var _ = SIGDescribe("Services", func() {
 		podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service1"), ns, numPods)
 		Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns)

-		hosts, err := framework.NodeSSHHosts(cs)
+		hosts, err := e2essh.NodeSSHHosts(cs)
 		Expect(err).NotTo(HaveOccurred(), "failed to find external/internal IPs for every node")
 		if len(hosts) == 0 {
 			framework.Failf("No ssh-able nodes")
@@ -1724,7 +1725,7 @@ var _ = SIGDescribe("Services", func() {
 	})

 	It("should implement service.kubernetes.io/service-proxy-name", func() {
-		// this test uses framework.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
+		// this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP
 		framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
 		// this test does not work if the Node does not support SSH Key
 		framework.SkipUnlessSSHKeyPresent()
@@ -1751,7 +1752,7 @@ var _ = SIGDescribe("Services", func() {

 		jig := framework.NewServiceTestJig(cs, svcToggled.ObjectMeta.Name)

-		hosts, err := framework.NodeSSHHosts(cs)
+		hosts, err := e2essh.NodeSSHHosts(cs)
 		Expect(err).NotTo(HaveOccurred(), "failed to find external/internal IPs for every node")
 		if len(hosts) == 0 {
 			framework.Failf("No ssh-able nodes")
@@ -38,6 +38,7 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/job:go_default_library",
         "//test/e2e/framework/log:go_default_library",
+        "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",
@@ -22,6 +22,7 @@ import (

 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"

 	. "github.com/onsi/ginkgo"
 )
@@ -39,7 +40,7 @@ var _ = SIGDescribe("crictl", func() {
 	It("should be able to run crictl on the node", func() {
 		// Get all nodes' external IPs.
 		By("Getting all nodes' SSH-able IP addresses")
-		hosts, err := framework.NodeSSHHosts(f.ClientSet)
+		hosts, err := e2essh.NodeSSHHosts(f.ClientSet)
 		if err != nil {
 			framework.Failf("Error getting node hostnames: %v", err)
 		}
@@ -56,7 +57,7 @@ var _ = SIGDescribe("crictl", func() {
 			host := hosts[0]
 			By(fmt.Sprintf("SSH'ing to node %q to run %q", host, testCase.cmd))

-			result, err := framework.SSH(testCase.cmd, host, framework.TestContext.Provider)
+			result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider)
 			stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr)
 			if err != nil {
 				framework.Failf("Ran %q on %q, got error %v", testCase.cmd, host, err)
@@ -30,6 +30,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
for _, test := range tests {
|
||||
e2elog.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
|
||||
err = wait.Poll(poll, timeout, func() (bool, error) {
|
||||
result, err := framework.NodeExec(nodeIP, test.cmd)
|
||||
result, err := e2essh.NodeExec(nodeIP, test.cmd, framework.TestContext.Provider)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.LogSSHResult(result)
|
||||
e2essh.LogResult(result)
|
||||
ok := (result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0)
|
||||
if expectClean && ok { // keep trying
|
||||
return false, nil
|
||||
|
@@ -24,6 +24,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	. "github.com/onsi/ginkgo"
@@ -103,7 +104,7 @@ var _ = SIGDescribe("Mount propagation", func() {
 		hostDir := "/var/lib/kubelet/" + f.Namespace.Name
 		defer func() {
 			cleanCmd := fmt.Sprintf("sudo rm -rf %q", hostDir)
-			framework.IssueSSHCommand(cleanCmd, framework.TestContext.Provider, node)
+			e2essh.IssueSSHCommand(cleanCmd, framework.TestContext.Provider, node)
 		}()

 		podClient := f.PodClient()
@@ -140,12 +141,12 @@ var _ = SIGDescribe("Mount propagation", func() {
 		// The host mounts one tmpfs to testdir/host and puts a file there so we
 		// can check mount propagation from the host to pods.
 		cmd := fmt.Sprintf("sudo mkdir %[1]q/host; sudo mount -t tmpfs e2e-mount-propagation-host %[1]q/host; echo host > %[1]q/host/file", hostDir)
-		err := framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
+		err := e2essh.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
 		framework.ExpectNoError(err)

 		defer func() {
 			cmd := fmt.Sprintf("sudo umount %q/host", hostDir)
-			framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
+			e2essh.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
 		}()

 		// Now check that mounts are propagated to the right containers.
@@ -181,12 +182,12 @@ var _ = SIGDescribe("Mount propagation", func() {
 		// Check that the mounts are/are not propagated to the host.
 		// Host can see mount from master
 		cmd = fmt.Sprintf("test `cat %q/master/file` = master", hostDir)
-		err = framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
+		err = e2essh.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
 		framework.ExpectNoError(err, "host should see mount from master")

 		// Host can't see mount from slave
 		cmd = fmt.Sprintf("test ! -e %q/slave/file", hostDir)
-		err = framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
+		err = e2essh.IssueSSHCommand(cmd, framework.TestContext.Provider, node)
 		framework.ExpectNoError(err, "host shouldn't see mount from slave")
 	})
 })
@@ -29,6 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	testutils "k8s.io/kubernetes/test/utils"

 	. "github.com/onsi/ginkgo"
@@ -80,7 +81,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
 			workingSetStats[host] = []float64{}

 			cmd := "systemctl status node-problem-detector.service"
-			result, err := framework.SSH(cmd, host, framework.TestContext.Provider)
+			result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
 			isStandaloneMode[host] = (err == nil && result.Code == 0)

 			By(fmt.Sprintf("Check node %q has node-problem-detector process", host))
@@ -88,14 +89,14 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
 			// showing up, because string text "[n]ode-problem-detector" does not
 			// match regular expression "[n]ode-problem-detector".
 			psCmd := "ps aux | grep [n]ode-problem-detector"
-			result, err = framework.SSH(psCmd, host, framework.TestContext.Provider)
+			result, err = e2essh.SSH(psCmd, host, framework.TestContext.Provider)
 			framework.ExpectNoError(err)
 			Expect(result.Code).To(BeZero())
 			Expect(result.Stdout).To(ContainSubstring("node-problem-detector"))

 			By(fmt.Sprintf("Check node-problem-detector is running fine on node %q", host))
 			journalctlCmd := "sudo journalctl -u node-problem-detector"
-			result, err = framework.SSH(journalctlCmd, host, framework.TestContext.Provider)
+			result, err = e2essh.SSH(journalctlCmd, host, framework.TestContext.Provider)
 			framework.ExpectNoError(err)
 			Expect(result.Code).To(BeZero())
 			Expect(result.Stdout).NotTo(ContainSubstring("node-problem-detector.service: Failed"))
@@ -109,7 +110,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
 			By(fmt.Sprintf("Inject log to trigger AUFSUmountHung on node %q", host))
 			log := "INFO: task umount.aufs:21568 blocked for more than 120 seconds."
 			injectLogCmd := "sudo sh -c \"echo 'kernel: " + log + "' >> /dev/kmsg\""
-			_, err = framework.SSH(injectLogCmd, host, framework.TestContext.Provider)
+			_, err = e2essh.SSH(injectLogCmd, host, framework.TestContext.Provider)
 			framework.ExpectNoError(err)
 			Expect(result.Code).To(BeZero())
 		}
@@ -214,7 +215,7 @@ func verifyNodeCondition(f *framework.Framework, condition v1.NodeConditionType,

 func getMemoryStat(f *framework.Framework, host string) (rss, workingSet float64) {
 	memCmd := "cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.usage_in_bytes && cat /sys/fs/cgroup/memory/system.slice/node-problem-detector.service/memory.stat"
-	result, err := framework.SSH(memCmd, host, framework.TestContext.Provider)
+	result, err := e2essh.SSH(memCmd, host, framework.TestContext.Provider)
 	framework.ExpectNoError(err)
 	Expect(result.Code).To(BeZero())
 	lines := strings.Split(result.Stdout, "\n")
@@ -250,7 +251,7 @@ func getMemoryStat(f *framework.Framework, host string) (rss, workingSet float64

 func getCpuStat(f *framework.Framework, host string) (usage, uptime float64) {
 	cpuCmd := "cat /sys/fs/cgroup/cpu/system.slice/node-problem-detector.service/cpuacct.usage && cat /proc/uptime | awk '{print $1}'"
-	result, err := framework.SSH(cpuCmd, host, framework.TestContext.Provider)
+	result, err := e2essh.SSH(cpuCmd, host, framework.TestContext.Provider)
 	framework.ExpectNoError(err)
 	Expect(result.Code).To(BeZero())
 	lines := strings.Split(result.Stdout, "\n")
@@ -22,6 +22,7 @@ import (

 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"

 	. "github.com/onsi/ginkgo"
 )
@@ -33,7 +34,7 @@ var _ = SIGDescribe("SSH", func() {
 	f := framework.NewDefaultFramework("ssh")

 	BeforeEach(func() {
-		// When adding more providers here, also implement their functionality in util.go's framework.GetSigner(...).
+		// When adding more providers here, also implement their functionality in e2essh.GetSigner(...).
 		framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)

 		// This test SSH's into the node for which it needs the $HOME/.ssh/id_rsa key to be present. So
@@ -44,7 +45,7 @@ var _ = SIGDescribe("SSH", func() {
 	It("should SSH to all nodes and run commands", func() {
 		// Get all nodes' external IPs.
 		By("Getting all nodes' SSH-able IP addresses")
-		hosts, err := framework.NodeSSHHosts(f.ClientSet)
+		hosts, err := e2essh.NodeSSHHosts(f.ClientSet)
 		if err != nil {
 			framework.Failf("Error getting node hostnames: %v", err)
 		}
@@ -78,7 +79,7 @@ var _ = SIGDescribe("SSH", func() {
 		By(fmt.Sprintf("SSH'ing to %d nodes and running %s", len(testhosts), testCase.cmd))

 		for _, host := range testhosts {
-			result, err := framework.SSH(testCase.cmd, host, framework.TestContext.Provider)
+			result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider)
 			stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr)
 			if err != testCase.expectedError {
 				framework.Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError)
|
||||
|
||||
// Quickly test that SSH itself errors correctly.
|
||||
By("SSH'ing to a nonexistent host")
|
||||
if _, err = framework.SSH(`echo "hello"`, "i.do.not.exist", framework.TestContext.Provider); err == nil {
|
||||
if _, err = e2essh.SSH(`echo "hello"`, "i.do.not.exist", framework.TestContext.Provider); err == nil {
|
||||
framework.Failf("Expected error trying to SSH to nonexistent host.")
|
||||
}
|
||||
})
|
||||
|
@@ -69,6 +69,7 @@ go_library(
         "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/framework/providers/gce:go_default_library",
+        "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/framework/testfiles:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
         "//test/e2e/storage/drivers:go_default_library",
@@ -25,10 +25,11 @@ import (
 	"time"

 	. "github.com/onsi/ginkgo"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -133,8 +134,8 @@ func getFlexDir(c clientset.Interface, node *v1.Node, vendor, driver string) str
 }

 func sshAndLog(cmd, host string, failOnError bool) {
-	result, err := framework.SSH(cmd, host, framework.TestContext.Provider)
-	framework.LogSSHResult(result)
+	result, err := e2essh.SSH(cmd, host, framework.TestContext.Provider)
+	e2essh.LogResult(result)
 	framework.ExpectNoError(err)
 	if result.Code != 0 && failOnError {
 		framework.Failf("%s returned non-zero, stderr: %s", cmd, result.Stderr)
@@ -27,6 +27,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/log:go_default_library",
+        "//test/e2e/framework/ssh:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
@@ -35,6 +35,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	uexec "k8s.io/utils/exec"
 )
@@ -110,14 +111,14 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
 	nodeIP = nodeIP + ":22"

 	e2elog.Logf("Checking if sudo command is present")
-	sshResult, err := framework.SSH("sudo --version", nodeIP, framework.TestContext.Provider)
+	sshResult, err := e2essh.SSH("sudo --version", nodeIP, framework.TestContext.Provider)
 	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
 	if !strings.Contains(sshResult.Stderr, "command not found") {
 		sudoPresent = true
 	}

 	e2elog.Logf("Checking if systemctl command is present")
-	sshResult, err = framework.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
+	sshResult, err = e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
 	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
 	if !strings.Contains(sshResult.Stderr, "command not found") {
 		command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
@@ -134,9 +135,9 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
 	}

 	e2elog.Logf("Attempting `%s`", command)
-	sshResult, err = framework.SSH(command, nodeIP, framework.TestContext.Provider)
+	sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
 	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
-	framework.LogSSHResult(sshResult)
+	e2essh.LogResult(sshResult)
 	Expect(sshResult.Code).To(BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)

 	if kOp == KStop {
@@ -178,9 +179,9 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
 		command = fmt.Sprintf("sudo %s", command)
 	}
 	e2elog.Logf("Attempting `%s`", command)
-	sshResult, err := framework.SSH(command, nodeIP, framework.TestContext.Provider)
+	sshResult, err := e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
 	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP))
-	framework.LogSSHResult(sshResult)
+	e2essh.LogResult(sshResult)
 	Expect(sshResult.Code).To(BeZero(), "Failed to get kubelet PID")
 	Expect(sshResult.Stdout).NotTo(BeEmpty(), "Kubelet Main PID should not be Empty")
 	return sshResult.Stdout
@@ -212,15 +213,15 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 	nodeIP = nodeIP + ":22"

 	By("Expecting the volume mount to be found.")
-	result, err := framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
-	framework.LogSSHResult(result)
+	result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
+	e2essh.LogResult(result)
 	framework.ExpectNoError(err, "Encountered SSH error.")
 	Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))

 	if checkSubpath {
 		By("Expecting the volume subpath mount to be found.")
-		result, err := framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
-		framework.LogSSHResult(result)
+		result, err := e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
+		e2essh.LogResult(result)
 		framework.ExpectNoError(err, "Encountered SSH error.")
 		Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
 	}
@@ -254,16 +255,16 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 	}

 	By("Expecting the volume mount not to be found.")
-	result, err = framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
-	framework.LogSSHResult(result)
+	result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
+	e2essh.LogResult(result)
 	framework.ExpectNoError(err, "Encountered SSH error.")
 	Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
 	e2elog.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)

 	if checkSubpath {
 		By("Expecting the volume subpath mount not to be found.")
-		result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
-		framework.LogSSHResult(result)
+		result, err = e2essh.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
+		e2essh.LogResult(result)
 		framework.ExpectNoError(err, "Encountered SSH error.")
 		Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
 		e2elog.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
@@ -54,6 +54,7 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/deployment:go_default_library",
        "//test/e2e/framework/log:go_default_library",
+        "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/storage/utils:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
@@ -43,6 +43,7 @@ import (
 	"k8s.io/kubernetes/pkg/volume/util"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -801,9 +802,9 @@ func GetReadySchedulableRandomNodeInfo() *NodeInfo {
 func invokeVCenterServiceControl(command, service, host string) error {
 	sshCmd := fmt.Sprintf("service-control --%s %s", command, service)
 	e2elog.Logf("Invoking command %v on vCenter host %v", sshCmd, host)
-	result, err := framework.SSH(sshCmd, host, framework.TestContext.Provider)
+	result, err := e2essh.SSH(sshCmd, host, framework.TestContext.Provider)
 	if err != nil || result.Code != 0 {
-		framework.LogSSHResult(result)
+		e2essh.LogResult(result)
 		return fmt.Errorf("couldn't execute command: %s on vCenter host: %v", sshCmd, err)
 	}
 	return nil