Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-29 14:37:00 +00:00
Merge pull request #81693 from oomichi/replace-e2elog-framework-c-n

Use log functions of core framework on [c-n]

Commit: 8e05e8346a
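The change replaces calls made through the e2elog alias (k8s.io/kubernetes/test/e2e/framework/log) with the identically named Logf/Failf helpers that live in the core framework package itself, so the files under test/e2e/framework can drop the extra import. The Go sketch below is a minimal, self-contained illustration of that pattern only; the Logf/Failf bodies and the pokeHost helper are simplified stand-ins written for this example, not the framework's actual implementations.

package main

import (
	"fmt"
	"time"
)

// Logf is a stand-in for the core framework's printf-style log helper.
// Because the helper lives in the framework package itself, code inside
// that package can call it directly instead of importing the e2elog alias.
func Logf(format string, args ...interface{}) {
	fmt.Printf("%s: INFO: %s\n", time.Now().Format(time.StampMilli), fmt.Sprintf(format, args...))
}

// Failf is a stand-in for the failure helper; the real one records a
// test failure, while this sketch only prints the message.
func Failf(format string, args ...interface{}) {
	fmt.Printf("%s: FAIL: %s\n", time.Now().Format(time.StampMilli), fmt.Sprintf(format, args...))
}

// pokeHost mirrors the shape of the helpers touched by this PR: it used
// to call e2elog.Logf / e2elog.Failf and now calls the package-local
// Logf / Failf instead.
func pokeHost(host string) {
	if host == "" {
		Failf("Got empty host for poke")
		return
	}
	Logf("Poking %q", host)
}

func main() {
	pokeHost("example.invalid")
	pokeHost("")
}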
@@ -34,7 +34,6 @@ import (
 "k8s.io/apimachinery/pkg/runtime/schema"
 "k8s.io/client-go/kubernetes/scheme"
 "k8s.io/client-go/tools/cache"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e/framework/testfiles"
 )

@@ -81,7 +80,7 @@ func visitManifests(cb func([]byte) error, files ...string) error {
 for _, fileName := range files {
 data, err := testfiles.Read(fileName)
 if err != nil {
-e2elog.Failf("reading manifest file: %v", err)
+Failf("reading manifest file: %v", err)
 }

 // Split at the "---" separator before working on

@@ -117,7 +116,7 @@ func visitManifests(cb func([]byte) error, files ...string) error {
 func (f *Framework) PatchItems(items ...interface{}) error {
 for _, item := range items {
 // Uncomment when debugging the loading and patching of items.
-// e2elog.Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
+// Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
 if err := f.patchItemRecursively(item); err != nil {
 return err
 }

@@ -156,7 +155,7 @@ func (f *Framework) CreateItems(items ...interface{}) (func(), error) {
 // to non-namespaced items.
 for _, destructor := range destructors {
 if err := destructor(); err != nil && !apierrs.IsNotFound(err) {
-e2elog.Logf("deleting failed: %s", err)
+Logf("deleting failed: %s", err)
 }
 }
 }

@@ -169,12 +168,12 @@ func (f *Framework) CreateItems(items ...interface{}) (func(), error) {
 description := DescribeItem(item)
 // Uncomment this line to get a full dump of the entire item.
 // description = fmt.Sprintf("%s:\n%s", description, PrettyPrint(item))
-e2elog.Logf("creating %s", description)
+Logf("creating %s", description)
 for _, factory := range factories {
 destructor, err := factory.Create(f, item)
 if destructor != nil {
 destructors = append(destructors, func() error {
-e2elog.Logf("deleting %s", description)
+Logf("deleting %s", description)
 return destructor()
 })
 }

@@ -417,7 +416,7 @@ func (*clusterRoleFactory) Create(f *Framework, i interface{}) (func() error, er
 return nil, errorItemNotSupported
 }

-e2elog.Logf("Define cluster role %v", item.GetName())
+Logf("Define cluster role %v", item.GetName())
 client := f.ClientSet.RbacV1().ClusterRoles()
 if _, err := client.Create(item); err != nil {
 return nil, errors.Wrap(err, "create ClusterRole")
@@ -27,7 +27,6 @@ import (
 "k8s.io/client-go/kubernetes/scheme"
 restclient "k8s.io/client-go/rest"
 "k8s.io/client-go/tools/remotecommand"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 "github.com/onsi/gomega"
 )

@@ -49,7 +48,7 @@ type ExecOptions struct {
 // returning stdout, stderr and error. `options` allowed for
 // additional parameters to be passed.
 func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) {
-e2elog.Logf("ExecWithOptions %+v", options)
+Logf("ExecWithOptions %+v", options)

 config, err := LoadConfig()
 ExpectNoError(err, "failed to load restclient config")

@@ -98,7 +97,7 @@ func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName
 // ExecCommandInContainer executes a command in the specified container.
 func (f *Framework) ExecCommandInContainer(podName, containerName string, cmd ...string) string {
 stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
-e2elog.Logf("Exec stderr: %q", stderr)
+Logf("Exec stderr: %q", stderr)
 ExpectNoError(err,
 "failed to execute command in pod %v, container %v: %v",
 podName, containerName, err)
@@ -21,7 +21,6 @@ import (
 "fmt"
 "sync"

-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 )

@@ -60,7 +59,7 @@ func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...inter
 if desc != "" {
 msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
 }
-e2elog.Logf(msg)
+Logf(msg)
 f.lock.Lock()
 defer f.lock.Unlock()
 f.Flakes = append(f.Flakes, msg)
@@ -46,7 +46,6 @@ import (
 "k8s.io/client-go/rest"
 "k8s.io/client-go/restmapper"
 scaleclient "k8s.io/client-go/scale"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2epsp "k8s.io/kubernetes/test/e2e/framework/psp"

@@ -225,7 +224,7 @@ func (f *Framework) BeforeEach() {
 err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
 ExpectNoError(err)
 } else {
-e2elog.Logf("Skipping waiting for service account")
+Logf("Skipping waiting for service account")
 }
 f.UniqueName = f.Namespace.GetName()
 } else {

@@ -253,7 +252,7 @@ func (f *Framework) BeforeEach() {
 PrintVerboseLogs: false,
 }, nil)
 if err != nil {
-e2elog.Logf("Error while creating NewResourceUsageGatherer: %v", err)
+Logf("Error while creating NewResourceUsageGatherer: %v", err)
 } else {
 go f.gatherer.StartGatheringData()
 }

@@ -274,13 +273,13 @@ func (f *Framework) BeforeEach() {
 if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics {
 grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics)
 if err != nil {
-e2elog.Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
+Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
 } else {
 f.clusterAutoscalerMetricsBeforeTest, err = grabber.Grab()
 if err != nil {
-e2elog.Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
+Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
 } else {
-e2elog.Logf("Gathered ClusterAutoscaler metrics before test")
+Logf("Gathered ClusterAutoscaler metrics before test")
 }
 }

@@ -311,15 +310,15 @@ func (f *Framework) AfterEach() {
 if !apierrors.IsNotFound(err) {
 nsDeletionErrors[ns.Name] = err
 } else {
-e2elog.Logf("Namespace %v was already deleted", ns.Name)
+Logf("Namespace %v was already deleted", ns.Name)
 }
 }
 }
 } else {
 if !TestContext.DeleteNamespace {
-e2elog.Logf("Found DeleteNamespace=false, skipping namespace deletion!")
+Logf("Found DeleteNamespace=false, skipping namespace deletion!")
 } else {
-e2elog.Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
+Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
 }
 }

@@ -334,7 +333,7 @@ func (f *Framework) AfterEach() {
 for namespaceKey, namespaceErr := range nsDeletionErrors {
 messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr))
 }
-e2elog.Failf(strings.Join(messages, ","))
+Failf(strings.Join(messages, ","))
 }
 }()

@@ -366,11 +365,11 @@ func (f *Framework) AfterEach() {
 grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark")
 grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics)
 if err != nil {
-e2elog.Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
+Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
 } else {
 received, err := grabber.Grab()
 if err != nil {
-e2elog.Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
+Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
 }
 (*e2emetrics.ComponentCollection)(&received).ComputeClusterAutoscalerMetricsDelta(f.clusterAutoscalerMetricsBeforeTest)
 f.TestSummaries = append(f.TestSummaries, (*e2emetrics.ComponentCollection)(&received))

@@ -391,7 +390,7 @@ func (f *Framework) AfterEach() {
 // This is explicitly done at the very end of the test, to avoid
 // e.g. not removing namespace in case of this failure.
 if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
-e2elog.Failf("All nodes should be ready after test, %v", err)
+Failf("All nodes should be ready after test, %v", err)
 }
 }

@@ -490,7 +489,7 @@ func (f *Framework) WriteFileViaContainer(podName, containerName string, path st
 command := fmt.Sprintf("echo '%s' > '%s'", contents, path)
 stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "/bin/sh", "-c", command)
 if err != nil {
-e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
+Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
 }
 return err
 }

@@ -501,7 +500,7 @@ func (f *Framework) ReadFileViaContainer(podName, containerName string, path str

 stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "cat", path)
 if err != nil {
-e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
+Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
 }
 return string(stdout), err
 }

@@ -512,7 +511,7 @@ func (f *Framework) CheckFileSizeViaContainer(podName, containerName, path strin

 stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "ls", "-l", path)
 if err != nil {
-e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
+Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
 }
 return string(stdout), err
 }

@@ -549,7 +548,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
 TargetPort: intstr.FromInt(contPort),
 }}
 }
-e2elog.Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
+Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
 service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{
 ObjectMeta: metav1.ObjectMeta{
 Name: "service-for-" + appName,

@@ -575,7 +574,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
 for i, node := range nodes.Items {
 // one per node, but no more than maxCount.
 if i <= maxCount {
-e2elog.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
+Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
 _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: fmt.Sprintf(appName+"-pod-%v", i),

@@ -646,19 +645,19 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
 func kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
 for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
 if numRetries > 0 {
-e2elog.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
+Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
 }

 stdOutBytes, stdErrBytes, err := kubectlExec(namespace, podName, containerName, args...)
 if err != nil {
 if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
 // Retry on "i/o timeout" errors
-e2elog.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
+Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
 continue
 }
 if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
 // Retry on "container not found" errors
-e2elog.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
+Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
 time.Sleep(2 * time.Second)
 continue
 }

@@ -683,7 +682,7 @@ func kubectlExec(namespace string, podName, containerName string, args ...string
 cmd := KubectlCmd(cmdArgs...)
 cmd.Stdout, cmd.Stderr = &stdout, &stderr

-e2elog.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
+Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
 err := cmd.Run()
 return stdout.Bytes(), stderr.Bytes(), err
 }

@@ -790,7 +789,7 @@ func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Names

 ns := namespace.Name
 pl, err := filterLabels(p.Selectors, c, ns) // Build an v1.PodList to operate against.
-e2elog.Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
+Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
 if len(pl.Items) == 0 || err != nil {
 return pl.Items, err
 }

@@ -805,7 +804,7 @@ ReturnPodsSoFar:
 }
 passesVerify, err := passesVerifyFilter(pod, p.Verify)
 if err != nil {
-e2elog.Logf("Error detected on %v : %v !", pod.Name, err)
+Logf("Error detected on %v : %v !", pod.Name, err)
 break ReturnPodsSoFar
 }
 if passesVerify {

@@ -826,12 +825,12 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1

 // Failure
 if returnedErr != nil {
-e2elog.Logf("Cutting polling short: We got an error from the pod filtering layer.")
+Logf("Cutting polling short: We got an error from the pod filtering layer.")
 // stop polling if the pod filtering returns an error. that should never happen.
 // it indicates, for example, that the client is broken or something non-pod related.
 return false, returnedErr
 }
-e2elog.Logf("Found %v / %v", len(pods), atLeast)
+Logf("Found %v / %v", len(pods), atLeast)

 // Success
 if len(pods) >= atLeast {

@@ -840,7 +839,7 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
 // Keep trying...
 return false, nil
 })
-e2elog.Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
+Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
 return pods, err
 }

@@ -848,7 +847,7 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
 func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration) {
 pods, err := cl.WaitFor(atLeast, timeout)
 if err != nil || len(pods) < atLeast {
-e2elog.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
+Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
 }
 }

@@ -861,14 +860,14 @@ func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
 pods, err := cl.podState.filter(cl.client, cl.namespace)
 if err == nil {
 if len(pods) == 0 {
-e2elog.Failf("No pods matched the filter.")
+Failf("No pods matched the filter.")
 }
-e2elog.Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
+Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
 for _, p := range pods {
 podFunc(p)
 }
 } else {
-e2elog.Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
+Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
 }

 return err

@@ -880,7 +879,7 @@ func GetLogToFileFunc(file *os.File) func(format string, args ...interface{}) {
 return func(format string, args ...interface{}) {
 writer := bufio.NewWriter(file)
 if _, err := fmt.Fprintf(writer, format, args...); err != nil {
-e2elog.Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
+Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
 }
 writer.Flush()
 }
@@ -21,7 +21,6 @@ import (
 "fmt"
 "strings"

-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )

@@ -47,7 +46,7 @@ func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsag
 // Get kubernetes component resource usage
 sshResult, err := getMasterUsageByPrefix("kube")
 if err != nil {
-e2elog.Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
+Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
 return nil
 }
 scanner := bufio.NewScanner(strings.NewReader(sshResult))

@@ -65,7 +64,7 @@ func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsag
 // Get etcd resource usage
 sshResult, err = getMasterUsageByPrefix("bin/etcd")
 if err != nil {
-e2elog.Logf("Error when trying to SSH to master machine. Skipping probe")
+Logf("Error when trying to SSH to master machine. Skipping probe")
 return nil
 }
 scanner = bufio.NewScanner(strings.NewReader(sshResult))
@@ -23,8 +23,6 @@ import (
 "os/exec"
 "path/filepath"
 "strings"
-
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )

 // TODO: These should really just use the GCE API client library or at least use

@@ -48,9 +46,9 @@ func lookupClusterImageSources() (string, string, error) {
 str = strings.Replace(str, ";", "\n", -1)
 lines := strings.Split(str, "\n")
 if err != nil {
-e2elog.Logf("lookupDiskImageSources: gcloud error with [%#v]; err:%v", argv, err)
+Logf("lookupDiskImageSources: gcloud error with [%#v]; err:%v", argv, err)
 for _, l := range lines {
-e2elog.Logf(" > %s", l)
+Logf(" > %s", l)
 }
 }
 return lines, err

@@ -114,11 +112,11 @@ func lookupClusterImageSources() (string, string, error) {
 func LogClusterImageSources() {
 masterImg, nodeImg, err := lookupClusterImageSources()
 if err != nil {
-e2elog.Logf("Cluster image sources lookup failed: %v\n", err)
+Logf("Cluster image sources lookup failed: %v\n", err)
 return
 }
-e2elog.Logf("cluster-master-image: %s", masterImg)
+Logf("cluster-master-image: %s", masterImg)
-e2elog.Logf("cluster-node-image: %s", nodeImg)
+Logf("cluster-node-image: %s", nodeImg)

 images := map[string]string{
 "master_os_image": masterImg,

@@ -128,7 +126,7 @@ func LogClusterImageSources() {
 outputBytes, _ := json.MarshalIndent(images, "", " ")
 filePath := filepath.Join(TestContext.ReportDir, "images.json")
 if err := ioutil.WriteFile(filePath, outputBytes, 0644); err != nil {
-e2elog.Logf("cluster images sources, could not write to %q: %v", filePath, err)
+Logf("cluster images sources, could not write to %q: %v", filePath, err)
 }
 }
@@ -26,7 +26,6 @@ import (
 "time"

 clientset "k8s.io/client-go/kubernetes"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )

@@ -259,10 +258,10 @@ func (g *LogSizeGatherer) Work() bool {
 TestContext.Provider,
 )
 if err != nil {
-e2elog.Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
+Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
 // In case of repeated error give up.
 if workItem.backoffMultiplier >= 128 {
-e2elog.Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
+Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
 g.wg.Done()
 return false
 }

@@ -278,7 +277,7 @@ func (g *LogSizeGatherer) Work() bool {
 path := results[i]
 size, err := strconv.Atoi(results[i+1])
 if err != nil {
-e2elog.Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
+Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
 continue
 }
 g.data.addNewData(workItem.ip, path, now, size)
@@ -38,7 +38,6 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 imageutils "k8s.io/kubernetes/test/utils/image"

@@ -172,10 +171,10 @@ func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets
 if foundEndpoints.Has(e.Name) {
 continue
 }
-e2elog.Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
+Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
 desc, _ := RunKubectl(
 "describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
-e2elog.Logf(desc)
+Logf(desc)
 }
 }

@@ -220,11 +219,11 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
 // A failure to kubectl exec counts as a try, not a hard fail.
 // Also note that we will keep failing for maxTries in tests where
 // we confirm unreachability.
-e2elog.Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr)
+Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr)
 } else {
 var output map[string][]string
 if err := json.Unmarshal([]byte(stdout), &output); err != nil {
-e2elog.Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
+Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
 cmd, config.HostTestContainerPod.Name, stdout, err)
 continue
 }

@@ -236,7 +235,7 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
 }
 }
 }
-e2elog.Logf("Waiting for endpoints: %v", expectedEps.Difference(eps))
+Logf("Waiting for endpoints: %v", expectedEps.Difference(eps))

 // Check against i+1 so we exit if minTries == maxTries.
 if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries {

@@ -247,7 +246,7 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
 }

 config.diagnoseMissingEndpoints(eps)
-e2elog.Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
+Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
 }

 // GetEndpointsFromTestContainer executes a curl via kubectl exec in a test container.

@@ -279,12 +278,12 @@ func (config *NetworkingTestConfig) GetEndpointsFromContainer(protocol, containe
 // A failure to kubectl exec counts as a try, not a hard fail.
 // Also note that we will keep failing for maxTries in tests where
 // we confirm unreachability.
-e2elog.Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr)
+Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr)
 } else {
-e2elog.Logf("Tries: %d, in try: %d, stdout: %v, stderr: %v, command run in: %#v", tries, i, stdout, stderr, config.HostTestContainerPod)
+Logf("Tries: %d, in try: %d, stdout: %v, stderr: %v, command run in: %#v", tries, i, stdout, stderr, config.HostTestContainerPod)
 var output map[string][]string
 if err := json.Unmarshal([]byte(stdout), &output); err != nil {
-e2elog.Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
+Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
 cmd, config.HostTestContainerPod.Name, stdout, err)
 continue
 }

@@ -338,7 +337,7 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
 // A failure to exec command counts as a try, not a hard fail.
 // Also note that we will keep failing for maxTries in tests where
 // we confirm unreachability.
-e2elog.Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", filterCmd, err, stdout, stderr)
+Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", filterCmd, err, stdout, stderr)
 } else {
 trimmed := strings.TrimSpace(stdout)
 if trimmed != "" {

@@ -348,18 +347,18 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ

 // Check against i+1 so we exit if minTries == maxTries.
 if eps.Equal(expectedEps) && i+1 >= minTries {
-e2elog.Logf("Found all expected endpoints: %+v", eps.List())
+Logf("Found all expected endpoints: %+v", eps.List())
 return
 }

-e2elog.Logf("Waiting for %+v endpoints (expected=%+v, actual=%+v)", expectedEps.Difference(eps).List(), expectedEps.List(), eps.List())
+Logf("Waiting for %+v endpoints (expected=%+v, actual=%+v)", expectedEps.Difference(eps).List(), expectedEps.List(), eps.List())

 // TODO: get rid of this delay #36281
 time.Sleep(hitEndpointRetryDelay)
 }

 config.diagnoseMissingEndpoints(eps)
-e2elog.Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
+Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
 }

 // GetSelfURL executes a curl against the given path via kubectl exec into a

@@ -392,21 +391,21 @@ func (config *NetworkingTestConfig) executeCurlCmd(cmd string, expected string)
 stdout, err := RunHostCmd(config.Namespace, podName, cmd)
 if err != nil {
 msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
-e2elog.Logf(msg)
+Logf(msg)
 return false, nil
 }
 if !strings.Contains(stdout, expected) {
 msg = fmt.Sprintf("successfully executed %v in %v/%v, but output '%v' doesn't contain expected string '%v'", cmd, config.Namespace, podName, stdout, expected)
-e2elog.Logf(msg)
+Logf(msg)
 return false, nil
 }
 return true, nil
 }); pollErr != nil {
-e2elog.Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
+Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
 desc, _ := RunKubectl(
 "describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace))
-e2elog.Logf("%s", desc)
+Logf("%s", desc)
-e2elog.Failf("Timed out in %v: %v", retryTimeout, msg)
+Failf("Timed out in %v: %v", retryTimeout, msg)
 }
 }

@@ -550,12 +549,12 @@ func (config *NetworkingTestConfig) createTestPods() {
 var err error
 config.TestContainerPod, err = config.getPodClient().Get(testContainerPod.Name, metav1.GetOptions{})
 if err != nil {
-e2elog.Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
+Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
 }

 config.HostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name, metav1.GetOptions{})
 if err != nil {
-e2elog.Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
+Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
 }
 }

@@ -675,12 +674,12 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod() {
 // wait for pod being deleted.
 err := e2epod.WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
 if err != nil {
-e2elog.Failf("Failed to delete %s pod: %v", pod.Name, err)
+Failf("Failed to delete %s pod: %v", pod.Name, err)
 }
 // wait for endpoint being removed.
 err = WaitForServiceEndpointsNum(config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout)
 if err != nil {
-e2elog.Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
+Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
 }
 // wait for kube-proxy to catch up with the pod being deleted.
 time.Sleep(5 * time.Second)

@@ -707,12 +706,12 @@ func CheckReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, n
 err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
 _, err := RunHostCmd(namespace, pod, cmd)
 if expectToBeReachable && err != nil {
-e2elog.Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err)
+Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err)
 return false, nil
 }

 if !expectToBeReachable && err == nil {
-e2elog.Logf("Expect target NOT to be reachable. But it is reachable. Retry until timeout")
+Logf("Expect target NOT to be reachable. But it is reachable. Retry until timeout")
 return false, nil
 }
 return true, nil
@@ -780,11 +779,11 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
 // Sanity check inputs, because it has happened. These are the only things
 // that should hard fail the test - they are basically ASSERT()s.
 if host == "" {
-e2elog.Failf("Got empty host for HTTP poke (%s)", url)
+Failf("Got empty host for HTTP poke (%s)", url)
 return ret
 }
 if port == 0 {
-e2elog.Failf("Got port==0 for HTTP poke (%s)", url)
+Failf("Got port==0 for HTTP poke (%s)", url)
 return ret
 }

@@ -796,7 +795,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
 params.ExpectCode = http.StatusOK
 }

-e2elog.Logf("Poking %q", url)
+Logf("Poking %q", url)

 resp, err := httpGetNoConnectionPoolTimeout(url, params.Timeout)
 if err != nil {

@@ -809,7 +808,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
 } else {
 ret.Status = HTTPError
 }
-e2elog.Logf("Poke(%q): %v", url, err)
+Logf("Poke(%q): %v", url, err)
 return ret
 }

@@ -820,7 +819,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
 if err != nil {
 ret.Status = HTTPError
 ret.Error = fmt.Errorf("error reading HTTP body: %v", err)
-e2elog.Logf("Poke(%q): %v", url, ret.Error)
+Logf("Poke(%q): %v", url, ret.Error)
 return ret
 }
 ret.Body = make([]byte, len(body))

@@ -831,25 +830,25 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
 if resp.StatusCode == code {
 ret.Error = fmt.Errorf("retriable status code: %d", resp.StatusCode)
 ret.Status = HTTPRetryCode
-e2elog.Logf("Poke(%q): %v", url, ret.Error)
+Logf("Poke(%q): %v", url, ret.Error)
 return ret
 }
 }
 ret.Status = HTTPWrongCode
 ret.Error = fmt.Errorf("bad status code: %d", resp.StatusCode)
-e2elog.Logf("Poke(%q): %v", url, ret.Error)
+Logf("Poke(%q): %v", url, ret.Error)
 return ret
 }

 if params.BodyContains != "" && !strings.Contains(string(body), params.BodyContains) {
 ret.Status = HTTPBadResponse
 ret.Error = fmt.Errorf("response does not contain expected substring: %q", string(body))
-e2elog.Logf("Poke(%q): %v", url, ret.Error)
+Logf("Poke(%q): %v", url, ret.Error)
 return ret
 }

 ret.Status = HTTPSuccess
-e2elog.Logf("Poke(%q): success", url)
+Logf("Poke(%q): success", url)
 return ret
 }

@@ -916,11 +915,11 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
 // Sanity check inputs, because it has happened. These are the only things
 // that should hard fail the test - they are basically ASSERT()s.
 if host == "" {
-e2elog.Failf("Got empty host for UDP poke (%s)", url)
+Failf("Got empty host for UDP poke (%s)", url)
 return ret
 }
 if port == 0 {
-e2elog.Failf("Got port==0 for UDP poke (%s)", url)
+Failf("Got port==0 for UDP poke (%s)", url)
 return ret
 }

@@ -929,13 +928,13 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
 params = &UDPPokeParams{}
 }

-e2elog.Logf("Poking %v", url)
+Logf("Poking %v", url)

 con, err := net.Dial("udp", hostPort)
 if err != nil {
 ret.Status = UDPError
 ret.Error = err
-e2elog.Logf("Poke(%q): %v", url, err)
+Logf("Poke(%q): %v", url, err)
 return ret
 }

@@ -950,7 +949,7 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
 } else {
 ret.Status = UDPError
 }
-e2elog.Logf("Poke(%q): %v", url, err)
+Logf("Poke(%q): %v", url, err)
 return ret
 }

@@ -959,7 +958,7 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
 if err != nil {
 ret.Status = UDPError
 ret.Error = err
-e2elog.Logf("Poke(%q): %v", url, err)
+Logf("Poke(%q): %v", url, err)
 return ret
 }
 }

@@ -980,7 +979,7 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
 } else {
 ret.Status = UDPError
 }
-e2elog.Logf("Poke(%q): %v", url, err)
+Logf("Poke(%q): %v", url, err)
 return ret
 }
 ret.Response = buf[0:n]

@@ -988,12 +987,12 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
 if params.Response != "" && string(ret.Response) != params.Response {
 ret.Status = UDPBadResponse
 ret.Error = fmt.Errorf("response does not match expected string: %q", string(ret.Response))
-e2elog.Logf("Poke(%q): %v", url, ret.Error)
+Logf("Poke(%q): %v", url, ret.Error)
 return ret
 }

 ret.Status = UDPSuccess
-e2elog.Logf("Poke(%q): success", url)
+Logf("Poke(%q): success", url)
 return ret
 }

@@ -1005,7 +1004,7 @@ func TestHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Dur
 // TestHitNodesFromOutsideWithCount checkes HTTP connectivity from outside with count.
 func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String,
 countToSucceed int) error {
-e2elog.Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed)
+Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed)
 hittedHosts := sets.NewString()
 count := 0
 condition := func() (bool, error) {

@@ -1016,13 +1015,13 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout

 hittedHost := strings.TrimSpace(string(result.Body))
 if !expectedHosts.Has(hittedHost) {
-e2elog.Logf("Error hitting unexpected host: %v, reset counter: %v", hittedHost, count)
+Logf("Error hitting unexpected host: %v, reset counter: %v", hittedHost, count)
 count = 0
 return false, nil
 }
 if !hittedHosts.Has(hittedHost) {
 hittedHosts.Insert(hittedHost)
-e2elog.Logf("Missing %+v, got %+v", expectedHosts.Difference(hittedHosts), hittedHosts)
+Logf("Missing %+v, got %+v", expectedHosts.Difference(hittedHosts), hittedHosts)
 }
 if hittedHosts.Equal(expectedHosts) {
 count++

@@ -1047,7 +1046,7 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout
 func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1.Node, testFunc func()) {
 host, err := e2enode.GetExternalIP(node)
 if err != nil {
-e2elog.Failf("Error getting node external ip : %v", err)
+Failf("Error getting node external ip : %v", err)
 }
 masterAddresses := GetAllMasterAddresses(c)
 ginkgo.By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))

@@ -1062,17 +1061,17 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
 }
 }()

-e2elog.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
+Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
 if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
-e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
+Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
 }
 for _, masterAddress := range masterAddresses {
 BlockNetwork(host, masterAddress)
 }

-e2elog.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
+Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
 if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
-e2elog.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
+Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
 }

 testFunc()
@@ -28,7 +28,6 @@ import (
 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )

@@ -113,7 +112,7 @@ func appendContainerCommandGroupIfNeeded(args []string) []string {
 }

 func masterUpgradeGKE(v string) error {
-e2elog.Logf("Upgrading master to %q", v)
+Logf("Upgrading master to %q", v)
 args := []string{
 "container",
 "clusters",

@@ -136,7 +135,7 @@ func masterUpgradeGKE(v string) error {
 }

 func masterUpgradeKubernetesAnywhere(v string) error {
-e2elog.Logf("Upgrading master to %q", v)
+Logf("Upgrading master to %q", v)

 kaPath := TestContext.KubernetesAnywherePath
 originalConfigPath := filepath.Join(kaPath, ".config")

@@ -154,7 +153,7 @@ func masterUpgradeKubernetesAnywhere(v string) error {
 defer func() {
 // revert .config.bak to .config
 if err := os.Rename(backupConfigPath, originalConfigPath); err != nil {
-e2elog.Logf("Could not rename %s back to %s", backupConfigPath, originalConfigPath)
+Logf("Could not rename %s back to %s", backupConfigPath, originalConfigPath)
 }
 }()

@@ -209,7 +208,7 @@ func waitForNodesReadyAfterUpgrade(f *Framework) error {
 if err != nil {
 return fmt.Errorf("couldn't detect number of nodes")
 }
-e2elog.Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
+Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
 if _, err := e2enode.CheckReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil {
 return err
 }

@@ -230,7 +229,7 @@ func nodeUpgradeGCE(rawV, img string, enableKubeProxyDaemonSet bool) error {
 }

 func nodeUpgradeGKE(v string, img string) error {
-e2elog.Logf("Upgrading nodes to version %q and image %q", v, img)
+Logf("Upgrading nodes to version %q and image %q", v, img)
 args := []string{
 "container",
 "clusters",

@@ -281,7 +280,7 @@ func MigTemplate() (string, error) {
 if val := ParseKVLines(output, key); len(val) > 0 {
 url := strings.Split(val, "/")
 templ = url[len(url)-1]
-e2elog.Logf("MIG group %s using template: %s", TestContext.CloudConfig.NodeInstanceGroup, templ)
+Logf("MIG group %s using template: %s", TestContext.CloudConfig.NodeInstanceGroup, templ)
 return true, nil
 }
 errLast = fmt.Errorf("couldn't find %s in output to get MIG template. Output: %s", key, output)

@@ -300,7 +299,7 @@ func gceUpgradeScript() string {
 }

 func waitForSSHTunnels() {
-e2elog.Logf("Waiting for SSH tunnels to establish")
+Logf("Waiting for SSH tunnels to establish")
 RunKubectl("run", "ssh-tunnel-test",
 "--image=busybox",
 "--restart=Never",

@@ -356,19 +355,19 @@ func (k *NodeKiller) kill(nodes []v1.Node) {
 go func() {
 defer wg.Done()

-e2elog.Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
+Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
 err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
 if err != nil {
-e2elog.Logf("ERROR while stopping node %q: %v", node.Name, err)
+Logf("ERROR while stopping node %q: %v", node.Name, err)
 return
 }

 time.Sleep(k.config.SimulatedDowntime)

-e2elog.Logf("Rebooting %q to repair the node", node.Name)
+Logf("Rebooting %q to repair the node", node.Name)
 err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node)
 if err != nil {
-e2elog.Logf("ERROR while rebooting node %q: %v", node.Name, err)
+Logf("ERROR while rebooting node %q: %v", node.Name, err)
 return
 }
 }()