Merge pull request #81693 from oomichi/replace-e2elog-framework-c-n

Use log functions of core framework on [c-n]
Kubernetes Prow Robot, 2019-08-24 04:48:24 -07:00, committed by GitHub
commit 8e05e8346a
9 changed files with 112 additions and 122 deletions
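
Every file below follows the same pattern: code that lives in the core e2e framework package drops the aliased import of k8s.io/kubernetes/test/e2e/framework/log and calls the framework's own package-level Logf and Failf helpers directly. As a rough illustration of that calling convention, here is a minimal, self-contained Go sketch; the Logf and Failf bodies are stand-ins invented for this example, not the framework's actual implementations (the real Failf fails the running Ginkgo test rather than exiting the process).

package main

import (
	"fmt"
	"os"
	"time"
)

// Logf stands in for the framework helper: timestamped, printf-style info logging.
func Logf(format string, args ...interface{}) {
	fmt.Printf(time.Now().Format(time.StampMilli)+": INFO: "+format+"\n", args...)
}

// Failf stands in for the framework helper: log the failure message, then abort.
func Failf(format string, args ...interface{}) {
	fmt.Printf(time.Now().Format(time.StampMilli)+": FAIL: "+format+"\n", args...)
	os.Exit(1)
}

func main() {
	// Before this change (with the aliased import): e2elog.Logf(...), e2elog.Failf(...).
	// After this change (inside the framework package): Logf(...), Failf(...).
	Logf("creating %s", "ClusterRole example")
	if err := fmt.Errorf("example failure"); err != nil {
		Failf("reading manifest file: %v", err)
	}
}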

View File

@@ -34,7 +34,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/cache"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
)
@@ -81,7 +80,7 @@ func visitManifests(cb func([]byte) error, files ...string) error {
for _, fileName := range files {
data, err := testfiles.Read(fileName)
if err != nil {
e2elog.Failf("reading manifest file: %v", err)
Failf("reading manifest file: %v", err)
}
// Split at the "---" separator before working on
@@ -117,7 +116,7 @@ func visitManifests(cb func([]byte) error, files ...string) error {
func (f *Framework) PatchItems(items ...interface{}) error {
for _, item := range items {
// Uncomment when debugging the loading and patching of items.
// e2elog.Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
// Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
if err := f.patchItemRecursively(item); err != nil {
return err
}
@@ -156,7 +155,7 @@ func (f *Framework) CreateItems(items ...interface{}) (func(), error) {
// to non-namespaced items.
for _, destructor := range destructors {
if err := destructor(); err != nil && !apierrs.IsNotFound(err) {
e2elog.Logf("deleting failed: %s", err)
Logf("deleting failed: %s", err)
}
}
}
@@ -169,12 +168,12 @@ func (f *Framework) CreateItems(items ...interface{}) (func(), error) {
description := DescribeItem(item)
// Uncomment this line to get a full dump of the entire item.
// description = fmt.Sprintf("%s:\n%s", description, PrettyPrint(item))
e2elog.Logf("creating %s", description)
Logf("creating %s", description)
for _, factory := range factories {
destructor, err := factory.Create(f, item)
if destructor != nil {
destructors = append(destructors, func() error {
e2elog.Logf("deleting %s", description)
Logf("deleting %s", description)
return destructor()
})
}
@@ -417,7 +416,7 @@ func (*clusterRoleFactory) Create(f *Framework, i interface{}) (func() error, er
return nil, errorItemNotSupported
}
e2elog.Logf("Define cluster role %v", item.GetName())
Logf("Define cluster role %v", item.GetName())
client := f.ClientSet.RbacV1().ClusterRoles()
if _, err := client.Create(item); err != nil {
return nil, errors.Wrap(err, "create ClusterRole")

View File

@@ -27,7 +27,6 @@ import (
"k8s.io/client-go/kubernetes/scheme"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/gomega"
)
@@ -49,7 +48,7 @@ type ExecOptions struct {
// returning stdout, stderr and error. `options` allowed for
// additional parameters to be passed.
func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) {
e2elog.Logf("ExecWithOptions %+v", options)
Logf("ExecWithOptions %+v", options)
config, err := LoadConfig()
ExpectNoError(err, "failed to load restclient config")
@@ -98,7 +97,7 @@ func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName
// ExecCommandInContainer executes a command in the specified container.
func (f *Framework) ExecCommandInContainer(podName, containerName string, cmd ...string) string {
stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
e2elog.Logf("Exec stderr: %q", stderr)
Logf("Exec stderr: %q", stderr)
ExpectNoError(err,
"failed to execute command in pod %v, container %v: %v",
podName, containerName, err)

View File

@@ -21,7 +21,6 @@ import (
"fmt"
"sync"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)
@@ -60,7 +59,7 @@ func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...inter
if desc != "" {
msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
}
e2elog.Logf(msg)
Logf(msg)
f.lock.Lock()
defer f.lock.Unlock()
f.Flakes = append(f.Flakes, msg)

View File

@@ -46,7 +46,6 @@ import (
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
scaleclient "k8s.io/client-go/scale"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epsp "k8s.io/kubernetes/test/e2e/framework/psp"
@@ -225,7 +224,7 @@ func (f *Framework) BeforeEach() {
err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
ExpectNoError(err)
} else {
e2elog.Logf("Skipping waiting for service account")
Logf("Skipping waiting for service account")
}
f.UniqueName = f.Namespace.GetName()
} else {
@@ -253,7 +252,7 @@ func (f *Framework) BeforeEach() {
PrintVerboseLogs: false,
}, nil)
if err != nil {
e2elog.Logf("Error while creating NewResourceUsageGatherer: %v", err)
Logf("Error while creating NewResourceUsageGatherer: %v", err)
} else {
go f.gatherer.StartGatheringData()
}
@@ -274,13 +273,13 @@ func (f *Framework) BeforeEach() {
if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics {
grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics)
if err != nil {
e2elog.Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
} else {
f.clusterAutoscalerMetricsBeforeTest, err = grabber.Grab()
if err != nil {
e2elog.Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
} else {
e2elog.Logf("Gathered ClusterAutoscaler metrics before test")
Logf("Gathered ClusterAutoscaler metrics before test")
}
}
@@ -311,15 +310,15 @@ func (f *Framework) AfterEach() {
if !apierrors.IsNotFound(err) {
nsDeletionErrors[ns.Name] = err
} else {
e2elog.Logf("Namespace %v was already deleted", ns.Name)
Logf("Namespace %v was already deleted", ns.Name)
}
}
}
} else {
if !TestContext.DeleteNamespace {
e2elog.Logf("Found DeleteNamespace=false, skipping namespace deletion!")
Logf("Found DeleteNamespace=false, skipping namespace deletion!")
} else {
e2elog.Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
}
}
@@ -334,7 +333,7 @@ func (f *Framework) AfterEach() {
for namespaceKey, namespaceErr := range nsDeletionErrors {
messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr))
}
e2elog.Failf(strings.Join(messages, ","))
Failf(strings.Join(messages, ","))
}
}()
@@ -366,11 +365,11 @@ func (f *Framework) AfterEach() {
grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark")
grabber, err := e2emetrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics)
if err != nil {
e2elog.Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
} else {
received, err := grabber.Grab()
if err != nil {
e2elog.Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
}
(*e2emetrics.ComponentCollection)(&received).ComputeClusterAutoscalerMetricsDelta(f.clusterAutoscalerMetricsBeforeTest)
f.TestSummaries = append(f.TestSummaries, (*e2emetrics.ComponentCollection)(&received))
@@ -391,7 +390,7 @@ func (f *Framework) AfterEach() {
// This is explicitly done at the very end of the test, to avoid
// e.g. not removing namespace in case of this failure.
if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
e2elog.Failf("All nodes should be ready after test, %v", err)
Failf("All nodes should be ready after test, %v", err)
}
}
@@ -490,7 +489,7 @@ func (f *Framework) WriteFileViaContainer(podName, containerName string, path st
command := fmt.Sprintf("echo '%s' > '%s'", contents, path)
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "/bin/sh", "-c", command)
if err != nil {
e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
}
return err
}
@@ -501,7 +500,7 @@ func (f *Framework) ReadFileViaContainer(podName, containerName string, path str
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "cat", path)
if err != nil {
e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
}
return string(stdout), err
}
@@ -512,7 +511,7 @@ func (f *Framework) CheckFileSizeViaContainer(podName, containerName, path strin
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "ls", "-l", path)
if err != nil {
e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
}
return string(stdout), err
}
@@ -549,7 +548,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
TargetPort: intstr.FromInt(contPort),
}}
}
e2elog.Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "service-for-" + appName,
@@ -575,7 +574,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
for i, node := range nodes.Items {
// one per node, but no more than maxCount.
if i <= maxCount {
e2elog.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i),
@@ -646,19 +645,19 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
func kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
if numRetries > 0 {
e2elog.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
}
stdOutBytes, stdErrBytes, err := kubectlExec(namespace, podName, containerName, args...)
if err != nil {
if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
// Retry on "i/o timeout" errors
e2elog.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
continue
}
if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
// Retry on "container not found" errors
e2elog.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
time.Sleep(2 * time.Second)
continue
}
@@ -683,7 +682,7 @@ func kubectlExec(namespace string, podName, containerName string, args ...string
cmd := KubectlCmd(cmdArgs...)
cmd.Stdout, cmd.Stderr = &stdout, &stderr
e2elog.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
err := cmd.Run()
return stdout.Bytes(), stderr.Bytes(), err
}
@@ -790,7 +789,7 @@ func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Names
ns := namespace.Name
pl, err := filterLabels(p.Selectors, c, ns) // Build an v1.PodList to operate against.
e2elog.Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
if len(pl.Items) == 0 || err != nil {
return pl.Items, err
}
@@ -805,7 +804,7 @@ ReturnPodsSoFar:
}
passesVerify, err := passesVerifyFilter(pod, p.Verify)
if err != nil {
e2elog.Logf("Error detected on %v : %v !", pod.Name, err)
Logf("Error detected on %v : %v !", pod.Name, err)
break ReturnPodsSoFar
}
if passesVerify {
@@ -826,12 +825,12 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
// Failure
if returnedErr != nil {
e2elog.Logf("Cutting polling short: We got an error from the pod filtering layer.")
Logf("Cutting polling short: We got an error from the pod filtering layer.")
// stop polling if the pod filtering returns an error. that should never happen.
// it indicates, for example, that the client is broken or something non-pod related.
return false, returnedErr
}
e2elog.Logf("Found %v / %v", len(pods), atLeast)
Logf("Found %v / %v", len(pods), atLeast)
// Success
if len(pods) >= atLeast {
@@ -840,7 +839,7 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
// Keep trying...
return false, nil
})
e2elog.Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
return pods, err
}
@@ -848,7 +847,7 @@ func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1
func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration) {
pods, err := cl.WaitFor(atLeast, timeout)
if err != nil || len(pods) < atLeast {
e2elog.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
}
}
@@ -861,14 +860,14 @@ func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
pods, err := cl.podState.filter(cl.client, cl.namespace)
if err == nil {
if len(pods) == 0 {
e2elog.Failf("No pods matched the filter.")
Failf("No pods matched the filter.")
}
e2elog.Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
for _, p := range pods {
podFunc(p)
}
} else {
e2elog.Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
}
return err
@@ -880,7 +879,7 @@ func GetLogToFileFunc(file *os.File) func(format string, args ...interface{}) {
return func(format string, args ...interface{}) {
writer := bufio.NewWriter(file)
if _, err := fmt.Fprintf(writer, format, args...); err != nil {
e2elog.Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
}
writer.Flush()
}

View File

@@ -21,7 +21,6 @@ import (
"fmt"
"strings"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
@@ -47,7 +46,7 @@ func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsag
// Get kubernetes component resource usage
sshResult, err := getMasterUsageByPrefix("kube")
if err != nil {
e2elog.Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
return nil
}
scanner := bufio.NewScanner(strings.NewReader(sshResult))
@@ -65,7 +64,7 @@ func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsag
// Get etcd resource usage
sshResult, err = getMasterUsageByPrefix("bin/etcd")
if err != nil {
e2elog.Logf("Error when trying to SSH to master machine. Skipping probe")
Logf("Error when trying to SSH to master machine. Skipping probe")
return nil
}
scanner = bufio.NewScanner(strings.NewReader(sshResult))

View File

@@ -23,8 +23,6 @@ import (
"os/exec"
"path/filepath"
"strings"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
// TODO: These should really just use the GCE API client library or at least use
@@ -48,9 +46,9 @@ func lookupClusterImageSources() (string, string, error) {
str = strings.Replace(str, ";", "\n", -1)
lines := strings.Split(str, "\n")
if err != nil {
e2elog.Logf("lookupDiskImageSources: gcloud error with [%#v]; err:%v", argv, err)
Logf("lookupDiskImageSources: gcloud error with [%#v]; err:%v", argv, err)
for _, l := range lines {
e2elog.Logf(" > %s", l)
Logf(" > %s", l)
}
}
return lines, err
@@ -114,11 +112,11 @@ func lookupClusterImageSources() (string, string, error) {
func LogClusterImageSources() {
masterImg, nodeImg, err := lookupClusterImageSources()
if err != nil {
e2elog.Logf("Cluster image sources lookup failed: %v\n", err)
Logf("Cluster image sources lookup failed: %v\n", err)
return
}
e2elog.Logf("cluster-master-image: %s", masterImg)
e2elog.Logf("cluster-node-image: %s", nodeImg)
Logf("cluster-master-image: %s", masterImg)
Logf("cluster-node-image: %s", nodeImg)
images := map[string]string{
"master_os_image": masterImg,
@@ -128,7 +126,7 @@ func LogClusterImageSources() {
outputBytes, _ := json.MarshalIndent(images, "", " ")
filePath := filepath.Join(TestContext.ReportDir, "images.json")
if err := ioutil.WriteFile(filePath, outputBytes, 0644); err != nil {
e2elog.Logf("cluster images sources, could not write to %q: %v", filePath, err)
Logf("cluster images sources, could not write to %q: %v", filePath, err)
}
}

View File

@@ -26,7 +26,6 @@ import (
"time"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
@@ -259,10 +258,10 @@ func (g *LogSizeGatherer) Work() bool {
TestContext.Provider,
)
if err != nil {
e2elog.Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
// In case of repeated error give up.
if workItem.backoffMultiplier >= 128 {
e2elog.Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
g.wg.Done()
return false
}
@@ -278,7 +277,7 @@ func (g *LogSizeGatherer) Work() bool {
path := results[i]
size, err := strconv.Atoi(results[i+1])
if err != nil {
e2elog.Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
continue
}
g.data.addNewData(workItem.ip, path, now, size)

View File

@@ -38,7 +38,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -172,10 +171,10 @@ func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets
if foundEndpoints.Has(e.Name) {
continue
}
e2elog.Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
desc, _ := RunKubectl(
"describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
e2elog.Logf(desc)
Logf(desc)
}
}
@@ -220,11 +219,11 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
// A failure to kubectl exec counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where
// we confirm unreachability.
e2elog.Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr)
Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr)
} else {
var output map[string][]string
if err := json.Unmarshal([]byte(stdout), &output); err != nil {
e2elog.Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
cmd, config.HostTestContainerPod.Name, stdout, err)
continue
}
@@ -236,7 +235,7 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
}
}
}
e2elog.Logf("Waiting for endpoints: %v", expectedEps.Difference(eps))
Logf("Waiting for endpoints: %v", expectedEps.Difference(eps))
// Check against i+1 so we exit if minTries == maxTries.
if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries {
@@ -247,7 +246,7 @@ func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, tar
}
config.diagnoseMissingEndpoints(eps)
e2elog.Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
}
// GetEndpointsFromTestContainer executes a curl via kubectl exec in a test container.
@@ -279,12 +278,12 @@ func (config *NetworkingTestConfig) GetEndpointsFromContainer(protocol, containe
// A failure to kubectl exec counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where
// we confirm unreachability.
e2elog.Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr)
Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", cmd, err, stdout, stderr)
} else {
e2elog.Logf("Tries: %d, in try: %d, stdout: %v, stderr: %v, command run in: %#v", tries, i, stdout, stderr, config.HostTestContainerPod)
Logf("Tries: %d, in try: %d, stdout: %v, stderr: %v, command run in: %#v", tries, i, stdout, stderr, config.HostTestContainerPod)
var output map[string][]string
if err := json.Unmarshal([]byte(stdout), &output); err != nil {
e2elog.Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
cmd, config.HostTestContainerPod.Name, stdout, err)
continue
}
@@ -338,7 +337,7 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
// A failure to exec command counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where
// we confirm unreachability.
e2elog.Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", filterCmd, err, stdout, stderr)
Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", filterCmd, err, stdout, stderr)
} else {
trimmed := strings.TrimSpace(stdout)
if trimmed != "" {
@@ -348,18 +347,18 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
// Check against i+1 so we exit if minTries == maxTries.
if eps.Equal(expectedEps) && i+1 >= minTries {
e2elog.Logf("Found all expected endpoints: %+v", eps.List())
Logf("Found all expected endpoints: %+v", eps.List())
return
}
e2elog.Logf("Waiting for %+v endpoints (expected=%+v, actual=%+v)", expectedEps.Difference(eps).List(), expectedEps.List(), eps.List())
Logf("Waiting for %+v endpoints (expected=%+v, actual=%+v)", expectedEps.Difference(eps).List(), expectedEps.List(), eps.List())
// TODO: get rid of this delay #36281
time.Sleep(hitEndpointRetryDelay)
}
config.diagnoseMissingEndpoints(eps)
e2elog.Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", maxTries, cmd, eps, expectedEps)
}
// GetSelfURL executes a curl against the given path via kubectl exec into a
@@ -392,21 +391,21 @@ func (config *NetworkingTestConfig) executeCurlCmd(cmd string, expected string)
stdout, err := RunHostCmd(config.Namespace, podName, cmd)
if err != nil {
msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
e2elog.Logf(msg)
Logf(msg)
return false, nil
}
if !strings.Contains(stdout, expected) {
msg = fmt.Sprintf("successfully executed %v in %v/%v, but output '%v' doesn't contain expected string '%v'", cmd, config.Namespace, podName, stdout, expected)
e2elog.Logf(msg)
Logf(msg)
return false, nil
}
return true, nil
}); pollErr != nil {
e2elog.Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
desc, _ := RunKubectl(
"describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace))
e2elog.Logf("%s", desc)
e2elog.Failf("Timed out in %v: %v", retryTimeout, msg)
Logf("%s", desc)
Failf("Timed out in %v: %v", retryTimeout, msg)
}
}
@@ -550,12 +549,12 @@ func (config *NetworkingTestConfig) createTestPods() {
var err error
config.TestContainerPod, err = config.getPodClient().Get(testContainerPod.Name, metav1.GetOptions{})
if err != nil {
e2elog.Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
}
config.HostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name, metav1.GetOptions{})
if err != nil {
e2elog.Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
}
}
@@ -675,12 +674,12 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod() {
// wait for pod being deleted.
err := e2epod.WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
if err != nil {
e2elog.Failf("Failed to delete %s pod: %v", pod.Name, err)
Failf("Failed to delete %s pod: %v", pod.Name, err)
}
// wait for endpoint being removed.
err = WaitForServiceEndpointsNum(config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout)
if err != nil {
e2elog.Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
}
// wait for kube-proxy to catch up with the pod being deleted.
time.Sleep(5 * time.Second)
@@ -707,12 +706,12 @@ func CheckReachabilityFromPod(expectToBeReachable bool, timeout time.Duration, n
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
_, err := RunHostCmd(namespace, pod, cmd)
if expectToBeReachable && err != nil {
e2elog.Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err)
Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err)
return false, nil
}
if !expectToBeReachable && err == nil {
e2elog.Logf("Expect target NOT to be reachable. But it is reachable. Retry until timeout")
Logf("Expect target NOT to be reachable. But it is reachable. Retry until timeout")
return false, nil
}
return true, nil
@@ -780,11 +779,11 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
// Sanity check inputs, because it has happened. These are the only things
// that should hard fail the test - they are basically ASSERT()s.
if host == "" {
e2elog.Failf("Got empty host for HTTP poke (%s)", url)
Failf("Got empty host for HTTP poke (%s)", url)
return ret
}
if port == 0 {
e2elog.Failf("Got port==0 for HTTP poke (%s)", url)
Failf("Got port==0 for HTTP poke (%s)", url)
return ret
}
@@ -796,7 +795,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
params.ExpectCode = http.StatusOK
}
e2elog.Logf("Poking %q", url)
Logf("Poking %q", url)
resp, err := httpGetNoConnectionPoolTimeout(url, params.Timeout)
if err != nil {
@@ -809,7 +808,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
} else {
ret.Status = HTTPError
}
e2elog.Logf("Poke(%q): %v", url, err)
Logf("Poke(%q): %v", url, err)
return ret
}
@@ -820,7 +819,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
if err != nil {
ret.Status = HTTPError
ret.Error = fmt.Errorf("error reading HTTP body: %v", err)
e2elog.Logf("Poke(%q): %v", url, ret.Error)
Logf("Poke(%q): %v", url, ret.Error)
return ret
}
ret.Body = make([]byte, len(body))
@@ -831,25 +830,25 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
if resp.StatusCode == code {
ret.Error = fmt.Errorf("retriable status code: %d", resp.StatusCode)
ret.Status = HTTPRetryCode
e2elog.Logf("Poke(%q): %v", url, ret.Error)
Logf("Poke(%q): %v", url, ret.Error)
return ret
}
}
ret.Status = HTTPWrongCode
ret.Error = fmt.Errorf("bad status code: %d", resp.StatusCode)
e2elog.Logf("Poke(%q): %v", url, ret.Error)
Logf("Poke(%q): %v", url, ret.Error)
return ret
}
if params.BodyContains != "" && !strings.Contains(string(body), params.BodyContains) {
ret.Status = HTTPBadResponse
ret.Error = fmt.Errorf("response does not contain expected substring: %q", string(body))
e2elog.Logf("Poke(%q): %v", url, ret.Error)
Logf("Poke(%q): %v", url, ret.Error)
return ret
}
ret.Status = HTTPSuccess
e2elog.Logf("Poke(%q): success", url)
Logf("Poke(%q): success", url)
return ret
}
@@ -916,11 +915,11 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
// Sanity check inputs, because it has happened. These are the only things
// that should hard fail the test - they are basically ASSERT()s.
if host == "" {
e2elog.Failf("Got empty host for UDP poke (%s)", url)
Failf("Got empty host for UDP poke (%s)", url)
return ret
}
if port == 0 {
e2elog.Failf("Got port==0 for UDP poke (%s)", url)
Failf("Got port==0 for UDP poke (%s)", url)
return ret
}
@@ -929,13 +928,13 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
params = &UDPPokeParams{}
}
e2elog.Logf("Poking %v", url)
Logf("Poking %v", url)
con, err := net.Dial("udp", hostPort)
if err != nil {
ret.Status = UDPError
ret.Error = err
e2elog.Logf("Poke(%q): %v", url, err)
Logf("Poke(%q): %v", url, err)
return ret
}
@@ -950,7 +949,7 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
} else {
ret.Status = UDPError
}
e2elog.Logf("Poke(%q): %v", url, err)
Logf("Poke(%q): %v", url, err)
return ret
}
@@ -959,7 +958,7 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
if err != nil {
ret.Status = UDPError
ret.Error = err
e2elog.Logf("Poke(%q): %v", url, err)
Logf("Poke(%q): %v", url, err)
return ret
}
}
@@ -980,7 +979,7 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
} else {
ret.Status = UDPError
}
e2elog.Logf("Poke(%q): %v", url, err)
Logf("Poke(%q): %v", url, err)
return ret
}
ret.Response = buf[0:n]
@@ -988,12 +987,12 @@ func PokeUDP(host string, port int, request string, params *UDPPokeParams) UDPPo
if params.Response != "" && string(ret.Response) != params.Response {
ret.Status = UDPBadResponse
ret.Error = fmt.Errorf("response does not match expected string: %q", string(ret.Response))
e2elog.Logf("Poke(%q): %v", url, ret.Error)
Logf("Poke(%q): %v", url, ret.Error)
return ret
}
ret.Status = UDPSuccess
e2elog.Logf("Poke(%q): success", url)
Logf("Poke(%q): success", url)
return ret
}
@@ -1005,7 +1004,7 @@ func TestHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Dur
// TestHitNodesFromOutsideWithCount checkes HTTP connectivity from outside with count.
func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String,
countToSucceed int) error {
e2elog.Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed)
Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed)
hittedHosts := sets.NewString()
count := 0
condition := func() (bool, error) {
@@ -1016,13 +1015,13 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout
hittedHost := strings.TrimSpace(string(result.Body))
if !expectedHosts.Has(hittedHost) {
e2elog.Logf("Error hitting unexpected host: %v, reset counter: %v", hittedHost, count)
Logf("Error hitting unexpected host: %v, reset counter: %v", hittedHost, count)
count = 0
return false, nil
}
if !hittedHosts.Has(hittedHost) {
hittedHosts.Insert(hittedHost)
e2elog.Logf("Missing %+v, got %+v", expectedHosts.Difference(hittedHosts), hittedHosts)
Logf("Missing %+v, got %+v", expectedHosts.Difference(hittedHosts), hittedHosts)
}
if hittedHosts.Equal(expectedHosts) {
count++
@@ -1047,7 +1046,7 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout
func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1.Node, testFunc func()) {
host, err := e2enode.GetExternalIP(node)
if err != nil {
e2elog.Failf("Error getting node external ip : %v", err)
Failf("Error getting node external ip : %v", err)
}
masterAddresses := GetAllMasterAddresses(c)
ginkgo.By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
@@ -1062,17 +1061,17 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
}
}()
e2elog.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
for _, masterAddress := range masterAddresses {
BlockNetwork(host, masterAddress)
}
e2elog.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
e2elog.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
}
testFunc()

View File

@@ -28,7 +28,6 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)
@@ -113,7 +112,7 @@ func appendContainerCommandGroupIfNeeded(args []string) []string {
}
func masterUpgradeGKE(v string) error {
e2elog.Logf("Upgrading master to %q", v)
Logf("Upgrading master to %q", v)
args := []string{
"container",
"clusters",
@@ -136,7 +135,7 @@ func masterUpgradeGKE(v string) error {
}
func masterUpgradeKubernetesAnywhere(v string) error {
e2elog.Logf("Upgrading master to %q", v)
Logf("Upgrading master to %q", v)
kaPath := TestContext.KubernetesAnywherePath
originalConfigPath := filepath.Join(kaPath, ".config")
@@ -154,7 +153,7 @@ func masterUpgradeKubernetesAnywhere(v string) error {
defer func() {
// revert .config.bak to .config
if err := os.Rename(backupConfigPath, originalConfigPath); err != nil {
e2elog.Logf("Could not rename %s back to %s", backupConfigPath, originalConfigPath)
Logf("Could not rename %s back to %s", backupConfigPath, originalConfigPath)
}
}()
@@ -209,7 +208,7 @@ func waitForNodesReadyAfterUpgrade(f *Framework) error {
if err != nil {
return fmt.Errorf("couldn't detect number of nodes")
}
e2elog.Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
if _, err := e2enode.CheckReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil {
return err
}
@@ -230,7 +229,7 @@ func nodeUpgradeGCE(rawV, img string, enableKubeProxyDaemonSet bool) error {
}
func nodeUpgradeGKE(v string, img string) error {
e2elog.Logf("Upgrading nodes to version %q and image %q", v, img)
Logf("Upgrading nodes to version %q and image %q", v, img)
args := []string{
"container",
"clusters",
@@ -281,7 +280,7 @@ func MigTemplate() (string, error) {
if val := ParseKVLines(output, key); len(val) > 0 {
url := strings.Split(val, "/")
templ = url[len(url)-1]
e2elog.Logf("MIG group %s using template: %s", TestContext.CloudConfig.NodeInstanceGroup, templ)
Logf("MIG group %s using template: %s", TestContext.CloudConfig.NodeInstanceGroup, templ)
return true, nil
}
errLast = fmt.Errorf("couldn't find %s in output to get MIG template. Output: %s", key, output)
@@ -300,7 +299,7 @@ func gceUpgradeScript() string {
}
func waitForSSHTunnels() {
e2elog.Logf("Waiting for SSH tunnels to establish")
Logf("Waiting for SSH tunnels to establish")
RunKubectl("run", "ssh-tunnel-test",
"--image=busybox",
"--restart=Never",
@@ -356,19 +355,19 @@ func (k *NodeKiller) kill(nodes []v1.Node) {
go func() {
defer wg.Done()
e2elog.Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
Logf("Stopping docker and kubelet on %q to simulate failure", node.Name)
err := e2essh.IssueSSHCommand("sudo systemctl stop docker kubelet", k.provider, &node)
if err != nil {
e2elog.Logf("ERROR while stopping node %q: %v", node.Name, err)
Logf("ERROR while stopping node %q: %v", node.Name, err)
return
}
time.Sleep(k.config.SimulatedDowntime)
e2elog.Logf("Rebooting %q to repair the node", node.Name)
Logf("Rebooting %q to repair the node", node.Name)
err = e2essh.IssueSSHCommand("sudo reboot", k.provider, &node)
if err != nil {
e2elog.Logf("ERROR while rebooting node %q: %v", node.Name, err)
Logf("ERROR while rebooting node %q: %v", node.Name, err)
return
}
}()