diff --git a/test/e2e/instrumentation/logging/BUILD b/test/e2e/instrumentation/logging/BUILD
index b2a40ddabf7..c5976022ec1 100644
--- a/test/e2e/instrumentation/logging/BUILD
+++ b/test/e2e/instrumentation/logging/BUILD
@@ -16,7 +16,6 @@ go_library(
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/config:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/instrumentation/common:go_default_library",
         "//test/e2e/instrumentation/logging/elasticsearch:go_default_library",
         "//test/e2e/instrumentation/logging/stackdriver:go_default_library",
diff --git a/test/e2e/instrumentation/logging/elasticsearch/BUILD b/test/e2e/instrumentation/logging/elasticsearch/BUILD
index 888671ca0a9..02b966671cb 100644
--- a/test/e2e/instrumentation/logging/elasticsearch/BUILD
+++ b/test/e2e/instrumentation/logging/elasticsearch/BUILD
@@ -20,7 +20,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/service:go_default_library",
         "//test/e2e/instrumentation/common:go_default_library",
diff --git a/test/e2e/instrumentation/logging/elasticsearch/kibana.go b/test/e2e/instrumentation/logging/elasticsearch/kibana.go
index 0788795d05b..bb0a2ff72bf 100644
--- a/test/e2e/instrumentation/logging/elasticsearch/kibana.go
+++ b/test/e2e/instrumentation/logging/elasticsearch/kibana.go
@@ -23,7 +23,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
@@ -64,7 +63,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 	// being run as the first e2e test just after the e2e cluster has been created.
 	err := wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
 		if _, err := s.Get("kibana-logging", metav1.GetOptions{}); err != nil {
-			e2elog.Logf("Kibana is unreachable: %v", err)
+			framework.Logf("Kibana is unreachable: %v", err)
 			return false, nil
 		}
 		return true, nil
@@ -86,7 +85,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 	err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
 		req, err := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 		if err != nil {
-			e2elog.Logf("Failed to get services proxy request: %v", err)
+			framework.Logf("Failed to get services proxy request: %v", err)
 			return false, nil
 		}

@@ -98,7 +97,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
 			Name("kibana-logging").
 			DoRaw()
 		if err != nil {
-			e2elog.Logf("Proxy call to kibana-logging failed: %v", err)
+			framework.Logf("Proxy call to kibana-logging failed: %v", err)
 			return false, nil
 		}
 		return true, nil
diff --git a/test/e2e/instrumentation/logging/elasticsearch/utils.go b/test/e2e/instrumentation/logging/elasticsearch/utils.go
index 670c27fbcbb..650d7dafd33 100644
--- a/test/e2e/instrumentation/logging/elasticsearch/utils.go
+++ b/test/e2e/instrumentation/logging/elasticsearch/utils.go
@@ -26,7 +26,6 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
@@ -57,7 +56,7 @@ func newEsLogProvider(f *framework.Framework) (*esLogProvider, error) {
 func (p *esLogProvider) Init() error {
 	f := p.Framework
 	// Check for the existence of the Elasticsearch service.
-	e2elog.Logf("Checking the Elasticsearch service exists.")
+	framework.Logf("Checking the Elasticsearch service exists.")
 	s := f.ClientSet.CoreV1().Services(api.NamespaceSystem)
 	// Make a few attempts to connect. This makes the test robust against
 	// being run as the first e2e test just after the e2e cluster has been created.
@@ -66,14 +65,14 @@ func (p *esLogProvider) Init() error {
 		if _, err = s.Get("elasticsearch-logging", meta_v1.GetOptions{}); err == nil {
 			break
 		}
-		e2elog.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start))
+		framework.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start))
 	}
 	if err != nil {
 		return err
 	}

 	// Wait for the Elasticsearch pods to enter the running state.
-	e2elog.Logf("Checking to make sure the Elasticsearch pods are running")
+	framework.Logf("Checking to make sure the Elasticsearch pods are running")
 	labelSelector := fields.SelectorFromSet(fields.Set(map[string]string{"k8s-app": "elasticsearch-logging"})).String()
 	options := meta_v1.ListOptions{LabelSelector: labelSelector}
 	pods, err := f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options)
@@ -87,7 +86,7 @@ func (p *esLogProvider) Init() error {
 		}
 	}

-	e2elog.Logf("Checking to make sure we are talking to an Elasticsearch service.")
+	framework.Logf("Checking to make sure we are talking to an Elasticsearch service.")
 	// Perform a few checks to make sure this looks like an Elasticsearch cluster.
 	var statusCode int
 	err = nil
@@ -95,7 +94,7 @@
 	for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
 		proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 		if errProxy != nil {
-			e2elog.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
+			framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
 			continue
 		}
 		// Query against the root URL for Elasticsearch.
@@ -106,11 +105,11 @@ func (p *esLogProvider) Init() error {
 		response.StatusCode(&statusCode)

 		if err != nil {
-			e2elog.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
+			framework.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
 			continue
 		}
 		if int(statusCode) != 200 {
-			e2elog.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode)
+			framework.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode)
 			continue
 		}
 		break
@@ -119,17 +118,17 @@ func (p *esLogProvider) Init() error {
 		return err
 	}
 	if int(statusCode) != 200 {
-		e2elog.Failf("Elasticsearch cluster has a bad status: %v", statusCode)
+		framework.Failf("Elasticsearch cluster has a bad status: %v", statusCode)
 	}

 	// Now assume we really are talking to an Elasticsearch instance.
 	// Check the cluster health.
-	e2elog.Logf("Checking health of Elasticsearch service.")
+	framework.Logf("Checking health of Elasticsearch service.")
 	healthy := false
 	for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
 		proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 		if errProxy != nil {
-			e2elog.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
+			framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
 			continue
 		}
 		body, err = proxyRequest.Namespace(api.NamespaceSystem).
@@ -143,17 +142,17 @@ func (p *esLogProvider) Init() error {
 		health := make(map[string]interface{})
 		err := json.Unmarshal(body, &health)
 		if err != nil {
-			e2elog.Logf("Bad json response from elasticsearch: %v", err)
+			framework.Logf("Bad json response from elasticsearch: %v", err)
 			continue
 		}
 		statusIntf, ok := health["status"]
 		if !ok {
-			e2elog.Logf("No status field found in cluster health response: %v", health)
+			framework.Logf("No status field found in cluster health response: %v", health)
 			continue
 		}
 		status := statusIntf.(string)
 		if status != "green" && status != "yellow" {
-			e2elog.Logf("Cluster health has bad status: %v", health)
+			framework.Logf("Cluster health has bad status: %v", health)
 			continue
 		}
 		if err == nil && ok {
@@ -177,12 +176,12 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {

 	proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 	if errProxy != nil {
-		e2elog.Logf("Failed to get services proxy request: %v", errProxy)
+		framework.Logf("Failed to get services proxy request: %v", errProxy)
 		return nil
 	}

 	query := fmt.Sprintf("kubernetes.pod_name:%s AND kubernetes.namespace_name:%s", name, f.Namespace.Name)
-	e2elog.Logf("Sending a search request to Elasticsearch with the following query: %s", query)
+	framework.Logf("Sending a search request to Elasticsearch with the following query: %s", query)

 	// Ask Elasticsearch to return all the log lines that were tagged with the
 	// pod name. Ask for ten times as many log lines because duplication is possible.
@@ -194,26 +193,26 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
 		Param("size", strconv.Itoa(searchPageSize)).
 		DoRaw()
 	if err != nil {
-		e2elog.Logf("Failed to make proxy call to elasticsearch-logging: %v", err)
+		framework.Logf("Failed to make proxy call to elasticsearch-logging: %v", err)
 		return nil
 	}

 	var response map[string]interface{}
 	err = json.Unmarshal(body, &response)
 	if err != nil {
-		e2elog.Logf("Failed to unmarshal response: %v", err)
+		framework.Logf("Failed to unmarshal response: %v", err)
 		return nil
 	}

 	hits, ok := response["hits"].(map[string]interface{})
 	if !ok {
-		e2elog.Logf("response[hits] not of the expected type: %T", response["hits"])
+		framework.Logf("response[hits] not of the expected type: %T", response["hits"])
 		return nil
 	}

 	h, ok := hits["hits"].([]interface{})
 	if !ok {
-		e2elog.Logf("Hits not of the expected type: %T", hits["hits"])
+		framework.Logf("Hits not of the expected type: %T", hits["hits"])
 		return nil
 	}

@@ -222,13 +221,13 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
 	for _, e := range h {
 		l, ok := e.(map[string]interface{})
 		if !ok {
-			e2elog.Logf("Element of hit not of expected type: %T", e)
+			framework.Logf("Element of hit not of expected type: %T", e)
 			continue
 		}

 		source, ok := l["_source"].(map[string]interface{})
 		if !ok {
-			e2elog.Logf("_source not of the expected type: %T", l["_source"])
+			framework.Logf("_source not of the expected type: %T", l["_source"])
 			continue
 		}

@@ -244,7 +243,7 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
 			continue
 		}

-		e2elog.Logf("Log is of unknown type, got %v, want string or object in field 'log'", source)
+		framework.Logf("Log is of unknown type, got %v, want string or object in field 'log'", source)
 	}

 	return entries
diff --git a/test/e2e/instrumentation/logging/generic_soak.go b/test/e2e/instrumentation/logging/generic_soak.go
index 116e9fa18c5..caa33b537ed 100644
--- a/test/e2e/instrumentation/logging/generic_soak.go
+++ b/test/e2e/instrumentation/logging/generic_soak.go
@@ -27,7 +27,6 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/config"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -61,14 +60,14 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti
 				defer wg.Done()
 				defer ginkgo.GinkgoRecover()
 				wave := fmt.Sprintf("wave%v", strconv.Itoa(i))
-				e2elog.Logf("Starting logging soak, wave = %v", wave)
+				framework.Logf("Starting logging soak, wave = %v", wave)
 				RunLogPodsWithSleepOf(f, kbRateInSeconds, wave, totalLogTime)
-				e2elog.Logf("Completed logging soak, wave %v", i)
+				framework.Logf("Completed logging soak, wave %v", i)
 			}()
 			// Niceness.
 			time.Sleep(loggingSoak.TimeBetweenWaves)
 		}
-		e2elog.Logf("Waiting on all %v logging soak waves to complete", loggingSoak.Scale)
+		framework.Logf("Waiting on all %v logging soak waves to complete", loggingSoak.Scale)
 		wg.Wait()
 	})
 })
@@ -122,8 +121,8 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname

 	pods, err := logSoakVerification.WaitFor(totalPods, timeout+largeClusterForgiveness)
 	if err != nil {
-		e2elog.Failf("Error in wait... %v", err)
+		framework.Failf("Error in wait... %v", err)
 	} else if len(pods) < totalPods {
-		e2elog.Failf("Only got %v out of %v", len(pods), totalPods)
+		framework.Failf("Only got %v out of %v", len(pods), totalPods)
 	}
 }
diff --git a/test/e2e/instrumentation/logging/stackdriver/BUILD b/test/e2e/instrumentation/logging/stackdriver/BUILD
index d1bff9cd3ea..effafb418da 100644
--- a/test/e2e/instrumentation/logging/stackdriver/BUILD
+++ b/test/e2e/instrumentation/logging/stackdriver/BUILD
@@ -18,7 +18,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/instrumentation/common:go_default_library",
         "//test/e2e/instrumentation/logging/utils:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
diff --git a/test/e2e/instrumentation/logging/stackdriver/basic.go b/test/e2e/instrumentation/logging/stackdriver/basic.go
index e98ab9ebe40..3524444e7eb 100644
--- a/test/e2e/instrumentation/logging/stackdriver/basic.go
+++ b/test/e2e/instrumentation/logging/stackdriver/basic.go
@@ -22,7 +22,6 @@ import (

 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 	"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"

@@ -156,7 +155,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
 				podName := fmt.Sprintf("synthlogger-%s", string(uuid.NewUUID()))
 				err := utils.NewLoadLoggingPod(podName, "", 1, 1*time.Second).Start(f)
 				if err != nil {
-					e2elog.Logf("Failed to create a logging pod: %v", err)
+					framework.Logf("Failed to create a logging pod: %v", err)
 				}
 				return false, nil
 			}, stopCh)
diff --git a/test/e2e/instrumentation/logging/stackdriver/soak.go b/test/e2e/instrumentation/logging/stackdriver/soak.go
index d9dd44c2d9a..14df92bdcc1 100644
--- a/test/e2e/instrumentation/logging/stackdriver/soak.go
+++ b/test/e2e/instrumentation/logging/stackdriver/soak.go
@@ -22,7 +22,6 @@ import (
 	"time"

 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
 	"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"

@@ -86,7 +85,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
 			// Starting one pod on each node.
 			for _, pod := range podsByRun[runIdx] {
 				if err := pod.Start(f); err != nil {
-					e2elog.Logf("Failed to start pod: %v", err)
+					framework.Logf("Failed to start pod: %v", err)
 				}
 			}
 			<-t.C
diff --git a/test/e2e/instrumentation/logging/stackdriver/utils.go b/test/e2e/instrumentation/logging/stackdriver/utils.go
index 98b203d928a..27360474a83 100644
--- a/test/e2e/instrumentation/logging/stackdriver/utils.go
+++ b/test/e2e/instrumentation/logging/stackdriver/utils.go
@@ -26,7 +26,6 @@ import (

 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"

 	"golang.org/x/oauth2/google"
@@ -118,7 +117,7 @@ func ensureProjectHasSinkCapacity(sinksService *sd.ProjectsSinksService, project
 		return err
 	}
 	if len(listResponse.Sinks) >= stackdriverSinkCountLimit {
-		e2elog.Logf("Reached Stackdriver sink limit. Deleting all sinks")
+		framework.Logf("Reached Stackdriver sink limit. Deleting all sinks")
 		deleteSinks(sinksService, projectID, listResponse.Sinks)
 	}
 	return nil
@@ -137,7 +136,7 @@ func deleteSinks(sinksService *sd.ProjectsSinksService, projectID string, sinks
 	for _, sink := range sinks {
 		sinkNameID := fmt.Sprintf("projects/%s/sinks/%s", projectID, sink.Name)
 		if _, err := sinksService.Delete(sinkNameID).Do(); err != nil {
-			e2elog.Logf("Failed to delete LogSink: %v", err)
+			framework.Logf("Failed to delete LogSink: %v", err)
 		}
 	}
 }
@@ -186,21 +185,21 @@ func (p *sdLogProvider) Cleanup() {
 		sinkNameID := fmt.Sprintf("projects/%s/sinks/%s", projectID, p.logSink.Name)
 		sinksService := p.sdService.Projects.Sinks
 		if _, err := sinksService.Delete(sinkNameID).Do(); err != nil {
-			e2elog.Logf("Failed to delete LogSink: %v", err)
+			framework.Logf("Failed to delete LogSink: %v", err)
 		}
 	}

 	if p.subscription != nil {
 		subsService := p.pubsubService.Projects.Subscriptions
 		if _, err := subsService.Delete(p.subscription.Name).Do(); err != nil {
-			e2elog.Logf("Failed to delete PubSub subscription: %v", err)
+			framework.Logf("Failed to delete PubSub subscription: %v", err)
 		}
 	}

 	if p.topic != nil {
 		topicsService := p.pubsubService.Projects.Topics
 		if _, err := topicsService.Delete(p.topic.Name).Do(); err != nil {
-			e2elog.Logf("Failed to delete PubSub topic: %v", err)
+			framework.Logf("Failed to delete PubSub topic: %v", err)
 		}
 	}
 }
@@ -235,7 +234,7 @@ func (p *sdLogProvider) createSink(projectID, sinkName, topicName string) (*sd.L
 	if err != nil {
 		return nil, err
 	}
-	e2elog.Logf("Using the following filter for log entries: %s", filter)
+	framework.Logf("Using the following filter for log entries: %s", filter)
 	sink := &sd.LogSink{
 		Name:        sinkName,
 		Destination: fmt.Sprintf("pubsub.googleapis.com/%s", topicName),
@@ -281,20 +280,20 @@ func (p *sdLogProvider) authorizeSink() error {
 }

 func (p *sdLogProvider) waitSinkInit() error {
-	e2elog.Logf("Waiting for log sink to become operational")
+	framework.Logf("Waiting for log sink to become operational")
 	return wait.Poll(1*time.Second, sinkStartupTimeout, func() (bool, error) {
 		err := publish(p.pubsubService, p.topic, "embrace eternity")
 		if err != nil {
-			e2elog.Logf("Failed to push message to PubSub due to %v", err)
+			framework.Logf("Failed to push message to PubSub due to %v", err)
 		}

 		messages, err := pullAndAck(p.pubsubService, p.subscription)
 		if err != nil {
-			e2elog.Logf("Failed to pull messages from PubSub due to %v", err)
+			framework.Logf("Failed to pull messages from PubSub due to %v", err)
 			return false, nil
 		}
 		if len(messages) > 0 {
-			e2elog.Logf("Sink %s is operational", p.logSink.Name)
+			framework.Logf("Sink %s is operational", p.logSink.Name)
 			return true, nil
 		}

@@ -319,32 +318,32 @@ func (p *sdLogProvider) startPollingLogs() {
 func (p *sdLogProvider) pollLogsOnce() {
 	messages, err := pullAndAck(p.pubsubService, p.subscription)
 	if err != nil {
-		e2elog.Logf("Failed to pull messages from PubSub due to %v", err)
+		framework.Logf("Failed to pull messages from PubSub due to %v", err)
 		return
 	}

 	for _, msg := range messages {
 		logEntryEncoded, err := base64.StdEncoding.DecodeString(msg.Message.Data)
 		if err != nil {
-			e2elog.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data)
+			framework.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data)
 			continue
 		}

 		var sdLogEntry sd.LogEntry
 		if err := json.Unmarshal(logEntryEncoded, &sdLogEntry); err != nil {
-			e2elog.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err)
+			framework.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err)
 			continue
 		}

 		name, ok := p.tryGetName(sdLogEntry)
 		if !ok {
-			e2elog.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type)
+			framework.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type)
 			continue
 		}

 		logEntry, err := convertLogEntry(sdLogEntry)
 		if err != nil {
-			e2elog.Logf("Failed to parse Stackdriver LogEntry: %v", err)
+			framework.Logf("Failed to parse Stackdriver LogEntry: %v", err)
 			continue
 		}

@@ -408,7 +407,7 @@ func pullAndAck(service *pubsub.Service, subs *pubsub.Subscription) ([]*pubsub.R
 	if len(ids) > 0 {
 		ackReq := &pubsub.AcknowledgeRequest{AckIds: ids}
 		if _, err = subsService.Acknowledge(subs.Name, ackReq).Do(); err != nil {
-			e2elog.Logf("Failed to ack poll: %v", err)
+			framework.Logf("Failed to ack poll: %v", err)
 		}
 	}

diff --git a/test/e2e/instrumentation/logging/utils/BUILD b/test/e2e/instrumentation/logging/utils/BUILD
index e17a07ce52c..201f36accb3 100644
--- a/test/e2e/instrumentation/logging/utils/BUILD
+++ b/test/e2e/instrumentation/logging/utils/BUILD
@@ -25,7 +25,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/k8s.io/utils/integer:go_default_library",
diff --git a/test/e2e/instrumentation/logging/utils/logging_agent.go b/test/e2e/instrumentation/logging/utils/logging_agent.go
index 001099253de..761a9cbc60c 100644
--- a/test/e2e/instrumentation/logging/utils/logging_agent.go
+++ b/test/e2e/instrumentation/logging/utils/logging_agent.go
@@ -24,7 +24,6 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/utils/integer"
 )

@@ -68,13 +67,13 @@ func EnsureLoggingAgentRestartsCount(f *framework.Framework, appName string, max
 	for _, pod := range agentPods.Items {
 		contStatuses := pod.Status.ContainerStatuses
 		if len(contStatuses) == 0 {
-			e2elog.Logf("There are no container statuses for pod %s", pod.Name)
+			framework.Logf("There are no container statuses for pod %s", pod.Name)
 			continue
 		}
 		restartCount := int(contStatuses[0].RestartCount)
 		maxRestartCount = integer.IntMax(maxRestartCount, restartCount)

-		e2elog.Logf("Logging agent %s on node %s was restarted %d times",
+		framework.Logf("Logging agent %s on node %s was restarted %d times",
 			pod.Name, pod.Spec.NodeName, restartCount)
 	}

diff --git a/test/e2e/instrumentation/logging/utils/logging_pod.go b/test/e2e/instrumentation/logging/utils/logging_pod.go
index 160ca0294d2..7c34bf47164 100644
--- a/test/e2e/instrumentation/logging/utils/logging_pod.go
+++ b/test/e2e/instrumentation/logging/utils/logging_pod.go
@@ -26,7 +26,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -95,7 +94,7 @@ func (p *loadLoggingPod) Name() string {
 }

 func (p *loadLoggingPod) Start(f *framework.Framework) error {
-	e2elog.Logf("Starting load logging pod %s", p.name)
+	framework.Logf("Starting load logging pod %s", p.name)
 	f.PodClient().Create(&v1.Pod{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name: p.name,
@@ -161,7 +160,7 @@ func (p *execLoggingPod) Name() string {
 }

 func (p *execLoggingPod) Start(f *framework.Framework) error {
-	e2elog.Logf("Starting repeating logging pod %s", p.name)
+	framework.Logf("Starting repeating logging pod %s", p.name)
 	f.PodClient().Create(&v1.Pod{
 		ObjectMeta: meta_v1.ObjectMeta{
 			Name: p.name,
diff --git a/test/e2e/instrumentation/logging/utils/wait.go b/test/e2e/instrumentation/logging/utils/wait.go
index 391d8151266..ae4b1608bb8 100644
--- a/test/e2e/instrumentation/logging/utils/wait.go
+++ b/test/e2e/instrumentation/logging/utils/wait.go
@@ -22,7 +22,7 @@ import (
 	"time"

 	"k8s.io/apimachinery/pkg/util/wait"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"k8s.io/kubernetes/test/e2e/framework"
 )

 // LogChecker is an interface for an entity that can check whether logging
@@ -195,14 +195,14 @@ func getFullIngestionTimeout(podsMap map[string]FiniteLoggingPod, slack float64)
 		totalWant += want
 	}
 	if len(lossMsgs) > 0 {
-		e2elog.Logf("Still missing logs from:\n%s", strings.Join(lossMsgs, "\n"))
+		framework.Logf("Still missing logs from:\n%s", strings.Join(lossMsgs, "\n"))
 	}
 	lostFrac := 1 - float64(totalGot)/float64(totalWant)
 	if lostFrac > slack {
 		return fmt.Errorf("still missing %.2f%% of logs, only %.2f%% is tolerable", lostFrac*100, slack*100)
 	}

-	e2elog.Logf("Missing %.2f%% of logs, which is lower than the threshold %.2f%%",
+	framework.Logf("Missing %.2f%% of logs, which is lower than the threshold %.2f%%",
 		lostFrac*100, slack*100)
 	return nil
 }
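
Every hunk above applies the same mechanical substitution: the aliased e2elog import of test/e2e/framework/log is dropped from the Go imports and from the BUILD deps, and call sites log through the framework package directly. A minimal sketch of the resulting call-site pattern follows; the package name and helper function are hypothetical and shown only for illustration, assuming the snippet lives inside the Kubernetes test tree where framework.Logf and framework.Failf are available.

package logging

import (
	// After this patch, the only logging dependency is the framework package itself;
	// the aliased e2elog import of "k8s.io/kubernetes/test/e2e/framework/log" is gone.
	"k8s.io/kubernetes/test/e2e/framework"
)

// logProxyError is a hypothetical helper that illustrates the migrated pattern:
// what used to be e2elog.Logf / e2elog.Failf is now framework.Logf / framework.Failf,
// with identical format strings and arguments.
func logProxyError(err error) {
	framework.Logf("Failed to get services proxy request: %v", err)
}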