From da7507500f4dd21a8dbb7e13b29812e495bd296c Mon Sep 17 00:00:00 2001 From: draveness Date: Sat, 4 May 2019 08:58:46 +0800 Subject: [PATCH] refactor: use e2elog.Logf instead of framework.Logf --- test/e2e/BUILD | 1 + test/e2e/e2e.go | 35 ++++++------- test/e2e/examples.go | 7 +-- test/e2e/gke_local_ssd.go | 7 +-- test/e2e/gke_node_pools.go | 27 +++++----- test/e2e/scheduling/BUILD | 1 + .../equivalence_cache_predicates.go | 3 +- test/e2e/scheduling/limit_range.go | 15 +++--- test/e2e/scheduling/nvidia-gpus.go | 27 +++++----- test/e2e/scheduling/predicates.go | 17 ++++--- test/e2e/scheduling/preemption.go | 29 +++++------ test/e2e/scheduling/priorities.go | 13 ++--- test/e2e/scheduling/taints.go | 49 +++++++++---------- test/e2e/scheduling/ubernetes_lite.go | 5 +- test/e2e/scheduling/ubernetes_lite_volumes.go | 7 +-- test/e2e/ui/BUILD | 1 + test/e2e/ui/dashboard.go | 7 +-- 17 files changed, 133 insertions(+), 118 deletions(-) diff --git a/test/e2e/BUILD b/test/e2e/BUILD index 73b6ad0d065..33ea5709d42 100644 --- a/test/e2e/BUILD +++ b/test/e2e/BUILD @@ -66,6 +66,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/auth:go_default_library", "//test/e2e/framework/ginkgowrapper:go_default_library", + "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/framework/providers/aws:go_default_library", "//test/e2e/framework/providers/azure:go_default_library", diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 27e3000501d..317cc60e889 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -38,6 +38,7 @@ import ( commontest "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/metrics" "k8s.io/kubernetes/test/e2e/manifest" testutils "k8s.io/kubernetes/test/utils" @@ -119,26 +120,26 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // number equal to the number of allowed not-ready nodes). if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil { framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem) - framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf) + framework.LogFailedContainers(c, metav1.NamespaceSystem, e2elog.Logf) runKubernetesServiceTestContainer(c, metav1.NamespaceDefault) framework.Failf("Error waiting for all pods to be running and ready: %v", err) } if err := framework.WaitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil { - framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err) + e2elog.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err) } // Log the version of the server and this client. 
- framework.Logf("e2e test version: %s", version.Get().GitVersion) + e2elog.Logf("e2e test version: %s", version.Get().GitVersion) dc := c.DiscoveryClient serverVersion, serverErr := dc.ServerVersion() if serverErr != nil { - framework.Logf("Unexpected server error retrieving version: %v", serverErr) + e2elog.Logf("Unexpected server error retrieving version: %v", serverErr) } if serverVersion != nil { - framework.Logf("kube-apiserver version: %s", serverVersion.GitVersion) + e2elog.Logf("kube-apiserver version: %s", serverVersion.GitVersion) } // Reference common test to make the import valid. @@ -160,17 +161,17 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // and then the function that only runs on the first Ginkgo node. var _ = ginkgo.SynchronizedAfterSuite(func() { // Run on all Ginkgo nodes - framework.Logf("Running AfterSuite actions on all nodes") + e2elog.Logf("Running AfterSuite actions on all nodes") framework.RunCleanupActions() }, func() { // Run only Ginkgo on node 1 - framework.Logf("Running AfterSuite actions on node 1") + e2elog.Logf("Running AfterSuite actions on node 1") if framework.TestContext.ReportDir != "" { framework.CoreDump(framework.TestContext.ReportDir) } if framework.TestContext.GatherSuiteMetricsAfterTest { if err := gatherTestSuiteMetrics(); err != nil { - framework.Logf("Error gathering metrics: %v", err) + e2elog.Logf("Error gathering metrics: %v", err) } } if framework.TestContext.NodeKiller.Enabled { @@ -179,7 +180,7 @@ var _ = ginkgo.SynchronizedAfterSuite(func() { }) func gatherTestSuiteMetrics() error { - framework.Logf("Gathering metrics") + e2elog.Logf("Gathering metrics") c, err := framework.LoadClientset() if err != nil { return fmt.Errorf("error loading client: %v", err) @@ -204,7 +205,7 @@ func gatherTestSuiteMetrics() error { return fmt.Errorf("error writing to %q: %v", filePath, err) } } else { - framework.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON) + e2elog.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON) } return nil @@ -246,31 +247,31 @@ func RunE2ETests(t *testing.T) { // to flip to Ready, log its output and delete it. 
func runKubernetesServiceTestContainer(c clientset.Interface, ns string) { path := "test/images/clusterapi-tester/pod.yaml" - framework.Logf("Parsing pod from %v", path) + e2elog.Logf("Parsing pod from %v", path) p, err := manifest.PodFromManifest(path) if err != nil { - framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err) + e2elog.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err) return } p.Namespace = ns if _, err := c.CoreV1().Pods(ns).Create(p); err != nil { - framework.Logf("Failed to create %v: %v", p.Name, err) + e2elog.Logf("Failed to create %v: %v", p.Name, err) return } defer func() { if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil { - framework.Logf("Failed to delete pod %v: %v", p.Name, err) + e2elog.Logf("Failed to delete pod %v: %v", p.Name, err) } }() timeout := 5 * time.Minute if err := framework.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil { - framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err) + e2elog.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err) return } logs, err := framework.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name) if err != nil { - framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err) + e2elog.Logf("Failed to retrieve logs from %v: %v", p.Name, err) } else { - framework.Logf("Output of clusterapi-tester:\n%v", logs) + e2elog.Logf("Output of clusterapi-tester:\n%v", logs) } } diff --git a/test/e2e/examples.go b/test/e2e/examples.go index 950691b4dbe..a630703fad2 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -31,6 +31,7 @@ import ( commonutils "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/auth" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/testfiles" . "github.com/onsi/ginkgo" @@ -82,14 +83,14 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName)) stat := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName) - framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount) + e2elog.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount) if stat.RestartCount > 0 { - framework.Logf("Saw %v restart, succeeded...", podName) + e2elog.Logf("Saw %v restart, succeeded...", podName) wg.Done() return } } - framework.Logf("Failed waiting for %v restart! ", podName) + e2elog.Logf("Failed waiting for %v restart! ", podName) passed = false wg.Done() } diff --git a/test/e2e/gke_local_ssd.go b/test/e2e/gke_local_ssd.go index 7e4965ec478..b51f9bd9ac2 100644 --- a/test/e2e/gke_local_ssd.go +++ b/test/e2e/gke_local_ssd.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" . 
"github.com/onsi/ginkgo" ) @@ -37,14 +38,14 @@ var _ = framework.KubeDescribe("GKE local SSD [Feature:GKELocalSSD]", func() { }) It("should write and read from node local SSD [Feature:GKELocalSSD]", func() { - framework.Logf("Start local SSD test") + e2elog.Logf("Start local SSD test") createNodePoolWithLocalSsds("np-ssd") doTestWriteAndReadToLocalSsd(f) }) }) func createNodePoolWithLocalSsds(nodePoolName string) { - framework.Logf("Create node pool: %s with local SSDs in cluster: %s ", + e2elog.Logf("Create node pool: %s with local SSDs in cluster: %s ", nodePoolName, framework.TestContext.CloudConfig.Cluster) out, err := exec.Command("gcloud", "alpha", "container", "node-pools", "create", nodePoolName, @@ -53,7 +54,7 @@ func createNodePoolWithLocalSsds(nodePoolName string) { if err != nil { framework.Failf("Failed to create node pool %s: Err: %v\n%v", nodePoolName, err, string(out)) } - framework.Logf("Successfully created node pool %s:\n%v", nodePoolName, string(out)) + e2elog.Logf("Successfully created node pool %s:\n%v", nodePoolName, string(out)) } func doTestWriteAndReadToLocalSsd(f *framework.Framework) { diff --git a/test/e2e/gke_node_pools.go b/test/e2e/gke_node_pools.go index 856034b4db6..784f5e06887 100644 --- a/test/e2e/gke_node_pools.go +++ b/test/e2e/gke_node_pools.go @@ -21,6 +21,7 @@ import ( "os/exec" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" . "github.com/onsi/ginkgo" ) @@ -34,13 +35,13 @@ var _ = framework.KubeDescribe("GKE node pools [Feature:GKENodePool]", func() { }) It("should create a cluster with multiple node pools [Feature:GKENodePool]", func() { - framework.Logf("Start create node pool test") + e2elog.Logf("Start create node pool test") testCreateDeleteNodePool(f, "test-pool") }) }) func testCreateDeleteNodePool(f *framework.Framework, poolName string) { - framework.Logf("Create node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster) + e2elog.Logf("Create node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster) clusterStr := fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster) @@ -48,50 +49,50 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) { poolName, clusterStr, "--num-nodes=2").CombinedOutput() - framework.Logf("\n%s", string(out)) + e2elog.Logf("\n%s", string(out)) if err != nil { framework.Failf("Failed to create node pool %q. Err: %v\n%v", poolName, err, string(out)) } - framework.Logf("Successfully created node pool %q.", poolName) + e2elog.Logf("Successfully created node pool %q.", poolName) out, err = exec.Command("gcloud", "container", "node-pools", "list", clusterStr).CombinedOutput() if err != nil { framework.Failf("Failed to list node pools from cluster %q. 
Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out)) } - framework.Logf("Node pools:\n%s", string(out)) + e2elog.Logf("Node pools:\n%s", string(out)) - framework.Logf("Checking that 2 nodes have the correct node pool label.") + e2elog.Logf("Checking that 2 nodes have the correct node pool label.") nodeCount := nodesWithPoolLabel(f, poolName) if nodeCount != 2 { framework.Failf("Wanted 2 nodes with node pool label, got: %v", nodeCount) } - framework.Logf("Success, found 2 nodes with correct node pool labels.") + e2elog.Logf("Success, found 2 nodes with correct node pool labels.") - framework.Logf("Deleting node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster) + e2elog.Logf("Deleting node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster) out, err = exec.Command("gcloud", "container", "node-pools", "delete", poolName, clusterStr, "-q").CombinedOutput() - framework.Logf("\n%s", string(out)) + e2elog.Logf("\n%s", string(out)) if err != nil { framework.Failf("Failed to delete node pool %q. Err: %v\n%v", poolName, err, string(out)) } - framework.Logf("Successfully deleted node pool %q.", poolName) + e2elog.Logf("Successfully deleted node pool %q.", poolName) out, err = exec.Command("gcloud", "container", "node-pools", "list", clusterStr).CombinedOutput() if err != nil { framework.Failf("\nFailed to list node pools from cluster %q. Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out)) } - framework.Logf("\nNode pools:\n%s", string(out)) + e2elog.Logf("\nNode pools:\n%s", string(out)) - framework.Logf("Checking that no nodes have the deleted node pool's label.") + e2elog.Logf("Checking that no nodes have the deleted node pool's label.") nodeCount = nodesWithPoolLabel(f, poolName) if nodeCount != 0 { framework.Failf("Wanted 0 nodes with node pool label, got: %v", nodeCount) } - framework.Logf("Success, found no nodes with the deleted node pool's label.") + e2elog.Logf("Success, found no nodes with the deleted node pool's label.") } // nodesWithPoolLabel returns the number of nodes that have the "gke-nodepool" diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD index 852db999c1a..5b06a5f8dc0 100644 --- a/test/e2e/scheduling/BUILD +++ b/test/e2e/scheduling/BUILD @@ -45,6 +45,7 @@ go_library( "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/gpu:go_default_library", + "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/providers/gce:go_default_library", "//test/e2e/framework/replicaset:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/scheduling/equivalence_cache_predicates.go b/test/e2e/scheduling/equivalence_cache_predicates.go index c0105e78daf..c61dde9c851 100644 --- a/test/e2e/scheduling/equivalence_cache_predicates.go +++ b/test/e2e/scheduling/equivalence_cache_predicates.go @@ -27,6 +27,7 @@ import ( clientset "k8s.io/client-go/kubernetes" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -72,7 +73,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { Expect(err).NotTo(HaveOccurred()) for _, node := range nodeList.Items { - framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name) + e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", 
node.Name) framework.PrintAllKubeletPods(cs, node.Name) } diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go index dc68705ad58..592d7319e82 100644 --- a/test/e2e/scheduling/limit_range.go +++ b/test/e2e/scheduling/limit_range.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -100,7 +101,7 @@ var _ = SIGDescribe("LimitRange", func() { err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources) if err != nil { // Print the pod to help in debugging. - framework.Logf("Pod %+v does not have the expected requirements", pod) + e2elog.Logf("Pod %+v does not have the expected requirements", pod) Expect(err).NotTo(HaveOccurred()) } } @@ -121,7 +122,7 @@ var _ = SIGDescribe("LimitRange", func() { err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources) if err != nil { // Print the pod to help in debugging. - framework.Logf("Pod %+v does not have the expected requirements", pod) + e2elog.Logf("Pod %+v does not have the expected requirements", pod) Expect(err).NotTo(HaveOccurred()) } } @@ -170,18 +171,18 @@ var _ = SIGDescribe("LimitRange", func() { limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(options) if err != nil { - framework.Logf("Unable to retrieve LimitRanges: %v", err) + e2elog.Logf("Unable to retrieve LimitRanges: %v", err) return false, nil } if len(limitRanges.Items) == 0 { - framework.Logf("limitRange is already deleted") + e2elog.Logf("limitRange is already deleted") return true, nil } if len(limitRanges.Items) > 0 { if limitRanges.Items[0].ObjectMeta.DeletionTimestamp == nil { - framework.Logf("deletion has not yet been observed") + e2elog.Logf("deletion has not yet been observed") return false, nil } return true, nil @@ -200,12 +201,12 @@ var _ = SIGDescribe("LimitRange", func() { }) func equalResourceRequirement(expected v1.ResourceRequirements, actual v1.ResourceRequirements) error { - framework.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests) + e2elog.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests) err := equalResourceList(expected.Requests, actual.Requests) if err != nil { return err } - framework.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits) + e2elog.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits) err = equalResourceList(expected.Limits, actual.Limits) return err } diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index 01bbaa93f64..1f667a3b8f6 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -27,6 +27,7 @@ import ( extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/gpu" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" . 
"github.com/onsi/ginkgo" @@ -81,25 +82,25 @@ func logOSImages(f *framework.Framework) { nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err, "getting node list") for _, node := range nodeList.Items { - framework.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage) + e2elog.Logf("Nodename: %v, OS Image: %v", node.Name, node.Status.NodeInfo.OSImage) } } func areGPUsAvailableOnAllSchedulableNodes(f *framework.Framework) bool { - framework.Logf("Getting list of Nodes from API server") + e2elog.Logf("Getting list of Nodes from API server") nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) framework.ExpectNoError(err, "getting node list") for _, node := range nodeList.Items { if node.Spec.Unschedulable { continue } - framework.Logf("gpuResourceName %s", gpuResourceName) + e2elog.Logf("gpuResourceName %s", gpuResourceName) if val, ok := node.Status.Capacity[gpuResourceName]; !ok || val.Value() == 0 { - framework.Logf("Nvidia GPUs not available on Node: %q", node.Name) + e2elog.Logf("Nvidia GPUs not available on Node: %q", node.Name) return false } } - framework.Logf("Nvidia GPUs exist on all schedulable nodes") + e2elog.Logf("Nvidia GPUs exist on all schedulable nodes") return true } @@ -126,34 +127,34 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra } gpuResourceName = gpu.NVIDIAGPUResourceName - framework.Logf("Using %v", dsYamlUrl) + e2elog.Logf("Using %v", dsYamlUrl) // Creates the DaemonSet that installs Nvidia Drivers. ds, err := framework.DsFromManifest(dsYamlUrl) Expect(err).NotTo(HaveOccurred()) ds.Namespace = f.Namespace.Name _, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(ds) framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset") - framework.Logf("Successfully created daemonset to install Nvidia drivers.") + e2elog.Logf("Successfully created daemonset to install Nvidia drivers.") pods, err := framework.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet")) framework.ExpectNoError(err, "failed to get pods controlled by the nvidia-driver-installer daemonset") devicepluginPods, err := framework.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet")) if err == nil { - framework.Logf("Adding deviceplugin addon pod.") + e2elog.Logf("Adding deviceplugin addon pod.") pods.Items = append(pods.Items, devicepluginPods.Items...) 
} var rsgather *framework.ContainerResourceGatherer if setupResourceGatherer { - framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.") + e2elog.Logf("Starting ResourceUsageGather for the created DaemonSet pods.") rsgather, err = framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{InKubemark: false, Nodes: framework.AllNodes, ResourceDataGatheringPeriod: 2 * time.Second, ProbeDuration: 2 * time.Second, PrintVerboseLogs: true}, pods) framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods") go rsgather.StartGatheringData() } // Wait for Nvidia GPUs to be available on nodes - framework.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...") + e2elog.Logf("Waiting for drivers to be installed and GPUs to be available in Node Capacity...") Eventually(func() bool { return areGPUsAvailableOnAllSchedulableNodes(f) }, driverInstallTimeout, time.Second).Should(BeTrue()) @@ -163,18 +164,18 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra func testNvidiaGPUs(f *framework.Framework) { rsgather := SetupNVIDIAGPUNode(f, true) - framework.Logf("Creating as many pods as there are Nvidia GPUs and have the pods run a CUDA app") + e2elog.Logf("Creating as many pods as there are Nvidia GPUs and have the pods run a CUDA app") podList := []*v1.Pod{} for i := int64(0); i < getGPUsAvailable(f); i++ { podList = append(podList, f.PodClient().Create(makeCudaAdditionDevicePluginTestPod())) } - framework.Logf("Wait for all test pods to succeed") + e2elog.Logf("Wait for all test pods to succeed") // Wait for all pods to succeed for _, po := range podList { f.PodClient().WaitForSuccess(po.Name, 5*time.Minute) } - framework.Logf("Stopping ResourceUsageGather") + e2elog.Logf("Stopping ResourceUsageGather") constraints := make(map[string]framework.ResourceConstraint) // For now, just gets summary. Can pass valid constraints in the future. 
summary, err := rsgather.StopAndSummarize([]int{50, 90, 100}, constraints) diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index 2efc175726e..7a6040f94f8 100644 --- a/test/e2e/scheduling/predicates.go +++ b/test/e2e/scheduling/predicates.go @@ -30,6 +30,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -88,7 +89,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { framework.ExpectNoError(err) for _, node := range nodeList.Items { - framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name) + e2elog.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name) framework.PrintAllKubeletPods(cs, node.Name) } @@ -103,7 +104,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { totalPodCapacity = 0 for _, node := range nodeList.Items { - framework.Logf("Node: %v", node) + e2elog.Logf("Node: %v", node) podCapacity, found := node.Status.Capacity[v1.ResourcePods] Expect(found).To(Equal(true)) totalPodCapacity += podCapacity.Value() @@ -123,7 +124,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { *initPausePod(f, pausePodConfig{ Name: "", Labels: map[string]string{"name": ""}, - }), true, framework.Logf)) + }), true, e2elog.Logf)) } podName := "additional-pod" WaitForSchedulerAfterAction(f, createPausePodAction(f, pausePodConfig{ @@ -158,7 +159,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { for _, pod := range pods.Items { _, found := nodeToAllocatableMap[pod.Spec.NodeName] if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed { - framework.Logf("Pod %v requesting local ephemeral resource =%vm on Node %v", pod.Name, getRequestedStorageEphemeralStorage(pod), pod.Spec.NodeName) + e2elog.Logf("Pod %v requesting local ephemeral resource =%vm on Node %v", pod.Name, getRequestedStorageEphemeralStorage(pod), pod.Spec.NodeName) nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedStorageEphemeralStorage(pod) } } @@ -167,9 +168,9 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { milliEphemeralStoragePerPod := nodeMaxAllocatable / maxNumberOfPods - framework.Logf("Using pod capacity: %vm", milliEphemeralStoragePerPod) + e2elog.Logf("Using pod capacity: %vm", milliEphemeralStoragePerPod) for name, leftAllocatable := range nodeToAllocatableMap { - framework.Logf("Node: %v has local ephemeral resource allocatable: %vm", name, leftAllocatable) + e2elog.Logf("Node: %v has local ephemeral resource allocatable: %vm", name, leftAllocatable) podsNeededForSaturation += (int)(leftAllocatable / milliEphemeralStoragePerPod) } @@ -192,7 +193,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { v1.ResourceEphemeralStorage: *resource.NewMilliQuantity(milliEphemeralStoragePerPod, "DecimalSI"), }, }, - }), true, framework.Logf)) + }), true, e2elog.Logf)) } podName := "additional-pod" conf := pausePodConfig{ @@ -262,7 +263,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { for _, pod := range pods.Items { _, found := nodeToAllocatableMap[pod.Spec.NodeName] if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed { - framework.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName) + e2elog.Logf("Pod %v requesting resource 
cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName) nodeToAllocatableMap[pod.Spec.NodeName] -= getRequestedCPU(pod) } } diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index 95770d580ab..0efed3588a7 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -34,6 +34,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/apis/scheduling" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/replicaset" . "github.com/onsi/ginkgo" @@ -115,7 +116,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { Requests: podRes, }, }) - framework.Logf("Created pod: %v", pods[i].Name) + e2elog.Logf("Created pod: %v", pods[i].Name) } By("Wait for pods to be scheduled.") for _, pod := range pods { @@ -175,7 +176,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { Requests: podRes, }, }) - framework.Logf("Created pod: %v", pods[i].Name) + e2elog.Logf("Created pod: %v", pods[i].Name) } By("Wait for pods to be scheduled.") for _, pod := range pods { @@ -285,7 +286,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { }, }, }) - framework.Logf("Created pod: %v", pods[i].Name) + e2elog.Logf("Created pod: %v", pods[i].Name) } defer func() { // Remove added labels for i := 0; i < numPods; i++ { @@ -368,7 +369,7 @@ var _ = SIGDescribe("PodPriorityResolution [Serial]", func() { framework.ExpectNoError(err) }() Expect(pod.Spec.Priority).NotTo(BeNil()) - framework.Logf("Created pod: %v", pod.Name) + e2elog.Logf("Created pod: %v", pod.Name) } }) }) @@ -391,11 +392,11 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { // list existing priorities priorityList, err := cs.SchedulingV1().PriorityClasses().List(metav1.ListOptions{}) if err != nil { - framework.Logf("Unable to list priorities: %v", err) + e2elog.Logf("Unable to list priorities: %v", err) } else { - framework.Logf("List existing priorities:") + e2elog.Logf("List existing priorities:") for _, p := range priorityList.Items { - framework.Logf("%v/%v created at %v", p.Name, p.Value, p.CreationTimestamp) + e2elog.Logf("%v/%v created at %v", p.Name, p.Value, p.CreationTimestamp) } } } @@ -420,7 +421,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { // find an available node By("Finding an available node") nodeName := GetNodeThatCanRunPod(f) - framework.Logf("found a healthy node: %s", nodeName) + e2elog.Logf("found a healthy node: %s", nodeName) // get the node API object var err error @@ -449,8 +450,8 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { priorityPairs = append(priorityPairs, priorityPair{name: priorityName, value: priorityVal}) _, err := cs.SchedulingV1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: priorityName}, Value: priorityVal}) if err != nil { - framework.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err) - framework.Logf("Reason: %v. Msg: %v", errors.ReasonForError(err), err) + e2elog.Logf("Failed to create priority '%v/%v': %v", priorityName, priorityVal, err) + e2elog.Logf("Reason: %v. 
Msg: %v", errors.ReasonForError(err), err) } Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true)) } @@ -549,16 +550,16 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { runPauseRS(f, rsConfs[i]) } - framework.Logf("pods created so far: %v", podNamesSeen) - framework.Logf("length of pods created so far: %v", len(podNamesSeen)) + e2elog.Logf("pods created so far: %v", podNamesSeen) + e2elog.Logf("length of pods created so far: %v", len(podNamesSeen)) // create ReplicaSet4 // if runPauseRS failed, it means ReplicaSet4 cannot be scheduled even after 1 minute // which is unacceptable runPauseRS(f, rsConfs[rsNum-1]) - framework.Logf("pods created so far: %v", podNamesSeen) - framework.Logf("length of pods created so far: %v", len(podNamesSeen)) + e2elog.Logf("pods created so far: %v", podNamesSeen) + e2elog.Logf("length of pods created so far: %v", len(podNamesSeen)) // count pods number of ReplicaSet{1,2,3}, if it's more than expected replicas // then it denotes its pods have been over-preempted diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index 1a12e0ca9e7..d460f937ffd 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -35,6 +35,7 @@ import ( priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util" "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -154,7 +155,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { defer func() { // Resize the replication controller to zero to get rid of pods. if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rc.Name); err != nil { - framework.Logf("Failed to cleanup replication controller %v: %v.", rc.Name, err) + e2elog.Logf("Failed to cleanup replication controller %v: %v.", rc.Name, err) } }() @@ -301,7 +302,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n Requests: needCreateResource, }, NodeName: node.Name, - }), true, framework.Logf) + }), true, e2elog.Logf) if err != nil { return err @@ -317,7 +318,7 @@ func createBalancedPodForNodes(f *framework.Framework, cs clientset.Interface, n } func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.ResourceRequirements) (float64, float64) { - framework.Logf("ComputeCpuMemFraction for node: %v", node.Name) + e2elog.Logf("ComputeCpuMemFraction for node: %v", node.Name) totalRequestedCpuResource := resource.Requests.Cpu().MilliValue() totalRequestedMemResource := resource.Requests.Memory().Value() allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) @@ -326,7 +327,7 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re } for _, pod := range allpods.Items { if pod.Spec.NodeName == node.Name { - framework.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, getNonZeroRequests(&pod).MilliCPU, getNonZeroRequests(&pod).Memory) + e2elog.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, getNonZeroRequests(&pod).MilliCPU, getNonZeroRequests(&pod).Memory) // Ignore best effort pods while computing fractions as they won't be taken in account by scheduler. 
if v1qos.GetPodQOS(&pod) == v1.PodQOSBestEffort { continue @@ -352,8 +353,8 @@ func computeCpuMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re memFraction = floatOne } - framework.Logf("Node: %v, totalRequestedCpuResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCpuResource, cpuAllocatableMil, cpuFraction) - framework.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction) + e2elog.Logf("Node: %v, totalRequestedCpuResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCpuResource, cpuAllocatableMil, cpuFraction) + e2elog.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction) return cpuFraction, memFraction } diff --git a/test/e2e/scheduling/taints.go b/test/e2e/scheduling/taints.go index bdf054f887c..111349f7ff2 100644 --- a/test/e2e/scheduling/taints.go +++ b/test/e2e/scheduling/taints.go @@ -19,20 +19,19 @@ package scheduling import ( "time" + . "github.com/onsi/ginkgo" + _ "github.com/stretchr/testify/assert" + + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" - - "k8s.io/client-go/tools/cache" - - "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" testutils "k8s.io/kubernetes/test/utils" - - . "github.com/onsi/ginkgo" - _ "github.com/stretchr/testify/assert" ) func getTestTaint() v1.Taint { @@ -137,7 +136,7 @@ func createTestController(cs clientset.Interface, observedDeletions chan string, }, }, ) - framework.Logf("Starting informer...") + e2elog.Logf("Starting informer...") go controller.Run(stopCh) } @@ -179,7 +178,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { By("Starting pod...") nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute) framework.ExpectNoError(err) - framework.Logf("Pod is running on %v. Tainting Node", nodeName) + e2elog.Logf("Pod is running on %v. Tainting Node", nodeName) By("Trying to apply a taint on the Node") testTaint := getTestTaint() @@ -194,7 +193,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { case <-timeoutChannel: framework.Failf("Failed to evict Pod") case <-observedDeletions: - framework.Logf("Noticed Pod eviction. Test successful") + e2elog.Logf("Noticed Pod eviction. Test successful") } }) @@ -211,7 +210,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { By("Starting pod...") nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute) framework.ExpectNoError(err) - framework.Logf("Pod is running on %v. Tainting Node", nodeName) + e2elog.Logf("Pod is running on %v. Tainting Node", nodeName) By("Trying to apply a taint on the Node") testTaint := getTestTaint() @@ -224,7 +223,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: - framework.Logf("Pod wasn't evicted. Test successful") + e2elog.Logf("Pod wasn't evicted. 
Test successful") case <-observedDeletions: framework.Failf("Pod was evicted despite toleration") } @@ -244,7 +243,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { By("Starting pod...") nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute) framework.ExpectNoError(err) - framework.Logf("Pod is running on %v. Tainting Node", nodeName) + e2elog.Logf("Pod is running on %v. Tainting Node", nodeName) By("Trying to apply a taint on the Node") testTaint := getTestTaint() @@ -257,7 +256,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { timeoutChannel := time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+AdditionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: - framework.Logf("Pod wasn't evicted") + e2elog.Logf("Pod wasn't evicted") case <-observedDeletions: framework.Failf("Pod was evicted despite toleration") return @@ -268,7 +267,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { case <-timeoutChannel: framework.Failf("Pod wasn't evicted") case <-observedDeletions: - framework.Logf("Pod was evicted after toleration time run out. Test successful") + e2elog.Logf("Pod was evicted after toleration time run out. Test successful") return } }) @@ -288,7 +287,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { By("Starting pod...") nodeName, err := testutils.RunPodAndGetNodeName(cs, pod, 2*time.Minute) framework.ExpectNoError(err) - framework.Logf("Pod is running on %v. Tainting Node", nodeName) + e2elog.Logf("Pod is running on %v. Tainting Node", nodeName) By("Trying to apply a taint on the Node") testTaint := getTestTaint() @@ -306,19 +305,19 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { timeoutChannel := time.NewTimer(AdditionalWaitPerDeleteSeconds).C select { case <-timeoutChannel: - framework.Logf("Pod wasn't evicted. Proceeding") + e2elog.Logf("Pod wasn't evicted. Proceeding") case <-observedDeletions: framework.Failf("Pod was evicted despite toleration") return } - framework.Logf("Removing taint from Node") + e2elog.Logf("Removing taint from Node") framework.RemoveTaintOffNode(cs, nodeName, testTaint) taintRemoved = true By("Waiting some time to make sure that toleration time passed.") timeoutChannel = time.NewTimer(time.Duration(KubeletPodDeletionDelaySeconds+3*AdditionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: - framework.Logf("Pod wasn't evicted. Test successful") + e2elog.Logf("Pod wasn't evicted. Test successful") case <-observedDeletions: framework.Failf("Pod was evicted despite toleration") } @@ -355,10 +354,10 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { By("Starting pods...") nodeName1, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute) framework.ExpectNoError(err) - framework.Logf("Pod1 is running on %v. Tainting Node", nodeName1) + e2elog.Logf("Pod1 is running on %v. Tainting Node", nodeName1) nodeName2, err := testutils.RunPodAndGetNodeName(cs, pod2, 2*time.Minute) framework.ExpectNoError(err) - framework.Logf("Pod2 is running on %v. Tainting Node", nodeName2) + e2elog.Logf("Pod2 is running on %v. 
Tainting Node", nodeName2) By("Trying to apply a taint on the Nodes") testTaint := getTestTaint() @@ -387,7 +386,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { case podName := <-observedDeletions: evicted++ if podName == podGroup+"1" { - framework.Logf("Noticed Pod %q gets evicted.", podName) + e2elog.Logf("Noticed Pod %q gets evicted.", podName) } else if podName == podGroup+"2" { framework.Failf("Unexepected Pod %q gets evicted.", podName) return @@ -417,12 +416,12 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { framework.Failf("error getting kubernetes.io/hostname label on node %s", nodeName) } framework.ExpectNoError(err) - framework.Logf("Pod1 is running on %v. Tainting Node", nodeName) + e2elog.Logf("Pod1 is running on %v. Tainting Node", nodeName) // ensure pod2 lands on the same node as pod1 pod2.Spec.NodeSelector = map[string]string{"kubernetes.io/hostname": nodeHostNameLabel} _, err = testutils.RunPodAndGetNodeName(cs, pod2, 2*time.Minute) framework.ExpectNoError(err) - framework.Logf("Pod2 is running on %v. Tainting Node", nodeName) + e2elog.Logf("Pod2 is running on %v. Tainting Node", nodeName) By("Trying to apply a taint on the Node") testTaint := getTestTaint() @@ -440,7 +439,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { framework.Failf("Failed to evict all Pods. %d pod(s) is not evicted.", 2-evicted) return case podName := <-observedDeletions: - framework.Logf("Noticed Pod %q gets evicted.", podName) + e2elog.Logf("Noticed Pod %q gets evicted.", podName) evicted++ } } diff --git a/test/e2e/scheduling/ubernetes_lite.go b/test/e2e/scheduling/ubernetes_lite.go index f985f133e06..b0bae6d4222 100644 --- a/test/e2e/scheduling/ubernetes_lite.go +++ b/test/e2e/scheduling/ubernetes_lite.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -100,7 +101,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) // Based on the callers, replicas is always positive number: zoneCount >= 0 implies (2*zoneCount)+1 > 0. // Thus, no need to test for it. Once the precondition changes to zero number of replicas, // test for replicaCount > 0. Otherwise, StartPods panics. - framework.ExpectNoError(testutils.StartPods(f.ClientSet, replicaCount, f.Namespace.Name, serviceName, *podSpec, false, framework.Logf)) + framework.ExpectNoError(testutils.StartPods(f.ClientSet, replicaCount, f.Namespace.Name, serviceName, *podSpec, false, e2elog.Logf)) // Wait for all of them to be scheduled selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName})) @@ -207,7 +208,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) { defer func() { // Resize the replication controller to zero to get rid of pods. if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, controller.Name); err != nil { - framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) + e2elog.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) } }() // List the pods, making sure we observe all the replicas. 
diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go index c4ffd285d39..d282b002475 100644 --- a/test/e2e/scheduling/ubernetes_lite_volumes.go +++ b/test/e2e/scheduling/ubernetes_lite_volumes.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/providers/gce" ) @@ -65,7 +66,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { // Get all the zones that the nodes are in expectedZones, err := gceCloud.GetAllZonesFromCloudProvider() Expect(err).NotTo(HaveOccurred()) - framework.Logf("Expected zones: %v", expectedZones) + e2elog.Logf("Expected zones: %v", expectedZones) // Get all the zones in this current region region := gceCloud.Region() @@ -120,7 +121,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { defer func() { // Teardown of the compute instance - framework.Logf("Deleting compute resource: %v", name) + e2elog.Logf("Deleting compute resource: %v", name) err := gceCloud.DeleteInstance(project, zone, name) Expect(err).NotTo(HaveOccurred()) }() @@ -140,7 +141,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { // Defer the cleanup defer func() { - framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) + e2elog.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil) if err != nil { framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err) diff --git a/test/e2e/ui/BUILD b/test/e2e/ui/BUILD index d38ec5b5bd0..a953385001b 100644 --- a/test/e2e/ui/BUILD +++ b/test/e2e/ui/BUILD @@ -14,6 +14,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/log:go_default_library", "//test/utils:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", ], diff --git a/test/e2e/ui/dashboard.go b/test/e2e/ui/dashboard.go index 74321add0b6..58042e05ba3 100644 --- a/test/e2e/ui/dashboard.go +++ b/test/e2e/ui/dashboard.go @@ -26,6 +26,7 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" testutils "k8s.io/kubernetes/test/utils" "github.com/onsi/ginkgo" @@ -63,7 +64,7 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() { var status int proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if errProxy != nil { - framework.Logf("Get services proxy request failed: %v", errProxy) + e2elog.Logf("Get services proxy request failed: %v", errProxy) } ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) @@ -82,9 +83,9 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() { framework.Failf("Request to kubernetes-dashboard failed: %v", err) return true, err } - framework.Logf("Request to kubernetes-dashboard failed: %v", err) + e2elog.Logf("Request to kubernetes-dashboard failed: %v", err) } else if status != http.StatusOK { - framework.Logf("Unexpected status from kubernetes-dashboard: %v", status) + e2elog.Logf("Unexpected status from kubernetes-dashboard: %v", status) } // Don't return err here as it 
aborts polling. return status == http.StatusOK, nil
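
For reference, the pattern this patch applies is a straight call-site substitution: the log helpers are imported under the `e2elog` alias from `k8s.io/kubernetes/test/e2e/framework/log`, and each `framework.Logf(...)` call becomes `e2elog.Logf(...)` with the format string and arguments left untouched. A minimal sketch of that pattern is below; the package and function names (`example`, `logServerVersion`, `gitVersion`) are illustrative only and do not appear in the patch.

```go
package example

import (
	// Alias used throughout the e2e suite after this refactor;
	// the import path is the one added to the BUILD files above.
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// logServerVersion mirrors the call-site change made in this patch:
// framework.Logf(...) -> e2elog.Logf(...), arguments unchanged.
func logServerVersion(gitVersion string) {
	e2elog.Logf("kube-apiserver version: %s", gitVersion)
}
```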