From fce28995e1256017a584ffadd50a81498a24d0f2 Mon Sep 17 00:00:00 2001
From: Shyam Jeedigunta
Date: Fri, 17 Nov 2017 12:58:13 +0100
Subject: [PATCH] Control logs verbosity in resource gatherer

---
 test/e2e/framework/framework.go               | 1 +
 test/e2e/framework/resource_usage_gatherer.go | 8 +++++++-
 test/e2e/scheduling/nvidia-gpus.go            | 2 +-
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index 69e47549f52..f0d9eb513df 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -207,6 +207,7 @@ func (f *Framework) BeforeEach() {
 				MasterOnly:                  TestContext.GatherKubeSystemResourceUsageData == "master",
 				ResourceDataGatheringPeriod: 60 * time.Second,
 				ProbeDuration:               15 * time.Second,
+				PrintVerboseLogs:            false,
 			}, nil)
 		if err != nil {
 			Logf("Error while creating NewResourceUsageGatherer: %v", err)
diff --git a/test/e2e/framework/resource_usage_gatherer.go b/test/e2e/framework/resource_usage_gatherer.go
index 9ffad1bb424..998968bf859 100644
--- a/test/e2e/framework/resource_usage_gatherer.go
+++ b/test/e2e/framework/resource_usage_gatherer.go
@@ -137,6 +137,7 @@ type resourceGatherWorker struct {
 	inKubemark                  bool
 	resourceDataGatheringPeriod time.Duration
 	probeDuration               time.Duration
+	printVerboseLogs            bool
 }
 
 func (w *resourceGatherWorker) singleProbe() {
@@ -161,7 +162,9 @@ func (w *resourceGatherWorker) singleProbe() {
 		}
 		for k, v := range nodeUsage {
 			data[k] = v
-			Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
+			if w.printVerboseLogs {
+				Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
+			}
 		}
 	}
 	w.dataSeries = append(w.dataSeries, data)
@@ -202,6 +205,7 @@ type ResourceGathererOptions struct {
 	MasterOnly                  bool
 	ResourceDataGatheringPeriod time.Duration
 	ProbeDuration               time.Duration
+	PrintVerboseLogs            bool
 }
 
 func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*containerResourceGatherer, error) {
@@ -221,6 +225,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 				finished:                    false,
 				resourceDataGatheringPeriod: options.ResourceDataGatheringPeriod,
 				probeDuration:               options.ProbeDuration,
+				printVerboseLogs:            options.PrintVerboseLogs,
 			})
 	} else {
 		// Tracks kube-system pods if no valid PodList is passed in.
@@ -259,6 +264,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 				inKubemark:                  false,
 				resourceDataGatheringPeriod: options.ResourceDataGatheringPeriod,
 				probeDuration:               options.ProbeDuration,
+				printVerboseLogs:            options.PrintVerboseLogs,
 			})
 		if options.MasterOnly {
 			break
diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go
index 5deeea25f7e..5b5aced7db2 100644
--- a/test/e2e/scheduling/nvidia-gpus.go
+++ b/test/e2e/scheduling/nvidia-gpus.go
@@ -184,7 +184,7 @@ func testNvidiaGPUsOnCOS(f *framework.Framework) {
 	pods, err := framework.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet"))
 	framework.ExpectNoError(err, "getting pods controlled by the daemonset")
 	framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
-	rsgather, err := framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{false, false, 2 * time.Second, 2 * time.Second}, pods)
+	rsgather, err := framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{false, false, 2 * time.Second, 2 * time.Second, true}, pods)
 	framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
 	go rsgather.StartGatheringData()