Merge pull request #55940 from shyamjvs/reduce-spam-from-resource-gatherer

Automatic merge from submit-queue (batch tested with PRs 55233, 55927, 55903, 54867, 55940). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Control log verbosity in resource gatherer

PR https://github.com/kubernetes/kubernetes/pull/53541 added per-container logging to the resource gatherer that is too verbose for normal use: the gatherer logs one line per container per probe, so a large cluster can easily produce tens of thousands of lines per probe cycle.
As a result, we're seeing a lot of spam in our large-cluster performance tests (e.g. https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-e2e-gci-gce-scalability/8046/build-log.txt).

This PR makes those logs opt-in through a new PrintVerboseLogs option. It is off by default, but we turn it on for the GPU test to preserve the existing behavior there.
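For illustration, a minimal sketch of how a caller would opt in to the verbose logs. The option values mirror the defaults the framework passes in BeforeEach (first hunk below); InKubemark is an assumed name for the struct's first field, which the hunks do not show:

    // Sketch only: f is assumed to be a *framework.Framework, and "time"
    // plus the e2e framework package are assumed to be imported.
    rsgather, err := framework.NewResourceUsageGatherer(f.ClientSet,
        framework.ResourceGathererOptions{
            InKubemark:                  false, // assumed field name (not shown in the hunks)
            MasterOnly:                  false,
            ResourceDataGatheringPeriod: 60 * time.Second,
            ProbeDuration:               15 * time.Second,
            PrintVerboseLogs:            true, // the new knob; defaults to false
        }, nil)
    if err != nil {
        framework.Logf("Error while creating NewResourceUsageGatherer: %v", err)
    } else {
        go rsgather.StartGatheringData()
    }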

/cc @jiayingz @mindprince
Commit 87d45a54bd by Kubernetes Submit Queue, 2017-11-18 12:26:18 -08:00, committed by GitHub.
3 changed files with 9 additions and 2 deletions


@@ -207,6 +207,7 @@ func (f *Framework) BeforeEach() {
                 MasterOnly:                  TestContext.GatherKubeSystemResourceUsageData == "master",
                 ResourceDataGatheringPeriod: 60 * time.Second,
                 ProbeDuration:               15 * time.Second,
+                PrintVerboseLogs:            false,
             }, nil)
             if err != nil {
                 Logf("Error while creating NewResourceUsageGatherer: %v", err)


@@ -137,6 +137,7 @@ type resourceGatherWorker struct {
     inKubemark                  bool
     resourceDataGatheringPeriod time.Duration
     probeDuration               time.Duration
+    printVerboseLogs            bool
 }
 
 func (w *resourceGatherWorker) singleProbe() {
@@ -161,7 +162,9 @@ func (w *resourceGatherWorker) singleProbe() {
         }
         for k, v := range nodeUsage {
             data[k] = v
-            Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
+            if w.printVerboseLogs {
+                Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
+            }
         }
     }
     w.dataSeries = append(w.dataSeries, data)
@@ -202,6 +205,7 @@ type ResourceGathererOptions struct {
     MasterOnly                  bool
     ResourceDataGatheringPeriod time.Duration
     ProbeDuration               time.Duration
+    PrintVerboseLogs            bool
 }
 
 func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*containerResourceGatherer, error) {
@@ -221,6 +225,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*containerResourceGatherer, error) {
             finished:                    false,
             resourceDataGatheringPeriod: options.ResourceDataGatheringPeriod,
             probeDuration:               options.ProbeDuration,
+            printVerboseLogs:            options.PrintVerboseLogs,
         })
     } else {
         // Tracks kube-system pods if no valid PodList is passed in.
@@ -259,6 +264,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*containerResourceGatherer, error) {
             inKubemark:                  false,
             resourceDataGatheringPeriod: options.ResourceDataGatheringPeriod,
             probeDuration:               options.ProbeDuration,
+            printVerboseLogs:            options.PrintVerboseLogs,
         })
         if options.MasterOnly {
             break


@@ -184,7 +184,7 @@ func testNvidiaGPUsOnCOS(f *framework.Framework) {
     pods, err := framework.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet"))
     framework.ExpectNoError(err, "getting pods controlled by the daemonset")
     framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
-    rsgather, err := framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{false, false, 2 * time.Second, 2 * time.Second}, pods)
+    rsgather, err := framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{false, false, 2 * time.Second, 2 * time.Second, true}, pods)
     framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
     go rsgather.StartGatheringData()
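
A side note on this last hunk: the GPU test passes the options positionally, so the new trailing true is easy to misread. The same literal written with field names (again assuming the struct's unshown first field is named InKubemark) reads more clearly:

    framework.ResourceGathererOptions{
        InKubemark:                  false, // assumed name of the first field
        MasterOnly:                  false,
        ResourceDataGatheringPeriod: 2 * time.Second,
        ProbeDuration:               2 * time.Second,
        PrintVerboseLogs:            true, // keeps verbose logs on to preserve prior behavior
    }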