Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-29 06:27:05 +00:00
Merge pull request #55940 from shyamjvs/reduce-spam-from-resource-gatherer
Automatic merge from submit-queue (batch tested with PRs 55233, 55927, 55903, 54867, 55940). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Control log verbosity in the resource gatherer

PR https://github.com/kubernetes/kubernetes/pull/53541 added logging to the resource gatherer that is too verbose for normal use. As a result, we are seeing a lot of spam in our large-cluster performance tests (e.g. https://storage.googleapis.com/kubernetes-jenkins/logs/ci-kubernetes-e2e-gci-gce-scalability/8046/build-log.txt).

This PR makes the verbosity of those logs controllable through an option. It is off by default, but it is turned on for the GPU test to preserve the existing behavior there.

/cc @jiayingz @mindprince
Commit 87d45a54bd
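The diff below applies a straightforward flag-gated logging pattern: a boolean travels from the public options struct into each gather worker, and the noisy per-container log line is wrapped in a check on that boolean. A minimal, self-contained sketch of the same pattern, using illustrative names rather than the framework's actual types:

package main

import "log"

// gatherOptions stands in for a public options struct: callers opt in
// to verbose logging explicitly.
type gatherOptions struct {
	printVerboseLogs bool
}

// gatherWorker keeps the flag it was configured with plus the samples it collects.
type gatherWorker struct {
	nodeName         string
	printVerboseLogs bool
	samples          []map[string]float64
}

// singleProbe always records the sample; the per-container log line is
// emitted only when verbose logging was requested.
func (w *gatherWorker) singleProbe(usage map[string]float64) {
	if w.printVerboseLogs {
		for container, cpu := range usage {
			log.Printf("container %v on node %v: CPUUsageInCores=%v", container, w.nodeName, cpu)
		}
	}
	w.samples = append(w.samples, usage)
}

func main() {
	opts := gatherOptions{printVerboseLogs: false} // quiet by default, matching the PR
	w := &gatherWorker{nodeName: "node-1", printVerboseLogs: opts.printVerboseLogs}
	w.singleProbe(map[string]float64{"kubelet": 0.12})
	log.Printf("collected %d sample(s)", len(w.samples))
}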
@@ -207,6 +207,7 @@ func (f *Framework) BeforeEach() {
 				MasterOnly:                  TestContext.GatherKubeSystemResourceUsageData == "master",
 				ResourceDataGatheringPeriod: 60 * time.Second,
 				ProbeDuration:               15 * time.Second,
+				PrintVerboseLogs:            false,
 			}, nil)
 		if err != nil {
 			Logf("Error while creating NewResourceUsageGatherer: %v", err)
@@ -137,6 +137,7 @@ type resourceGatherWorker struct {
 	inKubemark                  bool
 	resourceDataGatheringPeriod time.Duration
 	probeDuration               time.Duration
+	printVerboseLogs            bool
 }
 
 func (w *resourceGatherWorker) singleProbe() {
@@ -161,7 +162,9 @@ func (w *resourceGatherWorker) singleProbe() {
 		}
 		for k, v := range nodeUsage {
 			data[k] = v
-			Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
+			if w.printVerboseLogs {
+				Logf("Get container %v usage on node %v. CPUUsageInCores: %v, MemoryUsageInBytes: %v, MemoryWorkingSetInBytes: %v", k, w.nodeName, v.CPUUsageInCores, v.MemoryUsageInBytes, v.MemoryWorkingSetInBytes)
+			}
 		}
 	}
 	w.dataSeries = append(w.dataSeries, data)
@@ -202,6 +205,7 @@ type ResourceGathererOptions struct {
 	MasterOnly                  bool
 	ResourceDataGatheringPeriod time.Duration
 	ProbeDuration               time.Duration
+	PrintVerboseLogs            bool
 }
 
 func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*containerResourceGatherer, error) {
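For callers that want the verbose per-container lines, a named-field literal makes the intent explicit and leaves omitted fields at their zero values, so existing callers stay quiet by default. A hedged caller sketch; the surrounding test setup (the framework object, the choice of periods, and passing nil to fall back to tracking kube-system pods) is assumed, while the function signature and field names come from the diff above:

// Illustrative package name; this would live alongside other e2e test code.
package e2e

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

// startVerboseGatherer is a hypothetical helper showing the new option in use.
func startVerboseGatherer(f *framework.Framework) error {
	gatherer, err := framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{
		MasterOnly:                  false,
		ResourceDataGatheringPeriod: 60 * time.Second,
		ProbeDuration:               15 * time.Second,
		PrintVerboseLogs:            true, // the zero value (false) preserves the old, quiet behavior
	}, nil) // nil pod list: the gatherer falls back to kube-system pods
	if err != nil {
		return err
	}
	go gatherer.StartGatheringData()
	return nil
}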
@@ -221,6 +225,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 			finished:                    false,
 			resourceDataGatheringPeriod: options.ResourceDataGatheringPeriod,
 			probeDuration:               options.ProbeDuration,
+			printVerboseLogs:            options.PrintVerboseLogs,
 		})
 	} else {
 		// Tracks kube-system pods if no valid PodList is passed in.
@@ -259,6 +264,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 			inKubemark:                  false,
 			resourceDataGatheringPeriod: options.ResourceDataGatheringPeriod,
 			probeDuration:               options.ProbeDuration,
+			printVerboseLogs:            options.PrintVerboseLogs,
 		})
 		if options.MasterOnly {
 			break
@@ -184,7 +184,7 @@ func testNvidiaGPUsOnCOS(f *framework.Framework) {
 	pods, err := framework.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet"))
 	framework.ExpectNoError(err, "getting pods controlled by the daemonset")
 	framework.Logf("Starting ResourceUsageGather for the created DaemonSet pods.")
-	rsgather, err := framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{false, false, 2 * time.Second, 2 * time.Second}, pods)
+	rsgather, err := framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{false, false, 2 * time.Second, 2 * time.Second, true}, pods)
 	framework.ExpectNoError(err, "creating ResourceUsageGather for the daemonset pods")
 	go rsgather.StartGatheringData()
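The GPU test constructs ResourceGathererOptions positionally, which is why adding PrintVerboseLogs forces this call site to grow a fifth value (the trailing true that turns verbose logging on). A named-field version of the same call would be unaffected by future field additions and reads more clearly; the struct's first field is not visible in this diff, so the InKubemark name below is an assumption:

// Sketch of the same call with named fields; InKubemark is assumed to be the
// struct's first (unshown) field that receives the leading false above.
rsgather, err := framework.NewResourceUsageGatherer(f.ClientSet, framework.ResourceGathererOptions{
	InKubemark:                  false,
	MasterOnly:                  false,
	ResourceDataGatheringPeriod: 2 * time.Second,
	ProbeDuration:               2 * time.Second,
	PrintVerboseLogs:            true,
}, pods)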