Mirror of https://github.com/k3s-io/kubernetes.git
Improve quota sync log messages
commit e5f7af7058
parent ec7a04bd20
@@ -29,6 +29,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/discovery:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
@@ -31,6 +31,7 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime/schema"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+    "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/discovery"
     "k8s.io/client-go/informers"
@@ -423,15 +424,16 @@ func (rq *ResourceQuotaController) Sync(discoveryFunc NamespacedResourcesFunc, p
             return
         }
 
-        // Something has changed, so track the new state and perform a sync.
-        klog.V(2).Infof("syncing resource quota controller with updated resources from discovery: %v", newResources)
-        oldResources = newResources
-
         // Ensure workers are paused to avoid processing events before informers
         // have resynced.
         rq.workerLock.Lock()
         defer rq.workerLock.Unlock()
 
+        // Something has changed, so track the new state and perform a sync.
+        if klog.V(2) {
+            klog.Infof("syncing resource quota controller with updated resources from discovery: %s", printDiff(oldResources, newResources))
+        }
+
         // Perform the monitor resync and wait for controllers to report cache sync.
         if err := rq.resyncMonitors(newResources); err != nil {
             utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors: %v", err))
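In the hunk above, the log line is now wrapped in a verbosity check, so the comparatively expensive printDiff call only runs when V(2) logging is actually enabled. A minimal standalone sketch of that guard pattern follows; it assumes klog v1 semantics (where klog.V(n) can be used directly as a boolean, as in the diff), and expensiveSummary is a made-up stand-in for printDiff:

package main

import (
	"flag"
	"fmt"

	"k8s.io/klog"
)

// expensiveSummary is a made-up stand-in for printDiff: work we only want
// to perform when the log line will actually be emitted.
func expensiveSummary() string {
	return fmt.Sprintf("added: %v, removed: %v", []string{"widgets.v1.example.io"}, []string{})
}

func main() {
	klog.InitFlags(nil) // register -v and friends on the default flag set
	flag.Set("v", "2")  // pretend the controller was started with --v=2
	flag.Parse()

	// klog.V(2) evaluates to true only when verbosity >= 2, so the summary
	// string is built lazily rather than on every sync interval.
	if klog.V(2) {
		klog.Infof("syncing resource quota controller with updated resources from discovery: %s", expensiveSummary())
	}
	klog.Flush()
}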
@@ -440,9 +442,30 @@ func (rq *ResourceQuotaController) Sync(discoveryFunc NamespacedResourcesFunc, p
         if rq.quotaMonitor != nil && !controller.WaitForCacheSync("resource quota", stopCh, rq.quotaMonitor.IsSynced) {
             utilruntime.HandleError(fmt.Errorf("timed out waiting for quota monitor sync"))
         }
+
+        // success, remember newly synced resources
+        oldResources = newResources
+        klog.V(2).Infof("synced quota controller")
     }, period, stopCh)
 }
 
+// printDiff returns a human-readable summary of what resources were added and removed
+func printDiff(oldResources, newResources map[schema.GroupVersionResource]struct{}) string {
+    removed := sets.NewString()
+    for oldResource := range oldResources {
+        if _, ok := newResources[oldResource]; !ok {
+            removed.Insert(fmt.Sprintf("%+v", oldResource))
+        }
+    }
+    added := sets.NewString()
+    for newResource := range newResources {
+        if _, ok := oldResources[newResource]; !ok {
+            added.Insert(fmt.Sprintf("%+v", newResource))
+        }
+    }
+    return fmt.Sprintf("added: %v, removed: %v", added.List(), removed.List())
+}
+
 // resyncMonitors starts or stops quota monitors as needed to ensure that all
 // (and only) those resources present in the map are monitored.
 func (rq *ResourceQuotaController) resyncMonitors(resources map[schema.GroupVersionResource]struct{}) error {
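To illustrate what the new printDiff helper reports, here is a self-contained approximation that uses plain string keys and the standard library instead of schema.GroupVersionResource and the k8s.io/apimachinery sets package; the resource names and the diffSummary/before/after identifiers are invented for the example:

package main

import (
	"fmt"
	"sort"
)

// diffSummary mirrors the idea of printDiff: report which keys appear only
// in the new set (added) and which appear only in the old set (removed).
func diffSummary(oldResources, newResources map[string]struct{}) string {
	var added, removed []string
	for r := range newResources {
		if _, ok := oldResources[r]; !ok {
			added = append(added, r)
		}
	}
	for r := range oldResources {
		if _, ok := newResources[r]; !ok {
			removed = append(removed, r)
		}
	}
	// Sort for stable output, playing the role that sets.String.List() plays in the real code.
	sort.Strings(added)
	sort.Strings(removed)
	return fmt.Sprintf("added: %v, removed: %v", added, removed)
}

func main() {
	before := map[string]struct{}{"pods.v1": {}, "widgets.v1.example.io": {}}
	after := map[string]struct{}{"pods.v1": {}, "gadgets.v1.example.io": {}}
	// Prints: added: [gadgets.v1.example.io], removed: [widgets.v1.example.io]
	fmt.Println(diffSummary(before, after))
}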
@@ -284,11 +284,13 @@ func (qm *QuotaMonitor) IsSynced() bool {
     defer qm.monitorLock.Unlock()
 
     if len(qm.monitors) == 0 {
+        klog.V(4).Info("quota monitor not synced: no monitors")
         return false
     }
 
-    for _, monitor := range qm.monitors {
+    for resource, monitor := range qm.monitors {
         if !monitor.controller.HasSynced() {
+            klog.V(4).Infof("quota monitor not synced: %v", resource)
             return false
         }
     }