Move from glog to klog
- Move from the old github.com/golang/glog to k8s.io/klog
- klog has an explicit InitFlags(), so we add the flags where necessary
- we update the other repositories that we vendor that made the same change from glog to klog:
  * github.com/kubernetes/repo-infra
  * k8s.io/gengo/
  * k8s.io/kube-openapi/
  * github.com/google/cadvisor
- Entirely remove all references to glog
- Fix some tests by calling InitFlags explicitly in their init() methods

Change-Id: I92db545ff36fcec83afe98f550c9e630098b3135
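As background for the InitFlags bullets: glog registered its command-line flags in a package init(), while klog leaves registration to the caller. Below is a minimal sketch, not code from this commit, of how a binary wires this up; klog.InitFlags(nil) registers klog's flags on flag.CommandLine.

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog does not register its flags automatically; the caller opts in.
	// Passing nil registers -v, -logtostderr, etc. on flag.CommandLine.
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush() // write out any buffered log lines before exit

	klog.Info("starting up")
	// V(4) lines, like most of those in this diff, are emitted only
	// when the process runs with -v=4 or higher.
	klog.V(4).Infof("verbose detail for debugging")
}

The test fixes mentioned above follow the same idea: a test file's init() can call klog.InitFlags so the test binary still accepts glog-style flags. A hedged sketch (the package name is made up for illustration):

package quota_test

import (
	"k8s.io/klog"
)

func init() {
	// Register klog's flags before the test binary parses them,
	// so flags like -v keep working for tests that expect them.
	klog.InitFlags(nil)
}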
@@ -37,7 +37,7 @@ go_library(
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )

@@ -22,7 +22,7 @@ import (
     "sync"
     "time"

-    "github.com/golang/glog"
+    "k8s.io/klog"

     "k8s.io/api/core/v1"
     apiequality "k8s.io/apimachinery/pkg/api/equality"
@@ -180,7 +180,7 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) (*Resou

 // enqueueAll is called at the fullResyncPeriod interval to force a full recalculation of quota usage statistics
 func (rq *ResourceQuotaController) enqueueAll() {
-    defer glog.V(4).Infof("Resource quota controller queued all resource quota for full calculation of usage")
+    defer klog.V(4).Infof("Resource quota controller queued all resource quota for full calculation of usage")
     rqs, err := rq.rqLister.List(labels.Everything())
     if err != nil {
         utilruntime.HandleError(fmt.Errorf("unable to enqueue all - error listing resource quotas: %v", err))
@@ -200,7 +200,7 @@ func (rq *ResourceQuotaController) enqueueAll() {
 func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
     key, err := controller.KeyFunc(obj)
     if err != nil {
-        glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
+        klog.Errorf("Couldn't get key for object %+v: %v", obj, err)
         return
     }
     rq.queue.Add(key)
@@ -209,7 +209,7 @@ func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
 func (rq *ResourceQuotaController) addQuota(obj interface{}) {
     key, err := controller.KeyFunc(obj)
     if err != nil {
-        glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
+        klog.Errorf("Couldn't get key for object %+v: %v", obj, err)
         return
     }

@@ -261,7 +261,7 @@ func (rq *ResourceQuotaController) worker(queue workqueue.RateLimitingInterface)
     return func() {
         for {
             if quit := workFunc(); quit {
-                glog.Infof("resource quota controller worker shutting down")
+                klog.Infof("resource quota controller worker shutting down")
                 return
             }
         }
@@ -273,8 +273,8 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
     defer utilruntime.HandleCrash()
     defer rq.queue.ShutDown()

-    glog.Infof("Starting resource quota controller")
-    defer glog.Infof("Shutting down resource quota controller")
+    klog.Infof("Starting resource quota controller")
+    defer klog.Infof("Shutting down resource quota controller")

     if rq.quotaMonitor != nil {
         go rq.quotaMonitor.Run(stopCh)
@@ -298,7 +298,7 @@ func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
 func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err error) {
     startTime := time.Now()
     defer func() {
-        glog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime))
+        klog.V(4).Infof("Finished syncing resource quota %q (%v)", key, time.Since(startTime))
     }()

     namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -307,11 +307,11 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err
     }
     quota, err := rq.rqLister.ResourceQuotas(namespace).Get(name)
     if errors.IsNotFound(err) {
-        glog.Infof("Resource quota has been deleted %v", key)
+        klog.Infof("Resource quota has been deleted %v", key)
         return nil
     }
     if err != nil {
-        glog.Infof("Unable to retrieve resource quota %v from store: %v", key, err)
+        klog.Infof("Unable to retrieve resource quota %v from store: %v", key, err)
         return err
     }
     return rq.syncResourceQuota(quota)
@@ -426,12 +426,12 @@ func (rq *ResourceQuotaController) Sync(discoveryFunc NamespacedResourcesFunc, p

         // Decide whether discovery has reported a change.
         if reflect.DeepEqual(oldResources, newResources) {
-            glog.V(4).Infof("no resource updates from discovery, skipping resource quota sync")
+            klog.V(4).Infof("no resource updates from discovery, skipping resource quota sync")
             return
         }

         // Something has changed, so track the new state and perform a sync.
-        glog.V(2).Infof("syncing resource quota controller with updated resources from discovery: %v", newResources)
+        klog.V(2).Infof("syncing resource quota controller with updated resources from discovery: %v", newResources)
         oldResources = newResources

         // Ensure workers are paused to avoid processing events before informers
@@ -21,7 +21,7 @@ import (
     "sync"
     "time"

-    "github.com/golang/glog"
+    "k8s.io/klog"

     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/meta"
@@ -173,11 +173,11 @@ func (qm *QuotaMonitor) controllerFor(resource schema.GroupVersionResource) (cac
     }
     shared, err := qm.informerFactory.ForResource(resource)
     if err == nil {
-        glog.V(4).Infof("QuotaMonitor using a shared informer for resource %q", resource.String())
+        klog.V(4).Infof("QuotaMonitor using a shared informer for resource %q", resource.String())
         shared.Informer().AddEventHandlerWithResyncPeriod(handlers, qm.resyncPeriod())
         return shared.Informer().GetController(), nil
     }
-    glog.V(4).Infof("QuotaMonitor unable to use a shared informer for resource %q: %v", resource.String(), err)
+    klog.V(4).Infof("QuotaMonitor unable to use a shared informer for resource %q: %v", resource.String(), err)

     // TODO: if we can share storage with garbage collector, it may make sense to support other resources
     // until that time, aggregated api servers will have to run their own controller to reconcile their own quota.
@@ -225,7 +225,7 @@ func (qm *QuotaMonitor) SyncMonitors(resources map[schema.GroupVersionResource]s
             listResourceFunc := generic.ListResourceUsingListerFunc(listerFunc, resource)
             evaluator = generic.NewObjectCountEvaluator(resource.GroupResource(), listResourceFunc, "")
             qm.registry.Add(evaluator)
-            glog.Infof("QuotaMonitor created object count evaluator for %s", resource.GroupResource())
+            klog.Infof("QuotaMonitor created object count evaluator for %s", resource.GroupResource())
         }

         // track the monitor
@@ -240,7 +240,7 @@ func (qm *QuotaMonitor) SyncMonitors(resources map[schema.GroupVersionResource]s
         }
     }

-    glog.V(4).Infof("quota synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove))
+    klog.V(4).Infof("quota synced monitors; added %d, kept %d, removed %d", added, kept, len(toRemove))
     // NewAggregate returns nil if errs is 0-length
     return utilerrors.NewAggregate(errs)
 }
@@ -272,7 +272,7 @@ func (qm *QuotaMonitor) StartMonitors() {
             started++
         }
     }
-    glog.V(4).Infof("QuotaMonitor started %d new monitors, %d currently running", started, len(monitors))
+    klog.V(4).Infof("QuotaMonitor started %d new monitors, %d currently running", started, len(monitors))
 }

 // IsSynced returns true if any monitors exist AND all those monitors'
@@ -298,8 +298,8 @@ func (qm *QuotaMonitor) IsSynced() bool {
 // Run sets the stop channel and starts monitor execution until stopCh is
 // closed. Any running monitors will be stopped before Run returns.
 func (qm *QuotaMonitor) Run(stopCh <-chan struct{}) {
-    glog.Infof("QuotaMonitor running")
-    defer glog.Infof("QuotaMonitor stopping")
+    klog.Infof("QuotaMonitor running")
+    defer klog.Infof("QuotaMonitor stopping")

     // Set up the stop channel.
     qm.monitorLock.Lock()
@@ -323,7 +323,7 @@ func (qm *QuotaMonitor) Run(stopCh <-chan struct{}) {
             close(monitor.stopCh)
         }
     }
-    glog.Infof("QuotaMonitor stopped %d of %d monitors", stopped, len(monitors))
+    klog.Infof("QuotaMonitor stopped %d of %d monitors", stopped, len(monitors))
 }

 func (qm *QuotaMonitor) runProcessResourceChanges() {
@@ -349,7 +349,7 @@ func (qm *QuotaMonitor) processResourceChanges() bool {
         utilruntime.HandleError(fmt.Errorf("cannot access obj: %v", err))
         return true
     }
-    glog.V(4).Infof("QuotaMonitor process object: %s, namespace %s, name %s, uid %s, event type %v", event.gvr.String(), accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType)
+    klog.V(4).Infof("QuotaMonitor process object: %s, namespace %s, name %s, uid %s, event type %v", event.gvr.String(), accessor.GetNamespace(), accessor.GetName(), string(accessor.GetUID()), event.eventType)
     qm.replenishmentFunc(event.gvr.GroupResource(), accessor.GetNamespace())
     return true
 }