Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-04 01:40:07 +00:00)
add names for workqueues to gather controller latency/depth metrics
commit 4317173d3f
parent 0f8869d308
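
Why the names matter: the commit message says the queues are named so that per-controller latency/depth metrics can be gathered, which implies the name passed to workqueue.NewNamed / workqueue.NewNamedRateLimitingQueue is presumably the label under which each queue's depth and latency show up. The following is a minimal sketch of the two constructor variants the diff switches to, not code from the commit itself; the import path is an assumption for this point in the tree (later releases move the package to k8s.io/client-go/util/workqueue), and Add/Get/Done/Forget are the workqueue package's usual interface.

// Sketch only (not from this commit): constructing the named queues the
// diff introduces. The import path below is an assumption for this era of
// the tree.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/workqueue"
)

func main() {
	// Plain FIFO queue, now carrying a name instead of being anonymous.
	q := workqueue.NewNamed("certificate")

	// Rate-limited queue, named the same way.
	rq := workqueue.NewNamedRateLimitingQueue(
		workqueue.DefaultControllerRateLimiter(), "deployment")

	q.Add("csr-1")
	rq.Add("default/nginx")

	// Usual consumption pattern: Get blocks for the next item, Done marks it
	// finished; the queue's depth and the time items spend waiting and being
	// processed are what the per-name instrumentation is meant to capture.
	if item, shutdown := q.Get(); !shutdown {
		fmt.Println("working on", item)
		q.Done(item)
	}
	if key, shutdown := rq.Get(); !shutdown {
		fmt.Println("working on", key)
		rq.Done(key)
		rq.Forget(key) // clear rate-limit backoff after a successful pass
	}
}
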
@@ -79,7 +79,7 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du
     cc := &CertificateController{
         kubeClient: kubeClient,
-        queue: workqueue.New(),
+        queue: workqueue.NewNamed("certificate"),
         signer: ca,
         approveAllKubeletCSRsForGroup: approveAllKubeletCSRsForGroup,
     }
@@ -125,7 +125,7 @@ func NewDaemonSetsController(podInformer framework.SharedIndexInformer, kubeClie
         },
         burstReplicas: BurstReplicas,
         expectations: controller.NewControllerExpectations(),
-        queue: workqueue.New(),
+        queue: workqueue.NewNamed("daemonset"),
     }
     // Manage addition/update of daemon sets.
     dsc.dsStore.Store, dsc.dsController = framework.NewInformer(
@@ -107,7 +107,7 @@ func NewDeploymentController(client clientset.Interface, resyncPeriod controller
     dc := &DeploymentController{
         client: client,
         eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "deployment-controller"}),
-        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
+        queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
     }

     dc.dStore.Indexer, dc.dController = framework.NewIndexerInformer(
@@ -88,7 +88,7 @@ func NewDisruptionController(podInformer framework.SharedIndexInformer, kubeClie
     dc := &DisruptionController{
         kubeClient: kubeClient,
         podController: podInformer.GetController(),
-        queue: workqueue.New(),
+        queue: workqueue.NewNamed("disruption"),
         broadcaster: record.NewBroadcaster(),
     }
     dc.recorder = dc.broadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
@@ -76,7 +76,7 @@ func NewEndpointController(podInformer framework.SharedIndexInformer, client *cl
     }
     e := &EndpointController{
         client: client,
-        queue: workqueue.New(),
+        queue: workqueue.NewNamed("endpoint"),
     }

     e.serviceStore.Store, e.serviceController = framework.NewInformer(
@@ -94,7 +94,7 @@ func NewJobController(podInformer framework.SharedIndexInformer, kubeClient clie
             Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
         },
         expectations: controller.NewControllerExpectations(),
-        queue: workqueue.New(),
+        queue: workqueue.NewNamed("job"),
         recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
     }

@@ -67,7 +67,7 @@ func NewNamespaceController(
     namespaceController := &NamespaceController{
         kubeClient: kubeClient,
         clientPool: clientPool,
-        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
+        queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
         groupVersionResources: groupVersionResources,
         opCache: operationNotSupportedCache{},
         finalizerToken: finalizerToken,
@@ -94,7 +94,7 @@ func NewPetSetController(podInformer framework.SharedIndexInformer, kubeClient *
         newSyncer: func(blockingPet *pcb) *petSyncer {
             return &petSyncer{pc, blockingPet}
         },
-        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
+        queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "petset"),
     }

     podInformer.AddEventHandler(framework.ResourceEventHandlerFuncs{
@@ -139,7 +139,7 @@ func newReplicaSetController(eventRecorder record.EventRecorder, podInformer fra
         },
         burstReplicas: burstReplicas,
         expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
-        queue: workqueue.New(),
+        queue: workqueue.NewNamed("replicaset"),
         garbageCollectorEnabled: garbageCollectorEnabled,
     }

@@ -143,7 +143,7 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer frame
         },
         burstReplicas: burstReplicas,
         expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
-        queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
+        queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "replicationmanager"),
         garbageCollectorEnabled: garbageCollectorEnabled,
     }

@@ -79,8 +79,8 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
     // build the resource quota controller
     rq := &ResourceQuotaController{
         kubeClient: options.KubeClient,
-        queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "controller_resourcequota_primary"),
-        missingUsageQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "controller_resourcequota_priority"),
+        queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_primary"),
+        missingUsageQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "resourcequota_priority"),
         resyncPeriod: options.ResyncPeriod,
         registry: options.Registry,
         replenishmentControllers: []framework.ControllerInterface{},
@@ -81,8 +81,8 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
         token: options.TokenGenerator,
         rootCA: options.RootCA,

-        syncServiceAccountQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
-        syncSecretQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
+        syncServiceAccountQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount_tokens_service"),
+        syncSecretQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount_tokens_secret"),

         maxRetries: maxRetries,
     }
@@ -105,7 +105,7 @@ func NewQuotaEvaluator(quotaAccessor QuotaAccessor, registry quota.Registry, loc

         registry: registry,

-        queue: workqueue.New(),
+        queue: workqueue.NewNamed("admission_quota_controller"),
         work: map[string][]*admissionWaiter{},
         dirtyWork: map[string][]*admissionWaiter{},
         inProgress: sets.String{},
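
For completeness, here is a hedged sketch of how a controller typically drains one of these named rate-limiting queues; the worker function and syncHandler below are illustrative placeholders, not part of this commit.

// Illustrative worker loop over a named rate-limiting queue. syncHandler is
// a hypothetical per-key sync function supplied by the controller.
func worker(queue workqueue.RateLimitingInterface, syncHandler func(key string) error) {
	for {
		item, shutdown := queue.Get()
		if shutdown {
			return
		}
		key := item.(string)
		err := syncHandler(key)
		queue.Done(item)
		if err != nil {
			// Requeue with backoff so the failure is retried later.
			queue.AddRateLimited(key)
			continue
		}
		// Success: drop the key's rate-limit history.
		queue.Forget(key)
	}
}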