remove rate limiter metric as it is not in use

Change-Id: I91157653e3860eeecc3f572aee88da6ffc65faed
Author: Han Kang
Date:   2022-10-13 12:03:35 -07:00
parent b601769721
commit 2bbd445f50
28 changed files with 0 additions and 275 deletions
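
Every file below drops the same dead hook: the controller constructor imported k8s.io/component-base/metrics/prometheus/ratelimiter and, whenever the client exposed a rate limiter, registered a <subsystem>_rate_limiter_use gauge that nothing ever updated. For reference while reading the hunks, a minimal sketch of the pattern being deleted (the package, Controller type, and "example_controller" owner name here are illustrative, not from the tree):

    package example

    import (
    	clientset "k8s.io/client-go/kubernetes"
    	"k8s.io/component-base/metrics/prometheus/ratelimiter" // removed by this commit
    )

    // Controller stands in for any of the controllers touched below.
    type Controller struct {
    	kubeClient clientset.Interface
    }

    // NewController mirrors the constructor hook each hunk deletes.
    func NewController(kubeClient clientset.Interface) (*Controller, error) {
    	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
    		// Registers a gauge named example_controller_rate_limiter_use;
    		// no code ever updates it afterwards (see the deleted TODO below).
    		if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage("example_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil {
    			return nil, err
    		}
    	}
    	return &Controller{kubeClient: kubeClient}, nil
    }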


@@ -41,7 +41,6 @@ import (
 	cloudnodelifecyclecontroller "k8s.io/cloud-provider/controllers/nodelifecycle"
 	routecontroller "k8s.io/cloud-provider/controllers/route"
 	servicecontroller "k8s.io/cloud-provider/controllers/service"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/controller-manager/controller"
 	csitrans "k8s.io/csi-translation-lib"
 	"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
@@ -409,12 +408,6 @@ func startResourceQuotaController(ctx context.Context, controllerContext Control
 		Registry: generic.NewRegistry(quotaConfiguration.Evaluators()),
 		UpdateFilter: quotainstall.DefaultUpdateFilter(),
 	}
-	if resourceQuotaControllerClient.CoreV1().RESTClient().GetRateLimiter() != nil {
-		if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage("resource_quota_controller", resourceQuotaControllerClient.CoreV1().RESTClient().GetRateLimiter()); err != nil {
-			return nil, true, err
-		}
-	}
 	resourceQuotaController, err := resourcequotacontroller.NewController(resourceQuotaControllerOptions)
 	if err != nil {
 		return nil, false, err


@@ -38,7 +38,6 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	bootstrapapi "k8s.io/cluster-bootstrap/token/api"
 	jws "k8s.io/cluster-bootstrap/token/jws"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	api "k8s.io/kubernetes/pkg/apis/core"
 )
@@ -106,11 +105,6 @@ func NewSigner(cl clientset.Interface, secrets informers.SecretInformer, configM
 		configMapSynced: configMaps.Informer().HasSynced,
 		syncQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "bootstrap_signer_queue"),
 	}
-	if cl.CoreV1().RESTClient().GetRateLimiter() != nil {
-		if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage("bootstrap_signer", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil {
-			return nil, err
-		}
-	}
 	configMaps.Informer().AddEventHandlerWithResyncPeriod(
 		cache.FilteringResourceEventHandler{


@@ -33,7 +33,6 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	bootstrapapi "k8s.io/cluster-bootstrap/token/api"
 	bootstrapsecretutil "k8s.io/cluster-bootstrap/util/secrets"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog/v2"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/controller"
@@ -82,12 +81,6 @@ func NewTokenCleaner(cl clientset.Interface, secrets coreinformers.SecretInforme
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "token_cleaner"),
 	}
-	if cl.CoreV1().RESTClient().GetRateLimiter() != nil {
-		if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage("token_cleaner", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil {
-			return nil, err
-		}
-	}
 	secrets.Informer().AddEventHandlerWithResyncPeriod(
 		cache.FilteringResourceEventHandler{
 			FilterFunc: func(obj interface{}) bool {


@@ -32,7 +32,6 @@ import (
 	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog/v2"
 )
@@ -58,11 +57,6 @@ func NewPublisher(cmInformer coreinformers.ConfigMapInformer, nsInformer coreinf
 		rootCA: rootCA,
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "root_ca_cert_publisher"),
 	}
-	if cl.CoreV1().RESTClient().GetRateLimiter() != nil {
-		if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage("root_ca_cert_publisher", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil {
-			return nil, err
-		}
-	}
 	cmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		DeleteFunc: e.configMapDeleted,


@@ -45,7 +45,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	ref "k8s.io/client-go/tools/reference"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/cronjob/metrics"
@@ -85,12 +84,6 @@ func NewControllerV2(jobInformer batchv1informers.JobInformer, cronJobsInformer
 	eventBroadcaster.StartStructuredLogging(0)
 	eventBroadcaster.StartRecordingToSink(&covev1client.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
-	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
-		if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil {
-			return nil, err
-		}
-	}
 	jm := &ControllerV2{
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "cronjob"),
 		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "cronjob-controller"}),


@@ -47,7 +47,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/flowcontrol"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	v1helper "k8s.io/component-helpers/scheduling/corev1"
 	"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -142,11 +141,6 @@ func NewDaemonSetsController(
 ) (*DaemonSetsController, error) {
 	eventBroadcaster := record.NewBroadcaster()
-	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
-		if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil {
-			return nil, err
-		}
-	}
 	dsc := &DaemonSetsController{
 		kubeClient: kubeClient,
 		eventBroadcaster: eventBroadcaster,


@@ -46,7 +46,6 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/deployment/util"
 )
@@ -103,11 +102,6 @@ type DeploymentController struct {
 func NewDeploymentController(dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) {
 	eventBroadcaster := record.NewBroadcaster()
-	if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
-		if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.CoreV1().RESTClient().GetRateLimiter()); err != nil {
-			return nil, err
-		}
-	}
 	dc := &DeploymentController{
 		client: client,
 		eventBroadcaster: eventBroadcaster,


@@ -38,7 +38,6 @@ import (
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/api/v1/endpoints"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
@@ -75,9 +74,6 @@ func NewEndpointController(podInformer coreinformers.PodInformer, serviceInforme
 	broadcaster := record.NewBroadcaster()
 	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-controller"})
-	if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
-		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.CoreV1().RESTClient().GetRateLimiter())
-	}
 	e := &Controller{
 		client: client,
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "endpoint"),


@@ -39,7 +39,6 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller"
 	endpointslicemetrics "k8s.io/kubernetes/pkg/controller/endpointslice/metrics"
@@ -88,10 +87,6 @@ func NewController(podInformer coreinformers.PodInformer,
 	broadcaster := record.NewBroadcaster()
 	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-slice-controller"})
-	if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
-		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("endpoint_slice_controller", client.DiscoveryV1().RESTClient().GetRateLimiter())
-	}
 	endpointslicemetrics.RegisterMetrics()
 	c := &Controller{


@@ -38,7 +38,6 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/endpointslicemirroring/metrics"
@@ -77,10 +76,6 @@ func NewController(endpointsInformer coreinformers.EndpointsInformer,
 	broadcaster := record.NewBroadcaster()
 	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-slice-mirroring-controller"})
-	if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
-		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("endpoint_slice_mirroring_controller", client.DiscoveryV1().RESTClient().GetRateLimiter())
-	}
 	metrics.RegisterMetrics()
 	c := &Controller{


@@ -47,7 +47,6 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog/v2"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/controller"
@@ -131,10 +130,6 @@ type Controller struct {
 func NewController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *Controller {
 	eventBroadcaster := record.NewBroadcaster()
-	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
-		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
-	}
 	jm := &Controller{
 		kubeClient: kubeClient,
 		podControl: controller.RealPodControl{


@@ -33,7 +33,6 @@ import (
 	"k8s.io/client-go/metadata"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/namespace/deletion"
@@ -77,10 +76,6 @@ func NewNamespaceController(
 		namespacedResourcesDeleter: deletion.NewNamespacedResourcesDeleter(kubeClient.CoreV1().Namespaces(), metadataClient, kubeClient.CoreV1(), discoverResourcesFn, finalizerToken),
 	}
-	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
-		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("namespace_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
-	}
 	// configure the namespace informer event handlers
 	namespaceInformer.Informer().AddEventHandlerWithResyncPeriod(
 		cache.ResourceEventHandlerFuncs{


@@ -46,7 +46,6 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	nodeutil "k8s.io/component-helpers/node/util"
 	"k8s.io/klog/v2"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
@@ -133,10 +132,6 @@ func NewMultiCIDRRangeAllocator(
 		Interface: client.CoreV1().Events(""),
 	})
-	if client.CoreV1().RESTClient().GetRateLimiter() != nil {
-		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("multi_cidr_range_allocator", client.CoreV1().RESTClient().GetRateLimiter())
-	}
 	ra := &multiCIDRRangeAllocator{
 		client: client,
 		nodeLister: nodeInformer.Lister(),


@@ -30,7 +30,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	cloudprovider "k8s.io/cloud-provider"
 	controllersmetrics "k8s.io/component-base/metrics/prometheus/controllers"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
 )
@@ -94,10 +93,6 @@ func NewNodeIpamController(
 		Interface: kubeClient.CoreV1().Events(""),
 	})
-	if kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
-		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("node_ipam_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
-	}
 	// Cloud CIDR allocator does not rely on clusterCIDR or nodeCIDRMaskSize for allocation.
 	if allocatorType != ipam.CloudAllocatorType {
 		if len(clusterCIDRs) == 0 {


@@ -50,7 +50,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/flowcontrol"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	nodetopology "k8s.io/component-helpers/node/topology"
 	kubeletapis "k8s.io/kubelet/pkg/apis"
 	"k8s.io/kubernetes/pkg/controller"
@@ -373,10 +372,6 @@ func NewNodeLifecycleController(
 	eventBroadcaster := record.NewBroadcaster()
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "node-controller"})
-	if kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
-		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("node_lifecycle_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
-	}
 	nc := &Controller{
 		kubeClient: kubeClient,
 		now: metav1.Now,


@@ -35,7 +35,6 @@ import (
 	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog/v2"
 	apipod "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/features"
@@ -75,9 +74,6 @@ func NewPodGC(ctx context.Context, kubeClient clientset.Interface, podInformer c
 // This function is only intended for integration tests
 func NewPodGCInternal(ctx context.Context, kubeClient clientset.Interface, podInformer coreinformers.PodInformer,
 	nodeInformer coreinformers.NodeInformer, terminatedPodThreshold int, gcCheckPeriod, quarantineTime time.Duration) *PodGCController {
-	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
-		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
-	}
 	gcc := &PodGCController{
 		kubeClient: kubeClient,
 		terminatedPodThreshold: terminatedPodThreshold,


@@ -56,7 +56,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/component-base/metrics/legacyregistry"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog/v2"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/controller"
@@ -138,9 +137,6 @@ func NewReplicaSetController(rsInformer appsinformers.ReplicaSetInformer, podInf
 // parameters so that it can also serve as the implementation of NewReplicationController.
 func NewBaseController(rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, kubeClient clientset.Interface, burstReplicas int,
 	gvk schema.GroupVersionKind, metricOwnerName, queueName string, podControl controller.PodControlInterface, eventBroadcaster record.EventBroadcaster) *ReplicaSetController {
-	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
-		ratelimiter.RegisterMetricAndTrackRateLimiterUsage(metricOwnerName, kubeClient.CoreV1().RESTClient().GetRateLimiter())
-	}
 	rsc := &ReplicaSetController{
 		GroupVersionKind: gvk,


@@ -32,7 +32,6 @@ import (
 	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog/v2"
 )
@@ -68,11 +67,6 @@ func NewServiceAccountsController(saInformer coreinformers.ServiceAccountInforme
 		serviceAccountsToEnsure: options.ServiceAccounts,
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "serviceaccount"),
 	}
-	if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
-		if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_controller", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil {
-			return nil, err
-		}
-	}
 	saInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
 		DeleteFunc: e.serviceAccountDeleted,


@@ -37,7 +37,6 @@ import (
 	"k8s.io/client-go/tools/cache"
 	clientretry "k8s.io/client-go/util/retry"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/registry/core/secret"
 	"k8s.io/kubernetes/pkg/serviceaccount"
@@ -91,11 +90,6 @@ func NewTokensController(serviceAccounts informers.ServiceAccountInformer, secre
 		maxRetries: maxRetries,
 		autoGenerate: options.AutoGenerate,
 	}
-	if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
-		if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage("serviceaccount_tokens_controller", cl.CoreV1().RESTClient().GetRateLimiter()); err != nil {
-			return nil, err
-		}
-	}
 	e.serviceAccounts = serviceAccounts.Lister()
 	e.serviceAccountSynced = serviceAccounts.Informer().HasSynced


@@ -36,7 +36,6 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/kubectl/pkg/scheme"
 	"k8s.io/kubernetes/pkg/controller"
 	jobutil "k8s.io/kubernetes/pkg/controller/job"
@@ -76,10 +75,6 @@ func New(jobInformer batchinformers.JobInformer, client clientset.Interface) *Co
 	eventBroadcaster.StartStructuredLogging(0)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
-	if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
-		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("ttl_after_finished_controller", client.CoreV1().RESTClient().GetRateLimiter())
-	}
 	metrics.Register()
 	tc := &Controller{


@@ -31,7 +31,6 @@ import (
 	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/component-helpers/storage/ephemeral"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller/volume/common"
@@ -61,9 +60,6 @@ func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimI
 		client: cl,
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
 	}
-	if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
-		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("persistentvolumeclaim_protection_controller", cl.CoreV1().RESTClient().GetRateLimiter())
-	}
 	e.pvcLister = pvcInformer.Lister()
 	e.pvcListerSynced = pvcInformer.Informer().HasSynced


@@ -31,7 +31,6 @@ import (
 	corelisters "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller/volume/protectionutil"
 	"k8s.io/kubernetes/pkg/util/slice"
@@ -55,9 +54,6 @@ func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer
 		client: cl,
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"),
 	}
-	if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
-		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("persistentvolume_protection_controller", cl.CoreV1().RESTClient().GetRateLimiter())
-	}
 	e.pvLister = pvInformer.Lister()
 	e.pvListerSynced = pvInformer.Informer().HasSynced


@@ -42,7 +42,6 @@ import (
 	clientretry "k8s.io/client-go/util/retry"
 	cloudprovider "k8s.io/cloud-provider"
 	controllersmetrics "k8s.io/component-base/metrics/prometheus/controllers"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	nodeutil "k8s.io/component-helpers/node/util"
 )
@@ -70,10 +69,6 @@ type RouteController struct {
 }
 func New(routes cloudprovider.Routes, kubeClient clientset.Interface, nodeInformer coreinformers.NodeInformer, clusterName string, clusterCIDRs []*net.IPNet) *RouteController {
-	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
-		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("route_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
-	}
 	if len(clusterCIDRs) == 0 {
 		klog.Fatal("RouteController: Must specify clusterCIDR.")
 	}


@@ -41,7 +41,6 @@ import (
 	servicehelper "k8s.io/cloud-provider/service/helpers"
 	"k8s.io/component-base/featuregate"
 	controllersmetrics "k8s.io/component-base/metrics/prometheus/controllers"
-	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog/v2"
 )
@@ -109,12 +108,6 @@ func New(
 	broadcaster := record.NewBroadcaster()
 	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "service-controller"})
-	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
-		if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage(subSystemName, kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil {
-			return nil, err
-		}
-	}
 	registerMetrics()
 	s := &Controller{
 		cloud: cloud,


@@ -1,9 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-  - sig-instrumentation-approvers
-  - logicalhan
-reviewers:
-  - sig-instrumentation-reviewers
-labels:
-  - sig/instrumentation


@@ -1,77 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package ratelimiter
-
-import (
-	"fmt"
-	"sync"
-
-	"k8s.io/client-go/util/flowcontrol"
-	"k8s.io/component-base/metrics"
-	"k8s.io/component-base/metrics/legacyregistry"
-)
-
-var (
-	metricsLock        sync.Mutex
-	rateLimiterMetrics = make(map[string]*rateLimiterMetric)
-)
-
-type rateLimiterMetric struct {
-	metric metrics.GaugeMetric
-	stopCh chan struct{}
-}
-
-func registerRateLimiterMetric(ownerName string) error {
-	metricsLock.Lock()
-	defer metricsLock.Unlock()
-
-	if _, ok := rateLimiterMetrics[ownerName]; ok {
-		// only register once in Prometheus. We happen to see an ownerName reused in parallel integration tests.
-		return nil
-	}
-	metric := metrics.NewGauge(&metrics.GaugeOpts{
-		Name:           "rate_limiter_use",
-		Subsystem:      ownerName,
-		Help:           fmt.Sprintf("A metric measuring the saturation of the rate limiter for %v", ownerName),
-		StabilityLevel: metrics.ALPHA,
-	})
-	if err := legacyregistry.Register(metric); err != nil {
-		return fmt.Errorf("error registering rate limiter usage metric: %v", err)
-	}
-	stopCh := make(chan struct{})
-	rateLimiterMetrics[ownerName] = &rateLimiterMetric{
-		metric: metric,
-		stopCh: stopCh,
-	}
-	return nil
-}
-
-// RegisterMetricAndTrackRateLimiterUsage registers a metric ownerName_rate_limiter_use in prometheus to track
-// how much used rateLimiter is and starts a goroutine that updates this metric every updatePeriod
-func RegisterMetricAndTrackRateLimiterUsage(ownerName string, rateLimiter flowcontrol.RateLimiter) error {
-	if err := registerRateLimiterMetric(ownerName); err != nil {
-		return err
-	}
-	// TODO: determine how to track rate limiter saturation
-	// See discussion at https://go-review.googlesource.com/c/time/+/29958#message-4caffc11669cadd90e2da4c05122cfec50ea6a22
-	// go wait.Until(func() {
-	//   metricsLock.Lock()
-	//   defer metricsLock.Unlock()
-	//   rateLimiterMetrics[ownerName].metric.Set()
-	// }, updatePeriod, rateLimiterMetrics[ownerName].stopCh)
-	return nil
-}


@@ -1,59 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package ratelimiter
-
-import (
-	"strings"
-	"testing"
-
-	"k8s.io/client-go/util/flowcontrol"
-)
-
-func TestRegisterMetricAndTrackRateLimiterUsage(t *testing.T) {
-	testCases := []struct {
-		ownerName   string
-		rateLimiter flowcontrol.RateLimiter
-		err         string
-	}{
-		{
-			ownerName:   "owner_name",
-			rateLimiter: flowcontrol.NewTokenBucketRateLimiter(1, 1),
-			err:         "",
-		},
-		{
-			ownerName:   "owner_name",
-			rateLimiter: flowcontrol.NewTokenBucketRateLimiter(1, 1),
-			err:         "already registered",
-		},
-		{
-			ownerName:   "invalid-owner-name",
-			rateLimiter: flowcontrol.NewTokenBucketRateLimiter(1, 1),
-			err:         "error registering rate limiter usage metric",
-		},
-	}
-
-	for i, tc := range testCases {
-		e := RegisterMetricAndTrackRateLimiterUsage(tc.ownerName, tc.rateLimiter)
-		if e != nil {
-			if tc.err == "" {
-				t.Errorf("[%d] unexpected error: %v", i, e)
-			} else if !strings.Contains(e.Error(), tc.err) {
-				t.Errorf("[%d] expected an error containing %q: %v", i, tc.err, e)
-			}
-		}
-	}
-}
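
A note on the deleted test's last case: the owner name became the gauge's Subsystem, and Prometheus metric names may not contain dashes, which is why registration for "invalid-owner-name" was expected to fail. A quick stand-alone check of that rule (using the Prometheus client's own validator, not code from this tree):

    package main

    import (
    	"fmt"

    	"github.com/prometheus/common/model"
    )

    func main() {
    	// The helper joined Subsystem and Name with "_" to form the full metric name.
    	for _, owner := range []string{"owner_name", "invalid-owner-name"} {
    		full := owner + "_rate_limiter_use"
    		// Prints true for owner_name_..., false for invalid-owner-name_...
    		fmt.Printf("%s valid=%v\n", full, model.IsValidMetricName(model.LabelValue(full)))
    	}
    }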

vendor/modules.txt (vendored, 1 deletion)

@@ -1980,7 +1980,6 @@ k8s.io/component-base/metrics/prometheus/clientgo
 k8s.io/component-base/metrics/prometheus/clientgo/leaderelection
 k8s.io/component-base/metrics/prometheus/controllers
 k8s.io/component-base/metrics/prometheus/feature
-k8s.io/component-base/metrics/prometheus/ratelimiter
 k8s.io/component-base/metrics/prometheus/restclient
 k8s.io/component-base/metrics/prometheus/slis
 k8s.io/component-base/metrics/prometheus/version