From c1cddd5fffb2f1a0c5c5da69a8c58191a87028dc Mon Sep 17 00:00:00 2001
From: Aleksander Mistewicz
Date: Wed, 12 Feb 2025 13:34:09 +0100
Subject: [PATCH] Limit ResourceQuota LIST requests to times when informer is not synced

This should reduce the number of slow (100ms) LIST requests made when
there are no ResourceQuota objects present in the namespace. The
behavior stays virtually the same: the live-lookup fallback still runs
while the informer cache has not yet synced, but once it has synced, an
empty lister result is trusted instead of triggering a LIST against the
apiserver.
---
 .../pkg/admission/plugin/resourcequota/admission.go        | 7 ++++++-
 .../pkg/admission/plugin/resourcequota/resource_access.go  | 7 +++++--
 .../admission/plugin/resourcequota/resource_access_test.go | 2 ++
 3 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/admission.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/admission.go
index 25c266479db..cbb339411ce 100644
--- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/admission.go
+++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/admission.go
@@ -114,7 +114,9 @@ func (a *QuotaAdmission) SetExternalKubeClientSet(client kubernetes.Interface) {
 
 // SetExternalKubeInformerFactory registers an informer factory into QuotaAdmission
 func (a *QuotaAdmission) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) {
-	a.quotaAccessor.lister = f.Core().V1().ResourceQuotas().Lister()
+	quotas := f.Core().V1().ResourceQuotas()
+	a.quotaAccessor.lister = quotas.Lister()
+	a.quotaAccessor.hasSynced = quotas.Informer().HasSynced
 }
 
 // SetQuotaConfiguration assigns and initializes configuration and evaluator for QuotaAdmission
@@ -144,6 +146,9 @@ func (a *QuotaAdmission) ValidateInitialization() error {
 	if a.quotaAccessor.lister == nil {
 		return fmt.Errorf("missing quotaAccessor.lister")
 	}
+	if a.quotaAccessor.hasSynced == nil {
+		return fmt.Errorf("missing quotaAccessor.hasSynced")
+	}
 	if a.quotaConfiguration == nil {
 		return fmt.Errorf("missing quotaConfiguration")
 	}
diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/resource_access.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/resource_access.go
index d189446f032..fd4c102e6d5 100644
--- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/resource_access.go
+++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/resource_access.go
@@ -48,6 +48,9 @@ type quotaAccessor struct {
 	// lister can list/get quota objects from a shared informer's cache
 	lister corev1listers.ResourceQuotaLister
 
+	// hasSynced indicates whether the lister has completed its initial sync
+	hasSynced func() bool
+
 	// liveLookups holds the last few live lookups we've done to help ammortize cost on repeated lookup failures.
 	// This lets us handle the case of latent caches, by looking up actual results for a namespace on cache miss/no results.
 	// We track the lookup result here so that for repeated requests, we don't look it up very often.
@@ -112,8 +115,8 @@ func (e *quotaAccessor) GetQuotas(namespace string) ([]corev1.ResourceQuota, err
 		return nil, fmt.Errorf("error resolving quota: %v", err)
 	}
 
-	// if there are no items held in our indexer, check our live-lookup LRU, if that misses, do the live lookup to prime it.
-	if len(items) == 0 {
+	// if there are no items held in our unsynced lister, check our live-lookup LRU, if that misses, do the live lookup to prime it.
+	if len(items) == 0 && !e.hasSynced() {
 		lruItemObj, ok := e.liveLookupCache.Get(namespace)
 		if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) {
 			// use singleflight.Group to avoid flooding the apiserver with repeated
diff --git a/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/resource_access_test.go b/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/resource_access_test.go
index 9bd035a8028..f9383313459 100644
--- a/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/resource_access_test.go
+++ b/staging/src/k8s.io/apiserver/pkg/admission/plugin/resourcequota/resource_access_test.go
@@ -97,6 +97,7 @@ func TestLRUCacheLookup(t *testing.T) {
 			accessor, _ := newQuotaAccessor()
 			accessor.client = kubeClient
 			accessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister()
+			accessor.hasSynced = func() bool { return false }
 			accessor.liveLookupCache = liveLookupCache
 
 			for _, q := range tc.cacheInput {
@@ -151,6 +152,7 @@ func TestGetQuotas(t *testing.T) {
 			accessor, _ := newQuotaAccessor()
 			accessor.client = kubeClient
 			accessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister()
+			accessor.hasSynced = func() bool { return false }
 
 			kubeClient.AddReactor("list", "resourcequotas", func(action core.Action) (bool, runtime.Object, error) {
 				switch action.GetNamespace() {
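
Reviewer note (not part of the patch): the sketch below is a minimal, self-contained illustration of the pattern the diff applies, assuming client-go's shared informer machinery. The getQuotas helper, the fake clientset, and the wiring in main are hypothetical stand-ins for quotaAccessor.GetQuotas and SetExternalKubeInformerFactory; the LRU cache and singleflight handling from the real code are omitted.

package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	corev1listers "k8s.io/client-go/listers/core/v1"
)

// getQuotas is a hypothetical helper mirroring the guarded fallback in
// quotaAccessor.GetQuotas: an empty lister result is only trusted once
// hasSynced reports true; before that, a live LIST is issued so a cache
// that has not caught up yet cannot hide existing quotas.
func getQuotas(ctx context.Context, client kubernetes.Interface, lister corev1listers.ResourceQuotaLister, hasSynced func() bool, namespace string) ([]*corev1.ResourceQuota, error) {
	items, err := lister.ResourceQuotas(namespace).List(labels.Everything())
	if err != nil {
		return nil, fmt.Errorf("error resolving quota: %w", err)
	}
	if len(items) == 0 && !hasSynced() {
		// The cache may simply be unsynced; ask the apiserver directly.
		// (The real plugin also rate-limits this with an LRU cache and
		// singleflight, omitted here for brevity.)
		liveList, err := client.CoreV1().ResourceQuotas(namespace).List(ctx, metav1.ListOptions{})
		if err != nil {
			return nil, err
		}
		for i := range liveList.Items {
			items = append(items, &liveList.Items[i])
		}
	}
	return items, nil
}

func main() {
	// A fake clientset keeps the sketch runnable without a cluster.
	client := fake.NewSimpleClientset()
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	quotas := factory.Core().V1().ResourceQuotas()

	// Mirrors the wiring added to SetExternalKubeInformerFactory: keep both
	// the lister and the informer's HasSynced so callers can tell whether an
	// empty list means "no quotas" or "cache not populated yet".
	lister := quotas.Lister()
	hasSynced := quotas.Informer().HasSynced

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)

	found, err := getQuotas(context.TODO(), client, lister, hasSynced, "default")
	fmt.Println(len(found), err)
}

Once the informer completes its initial LIST/WATCH, hasSynced flips to true and an empty lister result is returned as-is, which is what removes the extra LIST traffic for namespaces that have no ResourceQuota objects.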