mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-22 03:11:40 +00:00
Limit ResourceQuota LIST requests to times when informer is not synced
This should reduce the number of slow (100ms) LIST requests when there are no ResourceQuota objects present in the namespace. The behavior stays virtually the same.
This commit is contained in:
parent
1d723e3849
commit
c1cddd5fff
@ -114,7 +114,9 @@ func (a *QuotaAdmission) SetExternalKubeClientSet(client kubernetes.Interface) {
|
||||
|
||||
// SetExternalKubeInformerFactory registers an informer factory into QuotaAdmission
|
||||
func (a *QuotaAdmission) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) {
|
||||
a.quotaAccessor.lister = f.Core().V1().ResourceQuotas().Lister()
|
||||
quotas := f.Core().V1().ResourceQuotas()
|
||||
a.quotaAccessor.lister = quotas.Lister()
|
||||
a.quotaAccessor.hasSynced = quotas.Informer().HasSynced
|
||||
}
|
||||
|
||||
// SetQuotaConfiguration assigns and initializes configuration and evaluator for QuotaAdmission
|
||||
@ -144,6 +146,9 @@ func (a *QuotaAdmission) ValidateInitialization() error {
|
||||
if a.quotaAccessor.lister == nil {
|
||||
return fmt.Errorf("missing quotaAccessor.lister")
|
||||
}
|
||||
if a.quotaAccessor.hasSynced == nil {
|
||||
return fmt.Errorf("missing quotaAccessor.hasSynced")
|
||||
}
|
||||
if a.quotaConfiguration == nil {
|
||||
return fmt.Errorf("missing quotaConfiguration")
|
||||
}
|
||||
|
@ -48,6 +48,9 @@ type quotaAccessor struct {
|
||||
// lister can list/get quota objects from a shared informer's cache
|
||||
lister corev1listers.ResourceQuotaLister
|
||||
|
||||
// hasSynced indicates whether the lister has completed its initial sync
|
||||
hasSynced func() bool
|
||||
|
||||
// liveLookups holds the last few live lookups we've done to help amortize cost on repeated lookup failures.
|
||||
// This lets us handle the case of latent caches, by looking up actual results for a namespace on cache miss/no results.
|
||||
// We track the lookup result here so that for repeated requests, we don't look it up very often.
|
||||
@ -112,8 +115,8 @@ func (e *quotaAccessor) GetQuotas(namespace string) ([]corev1.ResourceQuota, err
|
||||
return nil, fmt.Errorf("error resolving quota: %v", err)
|
||||
}
|
||||
|
||||
// if there are no items held in our indexer, check our live-lookup LRU, if that misses, do the live lookup to prime it.
|
||||
if len(items) == 0 {
|
||||
// if there are no items held in our unsynced lister, check our live-lookup LRU, if that misses, do the live lookup to prime it.
|
||||
if len(items) == 0 && !e.hasSynced() {
|
||||
lruItemObj, ok := e.liveLookupCache.Get(namespace)
|
||||
if !ok || lruItemObj.(liveLookupEntry).expiry.Before(time.Now()) {
|
||||
// use singleflight.Group to avoid flooding the apiserver with repeated
|
||||
|
@ -97,6 +97,7 @@ func TestLRUCacheLookup(t *testing.T) {
|
||||
accessor, _ := newQuotaAccessor()
|
||||
accessor.client = kubeClient
|
||||
accessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister()
|
||||
accessor.hasSynced = func() bool { return false }
|
||||
accessor.liveLookupCache = liveLookupCache
|
||||
|
||||
for _, q := range tc.cacheInput {
|
||||
@ -151,6 +152,7 @@ func TestGetQuotas(t *testing.T) {
|
||||
accessor, _ := newQuotaAccessor()
|
||||
accessor.client = kubeClient
|
||||
accessor.lister = informerFactory.Core().V1().ResourceQuotas().Lister()
|
||||
accessor.hasSynced = func() bool { return false }
|
||||
|
||||
kubeClient.AddReactor("list", "resourcequotas", func(action core.Action) (bool, runtime.Object, error) {
|
||||
switch action.GetNamespace() {
|
||||
|
Loading…
Reference in New Issue
Block a user