Merge pull request #130864 from xigang/factor-out-index-helper

controller: factor out pod node name indexer helper function
Kubernetes Prow Robot 2025-03-17 08:18:18 -07:00 committed by GitHub
commit eec06b4169
2 changed files with 28 additions and 16 deletions

pkg/controller/controller_utils.go

@@ -83,6 +83,9 @@ const (
 	// The number of batches is given by:
 	// 1+floor(log_2(ceil(N/SlowStartInitialBatchSize)))
 	SlowStartInitialBatchSize = 1
+
+	// PodNodeNameKeyIndex is the name of the index used by PodInformer to index pods by their node name.
+	PodNodeNameKeyIndex = "spec.nodeName"
 )
 
 var UpdateTaintBackoff = wait.Backoff{
@@ -1051,6 +1054,28 @@ func FilterReplicaSets(RSes []*apps.ReplicaSet, filterFn filterRS) []*apps.ReplicaSet {
 	return filtered
 }
 
+// AddPodNodeNameIndexer adds an indexer for Pod's nodeName to the given PodInformer.
+// This indexer is used to efficiently look up pods by their node name.
+func AddPodNodeNameIndexer(podInformer cache.SharedIndexInformer) error {
+	if _, exists := podInformer.GetIndexer().GetIndexers()[PodNodeNameKeyIndex]; exists {
+		// indexer already exists, do nothing
+		return nil
+	}
+	return podInformer.AddIndexers(cache.Indexers{
+		PodNodeNameKeyIndex: func(obj interface{}) ([]string, error) {
+			pod, ok := obj.(*v1.Pod)
+			if !ok {
+				return []string{}, nil
+			}
+			if len(pod.Spec.NodeName) == 0 {
+				return []string{}, nil
+			}
+			return []string{pod.Spec.NodeName}, nil
+		},
+	})
+}
+
 // PodKey returns a key unique to the given pod within a cluster.
 // It's used so we consistently use the same key scheme in this module.
 // It does exactly what cache.MetaNamespaceKeyFunc would have done
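
For readers outside the diff, here is a minimal sketch of how a consumer might wire this helper into a client-go shared informer and then query pods by node name. The function and variable names below (podsOnNode, client, stopCh) and the 30-second resync are illustrative, not part of this change, and the sketch assumes in-tree access to k8s.io/kubernetes/pkg/controller.

// Illustrative sketch only: register the shared nodeName index on a pod
// informer, then look pods up by node via the index.
package example

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/pkg/controller"
)

// podsOnNode is a hypothetical helper, not part of the patch.
func podsOnNode(client kubernetes.Interface, nodeName string, stopCh <-chan struct{}) ([]*v1.Pod, error) {
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	podInformer := factory.Core().V1().Pods().Informer()

	// Safe even if another controller already registered the index:
	// the helper returns nil when PodNodeNameKeyIndex is present.
	if err := controller.AddPodNodeNameIndexer(podInformer); err != nil {
		return nil, err
	}

	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, podInformer.HasSynced)

	// ByIndex returns untyped cache entries; convert them to pods.
	objs, err := podInformer.GetIndexer().ByIndex(controller.PodNodeNameKeyIndex, nodeName)
	if err != nil {
		return nil, err
	}
	pods := make([]*v1.Pod, 0, len(objs))
	for _, obj := range objs {
		pod, ok := obj.(*v1.Pod)
		if !ok {
			return nil, fmt.Errorf("unexpected object type %T in pod index", obj)
		}
		pods = append(pods, pod)
	}
	return pods, nil
}

The up-front GetIndexers() check is what makes the helper idempotent: AddIndexers would otherwise fail when an index with the same name is already registered, which matters once several controllers share one pod informer.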

pkg/controller/nodelifecycle/node_lifecycle_controller.go

@@ -125,8 +125,7 @@ const (
 const (
 	// The amount of time the nodecontroller should sleep between retrying node health updates
-	retrySleepTime   = 20 * time.Millisecond
-	nodeNameKeyIndex = "spec.nodeName"
+	retrySleepTime = 20 * time.Millisecond
 	// podUpdateWorkerSizes assumes that in most cases pod will be handled by monitorNodeHealth pass.
 	// Pod update workers will only handle lagging cache pods. 4 workers should be enough.
 	podUpdateWorkerSize = 4
@@ -388,22 +387,10 @@ func NewNodeLifecycleController(
 		},
 	})
 	nc.podInformerSynced = podInformer.Informer().HasSynced
-	podInformer.Informer().AddIndexers(cache.Indexers{
-		nodeNameKeyIndex: func(obj interface{}) ([]string, error) {
-			pod, ok := obj.(*v1.Pod)
-			if !ok {
-				return []string{}, nil
-			}
-			if len(pod.Spec.NodeName) == 0 {
-				return []string{}, nil
-			}
-			return []string{pod.Spec.NodeName}, nil
-		},
-	})
+	controller.AddPodNodeNameIndexer(podInformer.Informer())
 	podIndexer := podInformer.Informer().GetIndexer()
 	nc.getPodsAssignedToNode = func(nodeName string) ([]*v1.Pod, error) {
-		objs, err := podIndexer.ByIndex(nodeNameKeyIndex, nodeName)
+		objs, err := podIndexer.ByIndex(controller.PodNodeNameKeyIndex, nodeName)
 		if err != nil {
 			return nil, err
 		}
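
The hunk is truncated at the error check. For completeness, the remainder of the getPodsAssignedToNode closure presumably converts the indexer's untyped results into typed pods, roughly along these lines (a sketch, not the verbatim tail of the function):

	// Sketch of the closure's tail (cut off by the truncated hunk):
	// convert the indexer's []interface{} results into []*v1.Pod.
	pods := make([]*v1.Pod, 0, len(objs))
	for _, obj := range objs {
		pod, ok := obj.(*v1.Pod)
		if !ok {
			continue // ignore non-pod cache entries
		}
		pods = append(pods, pod)
	}
	return pods, nil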