Mirror of https://github.com/k3s-io/kubernetes.git
Only wait for cache syncs once in NodeController
parent 928b8cbdb8
commit e7befa2a14
@@ -356,12 +356,16 @@ func NewNodeController(
 // Run starts an asynchronous loop that monitors the status of cluster nodes.
 func (nc *NodeController) Run() {
-	// Incorporate the results of node status pushed from kubelet to master.
-	go wait.Until(func() {
+	go func() {
+		defer utilruntime.HandleCrash()
+
 		if !cache.WaitForCacheSync(wait.NeverStop, nc.nodeInformer.Informer().HasSynced, nc.podInformer.Informer().HasSynced, nc.daemonSetInformer.Informer().HasSynced) {
-			glog.Errorf("NodeController timed out while waiting for informers to sync...")
+			utilruntime.HandleError(errors.New("NodeController timed out while waiting for informers to sync..."))
 			return
 		}
 
+		// Incorporate the results of node status pushed from kubelet to master.
+		go wait.Until(func() {
 			if err := nc.monitorNodeStatus(); err != nil {
 				glog.Errorf("Error monitoring node status: %v", err)
 			}
@@ -380,10 +384,6 @@ func (nc *NodeController) Run() {
 		// c. If there are pods still terminating, wait for their estimated completion
 		// before retrying
 		go wait.Until(func() {
-			if !cache.WaitForCacheSync(wait.NeverStop, nc.nodeInformer.Informer().HasSynced, nc.podInformer.Informer().HasSynced, nc.daemonSetInformer.Informer().HasSynced) {
-				glog.Errorf("NodeController timed out while waiting for informers to sync...")
-				return
-			}
 			nc.evictorLock.Lock()
 			defer nc.evictorLock.Unlock()
 			for k := range nc.zonePodEvictor {
@@ -417,10 +417,6 @@ func (nc *NodeController) Run() {
 		// TODO: replace with a controller that ensures pods that are terminating complete
 		// in a particular time period
 		go wait.Until(func() {
-			if !cache.WaitForCacheSync(wait.NeverStop, nc.nodeInformer.Informer().HasSynced, nc.podInformer.Informer().HasSynced, nc.daemonSetInformer.Informer().HasSynced) {
-				glog.Errorf("NodeController timed out while waiting for informers to sync...")
-				return
-			}
 			nc.evictorLock.Lock()
 			defer nc.evictorLock.Unlock()
 			for k := range nc.zoneTerminationEvictor {
@@ -447,6 +443,7 @@ func (nc *NodeController) Run() {
 			})
 		}
 	}, nodeEvictionPeriod, wait.NeverStop)
+	}()
 }
 
 // monitorNodeStatus verifies node status are constantly updated by kubelet, and if not,
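The shape of the change, read from the diff above: instead of each wait.Until loop calling cache.WaitForCacheSync at the top of every tick, Run now wraps everything in a single goroutine that waits for the informer caches once (guarded by utilruntime.HandleCrash, reporting failure through utilruntime.HandleError) and only then starts the periodic loops. Below is a minimal, self-contained sketch of that pattern, not the actual NodeController code: the fakeController type, its fields, and the loop bodies are illustrative stand-ins, and the client-go/apimachinery import paths are assumed to be current ones rather than the vendored paths in use at the time of this commit.

package main

import (
	"errors"
	"log"
	"time"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/cache"
)

// fakeController is an illustrative stand-in for a controller that owns
// informer-backed caches and several periodic work loops.
type fakeController struct {
	nodeListerSynced cache.InformerSynced // func() bool reporting whether the node cache has synced
	podListerSynced  cache.InformerSynced // same for the pod cache
	monitorPeriod    time.Duration
	evictionPeriod   time.Duration
}

// Run waits for the caches to sync once, then starts every periodic loop.
func (c *fakeController) Run() {
	go func() {
		defer utilruntime.HandleCrash()

		// Single sync barrier shared by all loops started below.
		if !cache.WaitForCacheSync(wait.NeverStop, c.nodeListerSynced, c.podListerSynced) {
			utilruntime.HandleError(errors.New("timed out waiting for informer caches to sync"))
			return
		}

		// Every loop below may now assume the caches are warm.
		go wait.Until(func() { log.Println("monitoring node status") }, c.monitorPeriod, wait.NeverStop)
		go wait.Until(func() { log.Println("processing evictions") }, c.evictionPeriod, wait.NeverStop)
	}()
}

func main() {
	c := &fakeController{
		// Pretend the caches are already synced so the sketch runs standalone.
		nodeListerSynced: func() bool { return true },
		podListerSynced:  func() bool { return true },
		monitorPeriod:    time.Second,
		evictionPeriod:   2 * time.Second,
	}
	c.Run()
	time.Sleep(3 * time.Second) // give the loops a moment to tick
}

The practical difference is that WaitForCacheSync, which blocks while polling the sync state, runs exactly once per controller rather than once per loop iteration, and a sync failure is reported through the shared error handler instead of being logged repeatedly by every loop.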