mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-25 12:43:23 +00:00)
fix sync loop health check

commit 96cb43993a
parent f89d2493f1
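Summary of the change, as visible in the hunks below: the timestamp consumed by the kubelet's sync-loop health check is now stored in syncLoop immediately before and immediately after each call to syncLoopIteration, rather than at the top and bottom of syncLoopIteration itself, and the unit test that asserted the old placement (TestSyncLoopTimeUpdate) is removed.

For orientation, here is a minimal sketch of how such a loop-entry timestamp is typically consumed by a health check. It is not code from this commit: the LatestLoopEntryTime accessor name is borrowed from the removed test below, while the loopMonitor interface, checkSyncLoop function, and threshold value are illustrative assumptions.

package main

import (
	"fmt"
	"time"
)

// loopMonitor stands in for whatever records the timestamp around each
// sync-loop iteration (syncLoopMonitor in the diff below).
type loopMonitor interface {
	LatestLoopEntryTime() time.Time
}

// checkSyncLoop fails when the recorded timestamp is older than the given
// threshold, i.e. when an iteration appears to be stuck.
func checkSyncLoop(m loopMonitor, threshold time.Duration) error {
	last := m.LatestLoopEntryTime()
	if last.IsZero() {
		// The loop has not started yet; report healthy rather than failing.
		return nil
	}
	if age := time.Since(last); age > threshold {
		return fmt.Errorf("sync loop has not progressed for %v", age)
	}
	return nil
}

type fakeMonitor struct{ last time.Time }

func (f fakeMonitor) LatestLoopEntryTime() time.Time { return f.last }

func main() {
	fmt.Println(checkSyncLoop(fakeMonitor{time.Now()}, 2*time.Minute))                        // <nil>
	fmt.Println(checkSyncLoop(fakeMonitor{time.Now().Add(-10 * time.Minute)}, 2*time.Minute)) // error
}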
@@ -1802,9 +1802,12 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
 			time.Sleep(5 * time.Second)
 			continue
 		}
+
+		kl.syncLoopMonitor.Store(kl.clock.Now())
 		if !kl.syncLoopIteration(updates, handler, syncTicker.C, housekeepingTicker.C, plegCh) {
 			break
 		}
+		kl.syncLoopMonitor.Store(kl.clock.Now())
 	}
 }
 
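The hunk above records the monitor timestamp around each iteration rather than inside it, so an iteration that hangs leaves a stale value behind for the health check to notice. A minimal sketch of that pattern, assuming the monitor is an atomic.Value holding a time.Time (the concrete type of syncLoopMonitor is not shown in this diff):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type loop struct {
	// monitor holds the last time an iteration was entered or exited.
	monitor atomic.Value
}

func (l *loop) run(iterations int, iterate func() bool) {
	for i := 0; i < iterations; i++ {
		l.monitor.Store(time.Now()) // before: proves the loop reached this iteration
		if !iterate() {
			break
		}
		l.monitor.Store(time.Now()) // after: proves the iteration completed
	}
}

func main() {
	l := &loop{}
	l.run(3, func() bool { time.Sleep(10 * time.Millisecond); return true })
	if last, ok := l.monitor.Load().(time.Time); ok {
		fmt.Println("last iteration observed at:", last)
	}
}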
@@ -1842,7 +1845,6 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
 // containers have failed liveness checks
 func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handler SyncHandler,
 	syncCh <-chan time.Time, housekeepingCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool {
-	kl.syncLoopMonitor.Store(kl.clock.Now())
 	select {
 	case u, open := <-configCh:
 		// Update from a config source; dispatch it to the right handler
@@ -1936,7 +1938,6 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
 			}
 		}
 	}
-	kl.syncLoopMonitor.Store(kl.clock.Now())
 	return true
 }
 
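With the two removals above, syncLoopIteration no longer touches the monitor at all; readers of the timestamp only see what syncLoop stores. The removed test below calls a LatestLoopEntryTime accessor and checks IsZero, which suggests a reader along these lines; the wrapper type and method body here are assumptions, not code from this commit:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// kubeletLike is a stand-in with only the field this sketch needs.
type kubeletLike struct {
	syncLoopMonitor atomic.Value
}

// LatestLoopEntryTime mirrors the accessor used by the removed test:
// it returns the zero time.Time until the loop has stored a timestamp.
func (k *kubeletLike) LatestLoopEntryTime() time.Time {
	v, ok := k.syncLoopMonitor.Load().(time.Time)
	if !ok {
		return time.Time{}
	}
	return v
}

func main() {
	k := &kubeletLike{}
	fmt.Println(k.LatestLoopEntryTime().IsZero()) // true before any iteration
	k.syncLoopMonitor.Store(time.Now())
	fmt.Println(k.LatestLoopEntryTime().IsZero()) // false once a timestamp is stored
}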
@@ -326,32 +326,6 @@ func newTestPods(count int) []*v1.Pod {
 
 var emptyPodUIDs map[types.UID]kubetypes.SyncPodType
 
-func TestSyncLoopTimeUpdate(t *testing.T) {
-	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
-	defer testKubelet.Cleanup()
-	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
-	kubelet := testKubelet.kubelet
-
-	loopTime1 := kubelet.LatestLoopEntryTime()
-	require.True(t, loopTime1.IsZero(), "Expect sync loop time to be zero")
-
-	// Start sync ticker.
-	syncCh := make(chan time.Time, 1)
-	housekeepingCh := make(chan time.Time, 1)
-	plegCh := make(chan *pleg.PodLifecycleEvent)
-	syncCh <- time.Now()
-	kubelet.syncLoopIteration(make(chan kubetypes.PodUpdate), kubelet, syncCh, housekeepingCh, plegCh)
-	loopTime2 := kubelet.LatestLoopEntryTime()
-	require.False(t, loopTime2.IsZero(), "Expect sync loop time to be non-zero")
-
-	syncCh <- time.Now()
-	kubelet.syncLoopIteration(make(chan kubetypes.PodUpdate), kubelet, syncCh, housekeepingCh, plegCh)
-	loopTime3 := kubelet.LatestLoopEntryTime()
-	require.True(t, loopTime3.After(loopTime1),
-		"Sync Loop Time was not updated correctly. Second update timestamp %v should be greater than first update timestamp %v",
-		loopTime3, loopTime1)
-}
-
 func TestSyncLoopAbort(t *testing.T) {
 	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	defer testKubelet.Cleanup()
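The removed TestSyncLoopTimeUpdate drove syncLoopIteration directly through a fake sync channel and asserted that each call advanced LatestLoopEntryTime; after this commit that assertion no longer holds, because the timestamp is stored by syncLoop instead. As an illustration only (using a stand-in loop type rather than the kubelet test fixtures), the same intent retargeted at the new placement could look like this, placed in a _test.go file and run with go test:

package main

import (
	"sync/atomic"
	"testing"
	"time"
)

// watchedLoop is a stand-in for syncLoop plus its monitor field.
type watchedLoop struct {
	monitor atomic.Value
}

// runOnce stores the timestamp before and after a single iteration,
// matching the placement introduced by this commit.
func (l *watchedLoop) runOnce(iterate func() bool) {
	l.monitor.Store(time.Now())
	iterate()
	l.monitor.Store(time.Now())
}

func TestLoopEntryTimeAdvances(t *testing.T) {
	l := &watchedLoop{}
	if _, ok := l.monitor.Load().(time.Time); ok {
		t.Fatal("expected no timestamp before the first iteration")
	}
	l.runOnce(func() bool { return true })
	first, ok := l.monitor.Load().(time.Time)
	if !ok || first.IsZero() {
		t.Fatal("expected a timestamp after the first iteration")
	}
	time.Sleep(time.Millisecond)
	l.runOnce(func() bool { return true })
	if second := l.monitor.Load().(time.Time); !second.After(first) {
		t.Fatalf("timestamp did not advance: first=%v second=%v", first, second)
	}
}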