avoid requiring runtime to be up while initializing kubelet image manager
commit 129dbc734c
parent 89c3cb2f43
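In short: NewMainKubelet used to block, with a timeout, until the container runtime was up, and the image manager's Start() did a synchronous initial detectImages call that failed when the runtime was unreachable. This commit moves the initial detection into the image manager's periodic loop, guarded by a new initialized flag, so the kubelet can be constructed and started while the runtime is still coming up. The Go sketch below illustrates that lazy-detection pattern; the names (imageDetector, tick, detect) are illustrative stand-ins, not the kubelet's actual types.

package main

import (
	"errors"
	"fmt"
	"time"
)

// Illustrative sketch of the pattern this commit adopts (names are stand-ins,
// not the kubelet's API): instead of failing when the container runtime is
// down, retry detection on every tick and only switch to "fresh" timestamps
// once a first pass has succeeded.
type imageDetector struct {
	initialized bool
	detect      func(detectedAt time.Time) error // stand-in for detectImages
}

func (d *imageDetector) tick() {
	// Until the first successful detection, pass the zero time so every image
	// counts as "seen long ago" and remains a garbage-collection candidate.
	var ts time.Time
	if d.initialized {
		ts = time.Now()
	}
	if err := d.detect(ts); err != nil {
		fmt.Printf("image detection failed, will retry next tick: %v\n", err)
		return
	}
	d.initialized = true
}

func main() {
	calls := 0
	d := &imageDetector{detect: func(ts time.Time) error {
		calls++
		if calls < 3 {
			return errors.New("container runtime not up yet") // simulate a runtime that is still starting
		}
		fmt.Printf("detected images, freshness timestamp %v\n", ts)
		return nil
	}}
	// Stands in for the util.Until loop that drives detection every 5 minutes.
	for i := 0; i < 4; i++ {
		d.tick()
	}
}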
@@ -76,6 +76,9 @@ type realImageManager struct {
 
 	// Reference to this node.
 	nodeRef *api.ObjectReference
+
+	// Track initialization
+	initialized bool
 }
 
 // Information about the images we track.
@@ -105,23 +108,24 @@ func newImageManager(runtime container.Runtime, cadvisorInterface cadvisor.Inter
 		cadvisor:     cadvisorInterface,
 		recorder:     recorder,
 		nodeRef:      nodeRef,
+		initialized:  false,
 	}
 
 	return im, nil
 }
 
 func (im *realImageManager) Start() error {
-	// Initial detection make detected time "unknown" in the past.
-	var zero time.Time
-	err := im.detectImages(zero)
-	if err != nil {
-		return err
-	}
-
 	go util.Until(func() {
-		err := im.detectImages(time.Now())
+		// Initial detection make detected time "unknown" in the past.
+		var ts time.Time
+		if im.initialized {
+			ts = time.Now()
+		}
+		err := im.detectImages(ts)
 		if err != nil {
 			glog.Warningf("[ImageManager] Failed to monitor images: %v", err)
+		} else {
+			im.initialized = true
+		}
 	}, 5*time.Minute, util.NeverStop)
 
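The effect of the Start() change above is that the first detectImages call no longer runs synchronously during startup; it runs on the first iteration of the util.Until loop, and failures are logged and retried rather than aborting. util.Until (later renamed wait.Until in Kubernetes) runs its function immediately and then once per period until the stop channel closes, with util.NeverStop keeping it running forever. Below is a rough stand-in for that behavior, not the upstream implementation, just to show why Start() can now return nil even while the runtime is down.

package main

import (
	"fmt"
	"time"
)

// Rough stand-in for util.Until's behavior (not the upstream implementation):
// run f right away, then once per period, until stopCh is closed.
func until(f func(), period time.Duration, stopCh <-chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
		}
		f()
		select {
		case <-stopCh:
			return
		case <-time.After(period):
		}
	}
}

func main() {
	stop := make(chan struct{})
	go until(func() { fmt.Println("detect images (errors are logged, not fatal)") }, 200*time.Millisecond, stop)
	time.Sleep(time.Second) // Start() has already returned; detection keeps retrying in the background.
	close(stop)
}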
@@ -396,10 +396,6 @@ func NewMainKubelet(
 	}
 	klet.containerManager = containerManager
 
-	// Wait for the runtime to be up with a timeout.
-	if err := waitUntilRuntimeIsUp(klet.containerRuntime, maxWaitForContainerRuntime); err != nil {
-		return nil, fmt.Errorf("timed out waiting for %q to come up: %v", containerRuntime, err)
-	}
 	klet.runtimeState.setRuntimeSync(time.Now())
 
 	klet.runner = klet.containerRuntime
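With the image manager no longer needing a live runtime at construction time, the blocking wait above can be dropped. For orientation only, the removed helper behaved roughly like the poll-until-deadline sketch below; the name waitForRuntime and its signature are illustrative, not the upstream waitUntilRuntimeIsUp. Dropping it means a kubelet whose runtime is still starting no longer fails inside NewMainKubelet; runtime health is tracked asynchronously instead, as the Run() hunk further down shows.

package main

import (
	"errors"
	"fmt"
	"time"
)

// Illustrative stand-in for the removed startup wait: poll a runtime health
// check until it succeeds or the deadline passes. Blocking here meant kubelet
// construction failed outright whenever the runtime was down.
func waitForRuntime(healthy func() error, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if err := healthy(); err == nil {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("timed out after %v waiting for the container runtime", timeout)
}

func main() {
	err := waitForRuntime(func() error { return errors.New("runtime still starting") }, time.Second)
	fmt.Println(err) // before this commit, an error like this aborted NewMainKubelet
}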
@@ -429,7 +425,6 @@ func NewMainKubelet(
 
 	klet.backOff = util.NewBackOff(resyncInterval, MaxContainerBackOff)
 	klet.podKillingCh = make(chan *kubecontainer.Pod, podKillingChannelCapacity)
-
 	klet.sourcesSeen = sets.NewString()
 	return klet, nil
 }
@@ -842,12 +837,11 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
 		kl.runtimeState.setInitError(err)
 	}
 
+	go util.Until(kl.syncNetworkStatus, 30*time.Second, util.NeverStop)
 	if kl.kubeClient != nil {
 		// Start syncing node status immediately, this may set up things the runtime needs to run.
 		go util.Until(kl.syncNodeStatus, kl.nodeStatusUpdateFrequency, util.NeverStop)
 	}
-
-	go util.Until(kl.syncNetworkStatus, 30*time.Second, util.NeverStop)
 	go util.Until(kl.updateRuntimeUp, 5*time.Second, util.NeverStop)
 
 	// Start a goroutine responsible for killing pods (that are not properly
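In Run(), the network-status loop now starts ahead of the node-status block rather than after it, and the existing periodic updateRuntimeUp loop is what keeps the kubelet's view of runtime health fresh. The sketch below shows the general shape of that bookkeeping with an illustrative runtimeState, not the kubelet's actual type: each successful probe records a timestamp, and the runtime is treated as down whenever that timestamp is missing or stale, which is what lets startup proceed without a blocking wait.

package main

import (
	"fmt"
	"sync"
	"time"
)

// Illustrative sketch (not the kubelet's actual runtimeState type) of how a
// periodic health probe can replace a startup wait: each successful probe
// records a timestamp, and the runtime is considered down whenever that
// timestamp is missing or older than a freshness threshold.
type runtimeState struct {
	mu       sync.Mutex
	lastSync time.Time
}

func (s *runtimeState) setRuntimeSync(t time.Time) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.lastSync = t
}

func (s *runtimeState) runtimeUp(threshold time.Duration) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	return !s.lastSync.IsZero() && time.Since(s.lastSync) < threshold
}

func main() {
	state := &runtimeState{}
	fmt.Println("runtime up?", state.runtimeUp(10*time.Second)) // false until a probe succeeds
	state.setRuntimeSync(time.Now())                            // what a successful periodic probe would record
	fmt.Println("runtime up?", state.runtimeUp(10*time.Second)) // true while probes keep succeeding
}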