mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-09-21 18:11:22 +00:00
pkg/kubelet: improve the node informer sync check
GetNode() is called in a lot of places, including a hot loop in fastStatusUpdateOnce. Having a poll in it delays the kubelet /readyz status=200 report. If a client is available, attempt to wait for the node informer sync to happen before starting the list watch for pods at the apiserver.
This commit is contained in:
@@ -124,9 +124,6 @@ const (
|
||||
// Max amount of time to wait for the container runtime to come up.
|
||||
maxWaitForContainerRuntime = 30 * time.Second
|
||||
|
||||
// Max amount of time to wait for node list/watch to initially sync
|
||||
maxWaitForAPIServerSync = 10 * time.Second
|
||||
|
||||
// nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed.
|
||||
nodeStatusUpdateRetry = 5
|
||||
|
||||
@@ -257,7 +254,7 @@ type DockerOptions struct {
|
||||
|
||||
// makePodSourceConfig creates a config.PodConfig from the given
|
||||
// KubeletConfiguration or returns an error.
|
||||
func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName) (*config.PodConfig, error) {
|
||||
func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName, nodeHasSynced func() bool) (*config.PodConfig, error) {
|
||||
manifestURLHeader := make(http.Header)
|
||||
if len(kubeCfg.StaticPodURLHeader) > 0 {
|
||||
for k, v := range kubeCfg.StaticPodURLHeader {
|
||||
@@ -283,8 +280,8 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku
|
||||
}
|
||||
|
||||
if kubeDeps.KubeClient != nil {
|
||||
klog.InfoS("Watching apiserver")
|
||||
config.NewSourceApiserver(kubeDeps.KubeClient, nodeName, cfg.Channel(kubetypes.ApiserverSource))
|
||||
klog.InfoS("Adding apiserver pod source")
|
||||
config.NewSourceApiserver(kubeDeps.KubeClient, nodeName, nodeHasSynced, cfg.Channel(kubetypes.ApiserverSource))
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
@@ -390,9 +387,32 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
||||
}
|
||||
}
|
||||
|
||||
var nodeHasSynced cache.InformerSynced
|
||||
var nodeLister corelisters.NodeLister
|
||||
|
||||
// If kubeClient == nil, we are running in standalone mode (i.e. no API servers)
|
||||
// If not nil, we are running as part of a cluster and should sync w/API
|
||||
if kubeDeps.KubeClient != nil {
|
||||
kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
|
||||
options.FieldSelector = fields.Set{metav1.ObjectNameField: string(nodeName)}.String()
|
||||
}))
|
||||
nodeLister = kubeInformers.Core().V1().Nodes().Lister()
|
||||
nodeHasSynced = func() bool {
|
||||
return kubeInformers.Core().V1().Nodes().Informer().HasSynced()
|
||||
}
|
||||
kubeInformers.Start(wait.NeverStop)
|
||||
klog.InfoS("Attempting to sync node with API server")
|
||||
} else {
|
||||
// we don't have a client to sync!
|
||||
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
|
||||
nodeLister = corelisters.NewNodeLister(nodeIndexer)
|
||||
nodeHasSynced = func() bool { return true }
|
||||
klog.InfoS("Kubelet is running in standalone mode, will skip API server sync")
|
||||
}
|
||||
|
||||
if kubeDeps.PodConfig == nil {
|
||||
var err error
|
||||
kubeDeps.PodConfig, err = makePodSourceConfig(kubeCfg, kubeDeps, nodeName)
|
||||
kubeDeps.PodConfig, err = makePodSourceConfig(kubeCfg, kubeDeps, nodeName, nodeHasSynced)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -433,8 +453,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
||||
|
||||
var serviceLister corelisters.ServiceLister
|
||||
var serviceHasSynced cache.InformerSynced
|
||||
// If kubeClient == nil, we are running in standalone mode (i.e. no API servers)
|
||||
// If not nil, we are running as part of a cluster and should sync w/API
|
||||
if kubeDeps.KubeClient != nil {
|
||||
kubeInformers := informers.NewSharedInformerFactory(kubeDeps.KubeClient, 0)
|
||||
serviceLister = kubeInformers.Core().V1().Services().Lister()
|
||||
@@ -446,31 +464,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
||||
serviceHasSynced = func() bool { return true }
|
||||
}
|
||||
|
||||
var nodeHasSynced cache.InformerSynced
|
||||
var nodeLister corelisters.NodeLister
|
||||
|
||||
if kubeDeps.KubeClient != nil {
|
||||
kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
|
||||
options.FieldSelector = fields.Set{metav1.ObjectNameField: string(nodeName)}.String()
|
||||
}))
|
||||
nodeLister = kubeInformers.Core().V1().Nodes().Lister()
|
||||
nodeHasSynced = func() bool {
|
||||
if kubeInformers.Core().V1().Nodes().Informer().HasSynced() {
|
||||
return true
|
||||
}
|
||||
klog.InfoS("Kubelet nodes not sync")
|
||||
return false
|
||||
}
|
||||
kubeInformers.Start(wait.NeverStop)
|
||||
klog.InfoS("Kubelet client is not nil")
|
||||
} else {
|
||||
// we don't have a client to sync!
|
||||
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
|
||||
nodeLister = corelisters.NewNodeLister(nodeIndexer)
|
||||
nodeHasSynced = func() bool { return true }
|
||||
klog.InfoS("Kubelet client is nil")
|
||||
}
|
||||
|
||||
// construct a node reference used for events
|
||||
nodeRef := &v1.ObjectReference{
|
||||
Kind: "Node",
|
||||
|
Reference in New Issue
Block a user