Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-25 12:43:23 +00:00)

Merge pull request #94109 from derekwaynecarr/cleanup-kubelet-todos

Cleanup kubelet TODOs that are no longer pertinent.

Commit 47943d5f9c
@@ -137,7 +137,7 @@ const (
 	housekeepingPeriod = time.Second * 2

 	// Period for performing eviction monitoring.
-	// TODO ensure this is in sync with internal cadvisor housekeeping.
+	// ensure this is kept in sync with internal cadvisor housekeeping.
 	evictionMonitoringPeriod = time.Second * 10

 	// The path in containers' filesystems where the hosts file is mounted.
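A period constant like evictionMonitoringPeriod only matters insofar as something polls on it. A minimal sketch of that pattern, assuming a hypothetical checkPressure stand-in rather than the kubelet's actual eviction manager:

package main

import (
	"fmt"
	"time"
)

// Mirrors the constant above; the loop below is illustrative, not kubelet code.
const evictionMonitoringPeriod = time.Second * 10

// checkPressure is a hypothetical stand-in for an eviction check.
func checkPressure(i int) {
	fmt.Printf("eviction check %d: inspecting node resource pressure\n", i)
}

func main() {
	ticker := time.NewTicker(evictionMonitoringPeriod)
	defer ticker.Stop()
	for i := 1; i <= 3; i++ {
		<-ticker.C
		checkPressure(i)
	}
}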
@@ -442,9 +442,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	}
 	nodeLister := corelisters.NewNodeLister(nodeIndexer)

-	// TODO: get the real node object of ourself,
-	// and use the real node name and UID.
-	// TODO: what is namespace for node?
+	// construct a node reference used for events
 	nodeRef := &v1.ObjectReference{
 		Kind:      "Node",
 		Name:      string(nodeName),
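The rewritten comment states what the reference is actually for: attributing events to the node. A self-contained sketch of that use, assuming a FakeRecorder in place of the kubelet's real event recorder (the node name and reason are hypothetical):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
)

func main() {
	nodeName := types.NodeName("node-1") // hypothetical node name
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      string(nodeName),
		UID:       types.UID(nodeName),
		Namespace: "",
	}

	// FakeRecorder stands in for the kubelet's real recorder so this runs standalone.
	recorder := record.NewFakeRecorder(10)
	recorder.Eventf(nodeRef, v1.EventTypeNormal, "NodeReady", "Node %s status is now: NodeReady", nodeName)
	fmt.Println(<-recorder.Events)
}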
@@ -958,8 +956,6 @@ type Kubelet struct {
 	streamingRuntime kubecontainer.StreamingRuntime

 	// Container runtime service (needed by container runtime Start()).
-	// TODO(CD): try to make this available without holding a reference in this
-	// struct. For example, by adding a getter to generic runtime.
 	runtimeService internalapi.RuntimeService

 	// reasonCache caches the failure reason of the last creation of all containers, which is
@@ -1079,7 +1075,6 @@ type Kubelet struct {
 	// maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
 	lastNodeUnschedulable bool

-	// TODO: think about moving this to be centralized in PodWorkers in follow-on.
 	// the list of handlers to call during pod admission.
 	admitHandlers lifecycle.PodAdmitHandlers

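For context, admitHandlers is consulted as a chain: the first handler that rejects a pod wins. A self-contained sketch of that pattern, with simplified stand-ins that only mimic the shape of the lifecycle.PodAdmitHandlers types:

package main

import "fmt"

type PodAdmitResult struct {
	Admit   bool
	Reason  string
	Message string
}

type PodAdmitHandler interface {
	Admit(podName string) PodAdmitResult
}

type PodAdmitHandlers []PodAdmitHandler

// alwaysDeny is a hypothetical handler used only for the demo.
type alwaysDeny struct{}

func (alwaysDeny) Admit(podName string) PodAdmitResult {
	return PodAdmitResult{Admit: false, Reason: "OutOfResources", Message: "example denial"}
}

func main() {
	handlers := PodAdmitHandlers{alwaysDeny{}}
	for _, h := range handlers {
		// First rejection short-circuits admission.
		if result := h.Admit("demo-pod"); !result.Admit {
			fmt.Printf("pod rejected: %s: %s\n", result.Reason, result.Message)
			return
		}
	}
	fmt.Println("pod admitted")
}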
@@ -1337,7 +1332,6 @@ func (kl *Kubelet) initializeModules() error {
 func (kl *Kubelet) initializeRuntimeDependentModules() {
 	if err := kl.cadvisor.Start(); err != nil {
 		// Fail kubelet and rely on the babysitter to retry starting kubelet.
-		// TODO(random-liu): Add backoff logic in the babysitter
 		klog.Fatalf("Failed to start cAdvisor %v", err)
 	}

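The "babysitter" here is the external process supervisor (for example systemd) that restarts the kubelet after klog.Fatalf exits it, which is why the backoff TODO is no longer the kubelet's concern. A toy sketch of what restart-with-backoff looks like in such a supervisor; the binary path is hypothetical and this is not kubelet code:

package main

import (
	"fmt"
	"os/exec"
	"time"
)

func main() {
	backoff := time.Second
	const maxBackoff = 30 * time.Second
	for {
		// Hypothetical kubelet path; Run blocks until the process exits.
		err := exec.Command("/usr/local/bin/kubelet").Run()
		fmt.Printf("kubelet exited (err=%v); restarting in %v\n", err, backoff)
		time.Sleep(backoff)
		// Double the delay after each failure, up to a cap.
		if backoff *= 2; backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
}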
@@ -1744,7 +1738,6 @@ func (kl *Kubelet) deletePod(pod *v1.Pod) error {
 	podPair := kubecontainer.PodPair{APIPod: pod, RunningPod: &runningPod}

 	kl.podKiller.KillPod(&podPair)
-	// TODO: delete the mirror pod here?

 	// We leave the volume/directory cleanup to the periodic cleanup routine.
 	return nil
@@ -2070,8 +2063,6 @@ func (kl *Kubelet) HandlePodUpdates(pods []*v1.Pod) {
 			kl.handleMirrorPod(pod, start)
 			continue
 		}
-		// TODO: Evaluate if we need to validate and reject updates.
-
 		mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
 		kl.dispatchWork(pod, kubetypes.SyncPodUpdate, mirrorPod, start)
 	}
@@ -2160,7 +2151,6 @@ func (kl *Kubelet) updateRuntimeUp() {
 		return
 	}
 	// Periodically log the whole runtime status for debugging.
-	// TODO(random-liu): Consider to send node event when optional
 	// condition is unmet.
 	klog.V(4).Infof("Container runtime status: %v", s)
 	networkReady := s.GetRuntimeCondition(kubecontainer.NetworkReady)
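GetRuntimeCondition is a lookup over the runtime's reported conditions, where a nil result or Status == false both mean "not ready". A self-contained sketch of that pattern, with simplified stand-ins for the kubecontainer types:

package main

import "fmt"

type RuntimeConditionType string

const NetworkReady RuntimeConditionType = "NetworkReady"

type RuntimeCondition struct {
	Type   RuntimeConditionType
	Status bool
	Reason string
}

type RuntimeStatus struct {
	Conditions []RuntimeCondition
}

// GetRuntimeCondition returns the condition of the given type, or nil if absent.
func (s *RuntimeStatus) GetRuntimeCondition(t RuntimeConditionType) *RuntimeCondition {
	for i := range s.Conditions {
		if s.Conditions[i].Type == t {
			return &s.Conditions[i]
		}
	}
	return nil
}

func main() {
	s := &RuntimeStatus{Conditions: []RuntimeCondition{
		{Type: NetworkReady, Status: false, Reason: "CNINotReady"}, // hypothetical reason
	}}
	networkReady := s.GetRuntimeCondition(NetworkReady)
	if networkReady == nil || !networkReady.Status {
		fmt.Printf("runtime network not ready: %+v\n", networkReady)
	}
}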
@@ -524,8 +524,6 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
 // message for the node.
 func (kl *Kubelet) recordNodeStatusEvent(eventType, event string) {
 	klog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName)
-	// TODO: This requires a transaction, either both node status is updated
-	// and event is recorded or neither should happen, see issue #6055.
 	kl.recorder.Eventf(kl.nodeRef, eventType, event, "Node %s status is now: %s", kl.nodeName, event)
 }

@@ -834,7 +834,6 @@ func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, co
 }

 // One of the following arguments must be non-nil: runningPod, status.
-// TODO: Modify containerRuntime.KillPod() to accept the right arguments.
 func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error {
 	var p kubecontainer.Pod
 	if runningPod != nil {
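The comment that survives states the contract: at least one of runningPod or status must be non-nil. A minimal sketch of enforcing such a contract, with simplified stand-in types and a hypothetical convertStatusToRunningPod helper:

package main

import (
	"errors"
	"fmt"
)

type Pod struct{ Name string }
type PodStatus struct{ Name string }

// convertStatusToRunningPod is hypothetical; it stands in for deriving a
// running pod from a cached status when no runningPod was passed.
func convertStatusToRunningPod(status *PodStatus) Pod {
	return Pod{Name: status.Name}
}

func killPod(runningPod *Pod, status *PodStatus) error {
	var p Pod
	switch {
	case runningPod != nil:
		p = *runningPod
	case status != nil:
		p = convertStatusToRunningPod(status)
	default:
		return errors.New("one of the two arguments must be non-nil: runningPod, status")
	}
	fmt.Printf("killing pod %q\n", p.Name)
	return nil
}

func main() {
	_ = killPod(&Pod{Name: "demo"}, nil)
	if err := killPod(nil, nil); err != nil {
		fmt.Println("error:", err)
	}
}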
@@ -1095,7 +1094,6 @@ func (kl *Kubelet) HandlePodCleanups() error {
 		desiredPods[pod.UID] = sets.Empty{}
 	}
 	// Stop the workers for no-longer existing pods.
-	// TODO: is here the best place to forget pod workers?
 	kl.podWorkers.ForgetNonExistingPodWorkers(desiredPods)
 	kl.probeManager.CleanupPods(desiredPods)

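ForgetNonExistingPodWorkers reconciles against the desiredPods set built above: any worker whose UID is missing from the set is dropped. A self-contained sketch of that set-difference cleanup, using simplified stand-ins for types.UID and the worker map:

package main

import "fmt"

type UID string

func main() {
	// Hypothetical worker table keyed by pod UID.
	workers := map[UID]string{"a": "pod-a", "b": "pod-b", "c": "pod-c"}

	// Pods that still exist; the kubelet builds this as map[types.UID]sets.Empty.
	desiredPods := map[UID]struct{}{"a": {}, "c": {}}

	for uid := range workers {
		if _, ok := desiredPods[uid]; !ok {
			fmt.Printf("forgetting worker for %s (%s)\n", uid, workers[uid])
			delete(workers, uid) // safe to delete while ranging in Go
		}
	}
	fmt.Println("remaining workers:", workers)
}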
|