diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index b75a9a094b0..f5827b53280 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -138,7 +138,7 @@ const (
 	housekeepingPeriod = time.Second * 2
 
 	// Period for performing eviction monitoring.
-	// TODO ensure this is in sync with internal cadvisor housekeeping.
+	// ensure this is kept in sync with internal cadvisor housekeeping.
 	evictionMonitoringPeriod = time.Second * 10
 
 	// The path in containers' filesystems where the hosts file is mounted.
@@ -440,9 +440,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	}
 	nodeLister := corelisters.NewNodeLister(nodeIndexer)
 
-	// TODO: get the real node object of ourself,
-	// and use the real node name and UID.
-	// TODO: what is namespace for node?
+	// construct a node reference used for events
 	nodeRef := &v1.ObjectReference{
 		Kind:      "Node",
 		Name:      string(nodeName),
@@ -948,8 +946,6 @@ type Kubelet struct {
 	streamingRuntime kubecontainer.StreamingRuntime
 
 	// Container runtime service (needed by container runtime Start()).
-	// TODO(CD): try to make this available without holding a reference in this
-	// struct. For example, by adding a getter to generic runtime.
 	runtimeService internalapi.RuntimeService
 
 	// reasonCache caches the failure reason of the last creation of all containers, which is
@@ -1069,7 +1065,6 @@ type Kubelet struct {
 	// maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
 	lastNodeUnschedulable bool
 
-	// TODO: think about moving this to be centralized in PodWorkers in follow-on.
 	// the list of handlers to call during pod admission.
 	admitHandlers lifecycle.PodAdmitHandlers
 
@@ -1275,7 +1270,6 @@ func (kl *Kubelet) initializeModules() error {
 func (kl *Kubelet) initializeRuntimeDependentModules() {
 	if err := kl.cadvisor.Start(); err != nil {
 		// Fail kubelet and rely on the babysitter to retry starting kubelet.
-		// TODO(random-liu): Add backoff logic in the babysitter
 		klog.Fatalf("Failed to start cAdvisor %v", err)
 	}
 
@@ -1680,7 +1674,6 @@ func (kl *Kubelet) deletePod(pod *v1.Pod) error {
 	podPair := kubecontainer.PodPair{APIPod: pod, RunningPod: &runningPod}
 
 	kl.podKiller.KillPod(&podPair)
-	// TODO: delete the mirror pod here?
 
 	// We leave the volume/directory cleanup to the periodic cleanup routine.
 	return nil
@@ -2003,8 +1996,6 @@ func (kl *Kubelet) HandlePodUpdates(pods []*v1.Pod) {
 			kl.handleMirrorPod(pod, start)
 			continue
 		}
-		// TODO: Evaluate if we need to validate and reject updates.
-
 		mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
 		kl.dispatchWork(pod, kubetypes.SyncPodUpdate, mirrorPod, start)
 	}
@@ -2093,7 +2084,5 @@ func (kl *Kubelet) updateRuntimeUp() {
 		return
 	}
 	// Periodically log the whole runtime status for debugging.
-	// TODO(random-liu): Consider to send node event when optional
-	// condition is unmet.
 	klog.V(4).Infof("Container runtime status: %v", s)
 	networkReady := s.GetRuntimeCondition(kubecontainer.NetworkReady)
diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go
index bb4a7e379d3..64d3a257cb2 100644
--- a/pkg/kubelet/kubelet_node_status.go
+++ b/pkg/kubelet/kubelet_node_status.go
@@ -524,8 +524,6 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
 // message for the node.
 func (kl *Kubelet) recordNodeStatusEvent(eventType, event string) {
 	klog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName)
-	// TODO: This requires a transaction, either both node status is updated
-	// and event is recorded or neither should happen, see issue #6055.
 	kl.recorder.Eventf(kl.nodeRef, eventType, event, "Node %s status is now: %s", kl.nodeName, event)
 }
 
diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go
index 386f82d1591..4df062a172b 100644
--- a/pkg/kubelet/kubelet_pods.go
+++ b/pkg/kubelet/kubelet_pods.go
@@ -836,7 +836,6 @@ func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, co
 }
 
 // One of the following arguments must be non-nil: runningPod, status.
-// TODO: Modify containerRuntime.KillPod() to accept the right arguments.
 func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error {
 	var p kubecontainer.Pod
 	if runningPod != nil {
@@ -1087,7 +1086,6 @@ func (kl *Kubelet) HandlePodCleanups() error {
 		desiredPods[pod.UID] = sets.Empty{}
 	}
 	// Stop the workers for no-longer existing pods.
-	// TODO: is here the best place to forget pod workers?
 	kl.podWorkers.ForgetNonExistingPodWorkers(desiredPods)
 	kl.probeManager.CleanupPods(desiredPods)
 