diff --git a/pkg/kubelet/pod_workers.go b/pkg/kubelet/pod_workers.go
index 8d72e1ad55e..a176b2fb984 100644
--- a/pkg/kubelet/pod_workers.go
+++ b/pkg/kubelet/pod_workers.go
@@ -775,16 +775,23 @@ func (p *podWorkers) UpdatePod(options UpdatePodOptions) {
 		}
 		// if this pod is being synced for the first time, we need to make sure it is an active pod
 		if options.Pod != nil && (options.Pod.Status.Phase == v1.PodFailed || options.Pod.Status.Phase == v1.PodSucceeded) {
-			// check to see if the pod is not running and the pod is terminal.
-			// If this succeeds then record in the podWorker that it is terminated.
+			// Check to see if the pod is not running and the pod is terminal; if this succeeds then record in the podWorker that it is terminated.
+			// This is needed because after a kubelet restart, we need to ensure terminal pods will NOT be considered active in Pod Admission. See http://issues.k8s.io/105523
+			// However, `filterOutInactivePods`, considers pods that are actively terminating as active. As a result, `IsPodKnownTerminated()` needs to return true and thus `terminatedAt` needs to be set.
 			if statusCache, err := p.podCache.Get(uid); err == nil {
 				if isPodStatusCacheTerminal(statusCache) {
+					// At this point we know:
+					// (1) The pod is terminal based on the config source.
+					// (2) The pod is terminal based on the runtime cache.
+					// This implies that this pod had already completed `SyncTerminatingPod` sometime in the past. The pod is likely being synced for the first time due to a kubelet restart.
+					// These pods need to complete SyncTerminatedPod to ensure that all resources are cleaned and that the status manager makes the final status updates for the pod.
+					// As a result, set finished: false, to ensure a Terminated event will be sent and `SyncTerminatedPod` will run.
 					status = &podSyncStatus{
 						terminatedAt:       now,
 						terminatingAt:      now,
 						syncedAt:           now,
 						startedTerminating: true,
-						finished:           true,
+						finished:           false,
 						fullname:           kubecontainer.BuildPodFullName(name, ns),
 					}
 				}
diff --git a/pkg/kubelet/pod_workers_test.go b/pkg/kubelet/pod_workers_test.go
index 3fcd2465c14..8fca3c255a8 100644
--- a/pkg/kubelet/pod_workers_test.go
+++ b/pkg/kubelet/pod_workers_test.go
@@ -754,23 +754,41 @@ func TestUpdatePod(t *testing.T) {
 			expectKnownTerminated: true,
 		},
 		{
-			name: "a pod that is terminal and has never started is finished immediately if the runtime has a cached terminal state",
+			name: "a pod that is terminal and has never started advances to finished if the runtime has a cached terminal state",
 			update: UpdatePodOptions{
 				UpdateType: kubetypes.SyncPodCreate,
 				Pod:        newPodWithPhase("1", "done-pod", v1.PodSucceeded),
 			},
 			runtimeStatus: &kubecontainer.PodStatus{ /* we know about this pod */ },
-			expect: &podSyncStatus{
+			expectBeforeWorker: &podSyncStatus{
+				fullname:      "done-pod_ns",
+				syncedAt:      time.Unix(1, 0),
+				terminatingAt: time.Unix(1, 0),
+				terminatedAt:  time.Unix(1, 0),
+				pendingUpdate: &UpdatePodOptions{
+					UpdateType: kubetypes.SyncPodCreate,
+					Pod:        newPodWithPhase("1", "done-pod", v1.PodSucceeded),
+				},
+				finished:           false, // Should be marked as not finished initially (to ensure `SyncTerminatedPod` will run) and status will progress to terminated.
+				startedTerminating: true,
+				working:            true,
+			},
+			expect: hasContext(&podSyncStatus{
 				fullname:           "done-pod_ns",
 				syncedAt:           time.Unix(1, 0),
 				terminatingAt:      time.Unix(1, 0),
 				terminatedAt:       time.Unix(1, 0),
+				startedAt:          time.Unix(3, 0),
 				startedTerminating: true,
 				finished:           true,
+				activeUpdate: &UpdatePodOptions{
+					UpdateType: kubetypes.SyncPodSync,
+					Pod:        newPodWithPhase("1", "done-pod", v1.PodSucceeded),
+				},
 				// if we have never seen the pod before, a restart makes no sense
 				restartRequested: false,
-			},
+			}),
 			expectKnownTerminated: true,
 		},
 		{