diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 9d78c2c9b08..76ec6217520 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -2019,8 +2019,9 @@ func (kl *Kubelet) dispatchWork(pod *v1.Pod, syncType kubetypes.SyncPodType, mir
 	}
 
 	// optimization: avoid invoking the pod worker if no further changes are possible to the pod definition
-	if podWorkerTerminal {
-		klog.V(4).Infof("Pod %q has completed, ignoring remaining sync work: %s", format.Pod(pod), syncType)
+	// (i.e. the pod has completed and its containers have been terminated)
+	if podWorkerTerminal && containersTerminal {
+		klog.V(4).InfoS("Pod has completed and its containers have been terminated, ignoring remaining sync work", "pod", klog.KObj(pod), "syncType", syncType)
 		return
 	}
 
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index 5fe77b9c75c..aaf74b400d0 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -473,6 +473,133 @@ func TestSyncPodsDeletesWhenSourcesAreReadyPerQOS(t *testing.T) {
 	assert.True(t, destroyCount >= 1, "Expect 1 or more destroys")
 }
 
+func TestDispatchWorkOfCompletedPod(t *testing.T) {
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
+	defer testKubelet.Cleanup()
+	kubelet := testKubelet.kubelet
+	kubelet.podWorkers = &fakePodWorkers{
+		syncPodFn: func(options syncPodOptions) error {
+			return fmt.Errorf("should ignore completed pod %q", options.pod.Name)
+		},
+		cache: kubelet.podCache,
+		t:     t,
+	}
+	now := metav1.NewTime(time.Now())
+	pods := []*v1.Pod{
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				UID:         "1",
+				Name:        "completed-pod1",
+				Namespace:   "ns",
+				Annotations: make(map[string]string),
+			},
+			Status: v1.PodStatus{
+				Phase: v1.PodFailed,
+				ContainerStatuses: []v1.ContainerStatus{
+					{
+						State: v1.ContainerState{
+							Terminated: &v1.ContainerStateTerminated{},
+						},
+					},
+				},
+			},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				UID:         "2",
+				Name:        "completed-pod2",
+				Namespace:   "ns",
+				Annotations: make(map[string]string),
+			},
+			Status: v1.PodStatus{
+				Phase: v1.PodSucceeded,
+				ContainerStatuses: []v1.ContainerStatus{
+					{
+						State: v1.ContainerState{
+							Terminated: &v1.ContainerStateTerminated{},
+						},
+					},
+				},
+			},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				UID:               "3",
+				Name:              "completed-pod3",
+				Namespace:         "ns",
+				Annotations:       make(map[string]string),
+				DeletionTimestamp: &now,
+			},
+			Status: v1.PodStatus{
+				ContainerStatuses: []v1.ContainerStatus{
+					{
+						State: v1.ContainerState{
+							Terminated: &v1.ContainerStateTerminated{},
+						},
+					},
+				},
+			},
+		},
+	}
+	for _, pod := range pods {
+		kubelet.dispatchWork(pod, kubetypes.SyncPodSync, nil, time.Now())
+	}
+}
+
+func TestDispatchWorkOfActivePod(t *testing.T) {
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
+	defer testKubelet.Cleanup()
+	kubelet := testKubelet.kubelet
+	var got bool
+	kubelet.podWorkers = &fakePodWorkers{
+		syncPodFn: func(options syncPodOptions) error {
+			got = true
+			return nil
+		},
+		cache: kubelet.podCache,
+		t:     t,
+	}
+	pods := []*v1.Pod{
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				UID:         "1",
+				Name:        "active-pod1",
+				Namespace:   "ns",
+				Annotations: make(map[string]string),
+			},
+			Status: v1.PodStatus{
+				Phase: v1.PodRunning,
+			},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				UID:         "2",
+				Name:        "active-pod2",
+				Namespace:   "ns",
+				Annotations: make(map[string]string),
+			},
+			Status: v1.PodStatus{
+				Phase: v1.PodFailed,
+				ContainerStatuses: []v1.ContainerStatus{
+					{
+						State: v1.ContainerState{
+							Running: &v1.ContainerStateRunning{},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	for _, pod := range pods {
+		kubelet.dispatchWork(pod, kubetypes.SyncPodSync, nil, time.Now())
+		if !got {
+			t.Errorf("Should not skip active pod %q", pod.Name)
+		}
+		got = false
+	}
+}
+
 func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
 	ready := false