Merge pull request #98742 from gjkim42/sync-until-terminate-containers

kubelet: Sync completed pods until their containers have been terminated
Authored by Kubernetes Prow Robot on 2021-02-24 15:29:26 -08:00, committed by GitHub
commit 17c3ee8708
2 changed files with 130 additions and 2 deletions
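
Why the gate needed tightening: previously dispatchWork stopped syncing as soon as the pod phase was terminal (Failed or Succeeded), even if some of its containers were still running, so their termination could go unobserved. After this change the kubelet keeps syncing until every container has terminated as well. A minimal sketch of the combined check, under simplified assumptions (podIsTerminal and containersAreTerminal are illustrative names, not the kubelet's actual helpers):

// Sketch of the tightened terminal-pod gate; not the kubelet's real code.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// podIsTerminal reports whether the pod phase can no longer change.
func podIsTerminal(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded
}

// containersAreTerminal reports whether no container is still running.
func containersAreTerminal(pod *v1.Pod) bool {
	for _, s := range pod.Status.ContainerStatuses {
		if s.State.Running != nil {
			return false
		}
	}
	return true
}

func main() {
	// A failed pod that still has one running container.
	pod := &v1.Pod{Status: v1.PodStatus{
		Phase: v1.PodFailed,
		ContainerStatuses: []v1.ContainerStatus{
			{State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}},
		},
	}}
	// The old gate checked only podIsTerminal; the new one requires both.
	skip := podIsTerminal(pod) && containersAreTerminal(pod)
	fmt.Println("skip remaining sync work:", skip) // false: a container is still running
}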

pkg/kubelet/kubelet.go

@@ -2019,8 +2019,9 @@ func (kl *Kubelet) dispatchWork(pod *v1.Pod, syncType kubetypes.SyncPodType, mir
 	}
 	// optimization: avoid invoking the pod worker if no further changes are possible to the pod definition
-	if podWorkerTerminal {
-		klog.V(4).Infof("Pod %q has completed, ignoring remaining sync work: %s", format.Pod(pod), syncType)
+	// (i.e. the pod has completed and its containers have been terminated)
+	if podWorkerTerminal && containersTerminal {
+		klog.V(4).InfoS("Pod has completed and its containers have been terminated, ignoring remaining sync work", "pod", klog.KObj(pod), "syncType", syncType)
 		return
 	}
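
The log statement also migrates from the printf-style klog.V(4).Infof to structured logging with klog.V(4).InfoS and key/value pairs, matching the ongoing Kubernetes structured-logging conversion. A standalone illustration of the same pattern (the pod object and the -v=4 setup here are fabricated for the example):

package main

import (
	"flag"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	// Register klog flags and raise verbosity so V(4) messages are emitted.
	klog.InitFlags(nil)
	flag.Set("v", "4")
	flag.Parse()

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "completed-pod1", Namespace: "ns"}}
	// InfoS takes a constant message plus alternating key/value pairs;
	// klog.KObj renders the object reference as "ns/completed-pod1".
	klog.V(4).InfoS("Pod has completed and its containers have been terminated, ignoring remaining sync work",
		"pod", klog.KObj(pod), "syncType", "sync")
	klog.Flush()
}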

pkg/kubelet/kubelet_test.go

@@ -473,6 +473,133 @@ func TestSyncPodsDeletesWhenSourcesAreReadyPerQOS(t *testing.T) {
 	assert.True(t, destroyCount >= 1, "Expect 1 or more destroys")
 }
+
+func TestDispatchWorkOfCompletedPod(t *testing.T) {
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
+	defer testKubelet.Cleanup()
+	kubelet := testKubelet.kubelet
+	kubelet.podWorkers = &fakePodWorkers{
+		syncPodFn: func(options syncPodOptions) error {
+			return fmt.Errorf("should ignore completed pod %q", options.pod.Name)
+		},
+		cache: kubelet.podCache,
+		t:     t,
+	}
+	now := metav1.NewTime(time.Now())
+	pods := []*v1.Pod{
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				UID:         "1",
+				Name:        "completed-pod1",
+				Namespace:   "ns",
+				Annotations: make(map[string]string),
+			},
+			Status: v1.PodStatus{
+				Phase: v1.PodFailed,
+				ContainerStatuses: []v1.ContainerStatus{
+					{
+						State: v1.ContainerState{
+							Terminated: &v1.ContainerStateTerminated{},
+						},
+					},
+				},
+			},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				UID:         "2",
+				Name:        "completed-pod2",
+				Namespace:   "ns",
+				Annotations: make(map[string]string),
+			},
+			Status: v1.PodStatus{
+				Phase: v1.PodSucceeded,
+				ContainerStatuses: []v1.ContainerStatus{
+					{
+						State: v1.ContainerState{
+							Terminated: &v1.ContainerStateTerminated{},
+						},
+					},
+				},
+			},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				UID:               "3",
+				Name:              "completed-pod3",
+				Namespace:         "ns",
+				Annotations:       make(map[string]string),
+				DeletionTimestamp: &now,
+			},
+			Status: v1.PodStatus{
+				ContainerStatuses: []v1.ContainerStatus{
+					{
+						State: v1.ContainerState{
+							Terminated: &v1.ContainerStateTerminated{},
+						},
+					},
+				},
+			},
+		},
+	}
+	for _, pod := range pods {
+		kubelet.dispatchWork(pod, kubetypes.SyncPodSync, nil, time.Now())
+	}
+}
+
+func TestDispatchWorkOfActivePod(t *testing.T) {
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
+	defer testKubelet.Cleanup()
+	kubelet := testKubelet.kubelet
+	var got bool
+	kubelet.podWorkers = &fakePodWorkers{
+		syncPodFn: func(options syncPodOptions) error {
+			got = true
+			return nil
+		},
+		cache: kubelet.podCache,
+		t:     t,
+	}
+	pods := []*v1.Pod{
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				UID:         "1",
+				Name:        "active-pod1",
+				Namespace:   "ns",
+				Annotations: make(map[string]string),
+			},
+			Status: v1.PodStatus{
+				Phase: v1.PodRunning,
+			},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				UID:         "2",
+				Name:        "active-pod2",
+				Namespace:   "ns",
+				Annotations: make(map[string]string),
+			},
+			Status: v1.PodStatus{
+				Phase: v1.PodFailed,
+				ContainerStatuses: []v1.ContainerStatus{
+					{
+						State: v1.ContainerState{
+							Running: &v1.ContainerStateRunning{},
+						},
+					},
+				},
+			},
+		},
+	}
+	for _, pod := range pods {
+		kubelet.dispatchWork(pod, kubetypes.SyncPodSync, nil, time.Now())
+		if !got {
+			t.Errorf("Should not skip active pod %q", pod.Name)
+		}
+		got = false
+	}
+}
+
 func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
 	ready := false
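
To run only the new tests from a Kubernetes checkout (assuming the standard repository layout, with these tests under pkg/kubelet), something like:

go test ./pkg/kubelet/ -run 'TestDispatchWorkOf(Completed|Active)Pod' -v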