From 9b300c978b1e6a1781146be3f5ff21aa168ea412 Mon Sep 17 00:00:00 2001
From: Marshall Brekka
Date: Fri, 9 Feb 2024 14:36:23 -0800
Subject: [PATCH] Fix daemonset to ensure old unhealthy pods are counted
 towards max unavailable budget.

---
 pkg/controller/daemon/update.go      |  1 +
 pkg/controller/daemon/update_test.go | 69 ++++++++++++++++++++++++++++
 2 files changed, 70 insertions(+)

diff --git a/pkg/controller/daemon/update.go b/pkg/controller/daemon/update.go
index 8ad023f242f..389c16319b7 100644
--- a/pkg/controller/daemon/update.go
+++ b/pkg/controller/daemon/update.go
@@ -99,6 +99,7 @@ func (dsc *DaemonSetsController) rollingUpdate(ctx context.Context, ds *apps.Dae
 				allowedReplacementPods = make([]string, 0, len(nodeToDaemonPods))
 			}
 			allowedReplacementPods = append(allowedReplacementPods, oldPod.Name)
+			numUnavailable++
 		case numUnavailable >= maxUnavailable:
 			// no point considering any other candidates
 			continue
diff --git a/pkg/controller/daemon/update_test.go b/pkg/controller/daemon/update_test.go
index 855097b16da..73931fa92c9 100644
--- a/pkg/controller/daemon/update_test.go
+++ b/pkg/controller/daemon/update_test.go
@@ -226,6 +226,75 @@ func TestDaemonSetUpdatesWhenNewPodIsNotReady(t *testing.T) {
 	clearExpectations(t, manager, ds, podControl)
 }
 
+func TestDaemonSetUpdatesSomeOldPodsNotReady(t *testing.T) {
+	_, ctx := ktesting.NewTestContext(t)
+	ds := newDaemonSet("foo")
+	manager, podControl, _, err := newTestController(ctx, ds)
+	if err != nil {
+		t.Fatalf("error creating DaemonSets controller: %v", err)
+	}
+	maxUnavailable := 2
+	addNodes(manager.nodeStore, 0, 5, nil)
+	err = manager.dsStore.Add(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
+	markPodsReady(podControl.podStore)
+
+	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
+	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
+	intStr := intstr.FromInt(maxUnavailable)
+	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
+	err = manager.dsStore.Update(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// All old pods are available, should update 2
+	clearExpectations(t, manager, ds, podControl)
+	expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
+
+	clearExpectations(t, manager, ds, podControl)
+	expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
+
+	clearExpectations(t, manager, ds, podControl)
+	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+	clearExpectations(t, manager, ds, podControl)
+
+	// Perform another update, verify we delete and create 2 pods, three available should remain untouched
+
+	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar3"
+	err = manager.dsStore.Update(ds)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	clearExpectations(t, manager, ds, podControl)
+	expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
+
+	clearExpectations(t, manager, ds, podControl)
+	expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
+
+	clearExpectations(t, manager, ds, podControl)
+	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
+	clearExpectations(t, manager, ds, podControl)
+
+	readyCount := 0
+	expectedReadyCount := 5 - maxUnavailable
+	for _, obj := range podControl.podStore.List() {
+		pod := obj.(*v1.Pod)
+		n, condition := podutil.GetPodCondition(&pod.Status, v1.PodReady)
+		if n != -1 && condition.Status == v1.ConditionTrue {
+			readyCount++
+		}
+	}
+
+	if readyCount != expectedReadyCount {
+		t.Fatalf("Expected %d old ready pods, but found %d", expectedReadyCount, readyCount)
+	}
+}
+
 func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
 	_, ctx := ktesting.NewTestContext(t)
 	ds := newDaemonSet("foo")
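
Note for reviewers: the sketch below restates the switch that this patch
touches, to make the accounting change easier to see. It is a simplified
illustration rather than the full rollingUpdate body; the surrounding
per-node loop and logging are elided, and oldPodIsUnavailable is a
placeholder for the controller's actual availability check on the old pod.

	// For each out-of-date pod, decide how it affects the
	// unavailability budget (maxUnavailable).
	switch {
	case oldPodIsUnavailable:
		// The old pod is already unhealthy, so replacing it cannot
		// reduce availability further, and it may be replaced...
		allowedReplacementPods = append(allowedReplacementPods, oldPod.Name)
		// ...but before this patch it was not counted against the
		// budget, so any number of unhealthy old pods could be replaced
		// at once. Counting it here caps total churn at maxUnavailable.
		numUnavailable++
	case numUnavailable >= maxUnavailable:
		// Budget exhausted: no point considering any other candidates.
		continue
	}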