Add unit tests verifying that the update touches old, unhealthy pods first, and new pods only afterwards.

Signed-off-by: Maciej Szulik <soltysh@gmail.com>
Author: Maciej Szulik
Date: 2024-10-07 18:20:56 +02:00
parent 66b3dc1a38
commit 174288d751
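
For context, the invariant these tests exercise is that a rolling update spends its disruption budget on pods that are already unavailable before it ever deletes a healthy old pod. Below is a minimal sketch of that ordering rule, assuming a hypothetical sortOldPodsForDeletion helper; the controller's real logic lives in its rollingUpdate path and does not take this exact form:

package daemon

import (
	"sort"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// sortOldPodsForDeletion is a hypothetical illustration, not controller code:
// it orders old pods so that the unavailable ones come first and are
// therefore replaced before any healthy pod is touched.
func sortOldPodsForDeletion(oldPods []*v1.Pod, minReadySeconds int32, now metav1.Time) {
	sort.SliceStable(oldPods, func(i, j int) bool {
		iAvailable := podutil.IsPodAvailable(oldPods[i], minReadySeconds, now)
		jAvailable := podutil.IsPodAvailable(oldPods[j], minReadySeconds, now)
		// an unavailable pod sorts before an available one; stable sort
		// keeps the relative order of equally-available pods
		return !iAvailable && jAvailable
	})
}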


@@ -18,7 +18,10 @@ package daemon
import (
"context"
"fmt"
"math/rand"
"reflect"
"strings"
"testing"
"time"
@@ -27,6 +30,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2/ktesting"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/daemon/util"
@@ -294,6 +298,216 @@ func TestDaemonSetUpdatesSomeOldPodsNotReady(t *testing.T) {
t.Fatalf("Expected %d old ready pods, but found %d", expectedReadyCount, readyCount)
}
}
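
// TestDaemonSetUpdatesSaveOldHealthyPods verifies that repeated rolling
// updates churn only the pods that are already unavailable and never touch
// the old, healthy ones.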
func TestDaemonSetUpdatesSaveOldHealthyPods(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ctx, ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 20, nil)
err = manager.dsStore.Add(ds)
if err != nil {
t.Fatal(err)
}
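// the trailing arguments of expectSyncDaemonSets are the expected numbers
// of pod creations, pod deletions, and events for this sync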
expectSyncDaemonSets(t, manager, ds, podControl, 20, 0, 0)
markPodsReady(podControl.podStore)
t.Logf("first update to get 10 old pods which should never be touched")
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
maxUnavailable := 10
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
err = manager.dsStore.Update(ds)
if err != nil {
t.Fatal(err)
}
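// each rollout step takes three syncs: one to delete up to maxUnavailable
// old pods, one to create their replacements, and one to confirm a steady
// state with no further changes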
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
// record the ready old pods that we expect to keep running untouched
oldReadyPods := sets.Set[string]{}
for _, obj := range podControl.podStore.List() {
pod := obj.(*v1.Pod)
if podutil.IsPodReady(pod) {
oldReadyPods.Insert(pod.Name)
}
}
for i := 0; i < 10; i++ {
maxUnavailable := rand.Intn(10)
t.Logf("%d iteration, maxUnavailable=%d", i+1, maxUnavailable)
intStr = intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
ds.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("foo2/bar3-%d", i)
err = manager.dsStore.Update(ds)
if err != nil {
t.Fatal(err)
}
// only the 10 already-unavailable pods may be replaced; since they exhaust
// the unavailability budget, the ready pods are left alone
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 10, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 10, 0, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
// verify that the ready pods are never touched
readyPods := sets.Set[string]{}
t.Logf("looking for old ready pods: %s", strings.Join(oldReadyPods.UnsortedList(), ", "))
for _, obj := range podControl.podStore.List() {
pod := obj.(*v1.Pod)
if podutil.IsPodReady(pod) {
readyPods.Insert(pod.Name)
}
}
if !readyPods.HasAll(oldReadyPods.UnsortedList()...) {
t.Errorf("pods have changed in %d-th iteration: %s", i,
strings.Join(oldReadyPods.Difference(readyPods).UnsortedList(), ", "))
}
}
maxUnavailable = 11
intStr = intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar4"
err = manager.dsStore.Update(ds)
if err != nil {
t.Fatal(err)
}
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
// verify that the ready pods are never touched
readyPods := sets.Set[string]{}
for _, obj := range podControl.podStore.List() {
pod := obj.(*v1.Pod)
if podutil.IsPodReady(pod) {
readyPods.Insert(pod.Name)
}
}
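// with maxUnavailable=11 the controller may finally replace one ready pod
// in addition to the 10 never-ready ones, leaving 9 of the old ready pods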
if readyPods.Len() != 9 {
t.Errorf("readyPods are different than expected, should be 9 but is %s", strings.Join(readyPods.UnsortedList(), ", "))
}
}
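
// TestDaemonSetUpdatesAllOldNotReadyPodsAndNewNotReadyPods verifies that
// unavailable old pods are replaced first, ahead of any new pods, even when
// the number of unavailable new pods already exceeds maxUnavailable.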
func TestDaemonSetUpdatesAllOldNotReadyPodsAndNewNotReadyPods(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ctx, ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 100, nil)
err = manager.dsStore.Add(ds)
if err != nil {
t.Fatal(err)
}
expectSyncDaemonSets(t, manager, ds, podControl, 100, 0, 0)
markPodsReady(podControl.podStore)
var hash1 string
// at this point we have 100 pods belonging to the daemonset;
// record their controller revision hash, which will be used later to fake old pods
for _, obj := range podControl.podStore.List() {
pod := obj.(*v1.Pod)
hash1 = pod.Labels[apps.ControllerRevisionHashLabelKey]
break
}
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
maxUnavailable := 10
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
err = manager.dsStore.Update(ds)
if err != nil {
t.Fatal(err)
}
// with maxUnavailable=10 we need 10 iterations to roll the update out to all 100 nodes
for i := 0; i < 10; i++ {
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
// make sure to mark the pods ready, otherwise the follow-up rollouts will fail
markPodsReady(podControl.podStore)
}
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
// set up the following situation:
// - maxUnavailable 10
// - 88 unavailable new pods
// - 2 unavailable old pods
// - 10 available old pods
oldUnavailablePods := sets.Set[string]{}
for i, obj := range podControl.podStore.List() {
pod := obj.(*v1.Pod)
// mark the last 90 pods not ready
if i >= 10 {
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionFalse}
podutil.UpdatePodCondition(&pod.Status, &condition)
}
// mark the first 12 pods with the older hash
if i < 12 {
pod.Labels[apps.ControllerRevisionHashLabelKey] = hash1
// record the 2 unavailable old pods
if i >= 10 {
oldUnavailablePods.Insert(pod.Name)
}
}
}
clearExpectations(t, manager, ds, podControl)
t.Logf("expect 2 old pods deletion in 1st iteration")
expectSyncDaemonSets(t, manager, ds, podControl, 0, 2, 0)
clearExpectations(t, manager, ds, podControl)
t.Logf("expect 2 new pods creation in 2nd iteration")
expectSyncDaemonSets(t, manager, ds, podControl, 2, 0, 0)
clearExpectations(t, manager, ds, podControl)
t.Logf("expect no modifications in 3rd iteration")
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
// check if oldUnavailablePods were replaced
t.Logf("Looking for old pods %s", strings.Join(oldUnavailablePods.UnsortedList(), ", "))
notUpdatedOldPods := sets.Set[string]{}
for _, obj := range podControl.podStore.List() {
pod := obj.(*v1.Pod)
if oldUnavailablePods.Has(pod.Name) {
notUpdatedOldPods.Insert(pod.Name)
}
}
if notUpdatedOldPods.Len() > 0 {
t.Fatalf("found not updated old pods: %s", strings.Join(notUpdatedOldPods.UnsortedList(), ", "))
}
}
func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)