add e2e tests for UnhealthyPodEvictionPolicy

Filip Křepinský 2024-06-18 20:02:12 +02:00
parent 68d34580e0
commit b779fb8387

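For context: the UnhealthyPodEvictionPolicy field on a policy/v1 PodDisruptionBudget controls how the eviction API treats pods that are running but not ready. With IfHealthyBudget (the default when the field is nil), unhealthy pods may be evicted only while the guarded application is undisrupted (currentHealthy is at least desiredHealthy); with AlwaysAllow they count as already disrupted and may always be evicted. A minimal sketch of setting the field, assuming the same imports as the file below (policyv1, metav1, intstr, ptr); the name and selector are illustrative:

pdb := policyv1.PodDisruptionBudget{
	ObjectMeta: metav1.ObjectMeta{Name: "example-pdb"},
	Spec: policyv1.PodDisruptionBudgetSpec{
		Selector:       &metav1.LabelSelector{MatchLabels: map[string]string{"app": "example"}},
		MaxUnavailable: ptr.To(intstr.FromInt32(1)),
		// Running-but-not-ready pods may be evicted even when the budget
		// would otherwise block the eviction.
		UnhealthyPodEvictionPolicy: ptr.To(policyv1.AlwaysAllow),
	},
}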

@@ -38,6 +38,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
@@ -45,9 +46,11 @@ import (
"k8s.io/client-go/util/retry"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
"k8s.io/utils/ptr"
)
// schedulingTimeout is longer specifically because sometimes we need to wait
@@ -301,7 +304,7 @@ var _ = SIGDescribe("DisruptionController", func() {
}
if c.maxUnavailable.String() != "" {
-createPDBMaxUnavailableOrDie(ctx, cs, ns, defaultName, c.maxUnavailable)
+createPDBMaxUnavailableOrDie(ctx, cs, ns, defaultName, c.maxUnavailable, nil)
}
// Locate a running pod.
@@ -375,7 +378,7 @@
ginkgo.By("Trying to evict the same pod we tried earlier which should now be evictable")
waitForPodsOrDie(ctx, cs, ns, 3)
-waitForPdbToObserveHealthyPods(ctx, cs, ns, 3)
+waitForPdbToObserveHealthyPods(ctx, cs, ns, 3, 2)
err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
framework.ExpectNoError(err) // the eviction is now allowed
@@ -414,6 +417,112 @@ var _ = SIGDescribe("DisruptionController", func() {
framework.ExpectNoError(err) // the eviction is now allowed
})
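// Each case below exercises one UnhealthyPodEvictionPolicy value (nil means
// the API default, IfHealthyBudget) against pods that either become ready
// first or stay unready, guarded by a PDB with maxUnavailable=1 over 3 replicas.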
unhealthyPodEvictionPolicyCases := []struct {
name string
unhealthyPodEvictionPolicy *policyv1.UnhealthyPodEvictionPolicyType
podsShouldBecomeReadyFirst bool
expectedSuccessfulPodEvictions int
}{
{
name: "should evict ready pods with Default UnhealthyPodEvictionPolicy",
unhealthyPodEvictionPolicy: nil,
podsShouldBecomeReadyFirst: true,
expectedSuccessfulPodEvictions: 1,
},
{
name: "should evict ready pods with IfHealthyBudget UnhealthyPodEvictionPolicy",
unhealthyPodEvictionPolicy: ptr.To(policyv1.IfHealthyBudget),
podsShouldBecomeReadyFirst: true,
expectedSuccessfulPodEvictions: 1,
},
{
name: "should evict ready pods with AlwaysAllow UnhealthyPodEvictionPolicy",
unhealthyPodEvictionPolicy: ptr.To(policyv1.AlwaysAllow),
podsShouldBecomeReadyFirst: true,
expectedSuccessfulPodEvictions: 1,
},
{
name: "should not evict unready pods with Default UnhealthyPodEvictionPolicy",
unhealthyPodEvictionPolicy: nil,
podsShouldBecomeReadyFirst: false,
expectedSuccessfulPodEvictions: 0,
},
{
name: "should not evict unready pods with IfHealthyBudget UnhealthyPodEvictionPolicy",
unhealthyPodEvictionPolicy: ptr.To(policyv1.IfHealthyBudget),
podsShouldBecomeReadyFirst: false,
expectedSuccessfulPodEvictions: 0,
},
{
name: "should evict unready pods with AlwaysAllow UnhealthyPodEvictionPolicy",
unhealthyPodEvictionPolicy: ptr.To(policyv1.AlwaysAllow),
podsShouldBecomeReadyFirst: false,
expectedSuccessfulPodEvictions: 3,
},
}
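// Run each case as its own spec; tc is re-declared per iteration so the
// closure below captures the current case rather than the loop variable.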
for i := range unhealthyPodEvictionPolicyCases {
tc := unhealthyPodEvictionPolicyCases[i]
framework.It(tc.name, func(ctx context.Context) {
ginkgo.By("Creating a pdb")
createPDBMaxUnavailableOrDie(ctx, cs, ns, defaultName, intstr.FromInt32(1), tc.unhealthyPodEvictionPolicy)
ginkgo.By("Creating a replica set")
rsName := "test-rs-with-delayed-ready"
replicas := int32(3)
rs := newRS(rsName, replicas, defaultLabels, WebserverImageName, WebserverImage, nil)
rs.Labels["name"] = rsName
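// For the unready cases, delay the readiness probe past the pod-start
// timeout so the pods stay Running but never become Ready during the test;
// the ready cases reset the delay to zero below.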
initialDelaySeconds := framework.PodStartTimeout.Seconds() + 30
if tc.podsShouldBecomeReadyFirst {
initialDelaySeconds = 0
}
rs.Spec.Template.Spec.Containers[0].ReadinessProbe = &v1.Probe{
ProbeHandler: v1.ProbeHandler{
HTTPGet: &v1.HTTPGetAction{
Path: "/index.html",
Port: intstr.IntOrString{IntVal: 80},
},
},
InitialDelaySeconds: int32(initialDelaySeconds),
}
_, err := cs.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{})
framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns)
if tc.podsShouldBecomeReadyFirst {
ginkgo.By("Wait for pods to be running and ready")
waitForPodsOrDie(ctx, cs, ns, int(replicas))
waitForPdbToObserveHealthyPods(ctx, cs, ns, replicas, replicas-1)
} else {
ginkgo.By("Wait for pods to be running and not ready")
err := e2epod.VerifyPodsRunning(ctx, cs, ns, rsName, false, replicas)
framework.ExpectNoError(err)
waitForPdbToObserveHealthyPods(ctx, cs, ns, 0, replicas-1)
}
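// With maxUnavailable=1 and 3 ready pods, exactly one eviction fits the
// budget. With unready pods, IfHealthyBudget allows none (currentHealthy is
// already below desiredHealthy), while AlwaysAllow treats unhealthy pods as
// always evictable, so all three evictions succeed.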
ginkgo.By("Try to evict all pods guarded by a PDB")
podList, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err)
evictedPods := sets.New[string]()
for _, pod := range podList.Items {
e := &policyv1.Eviction{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: ns,
},
}
err = cs.CoreV1().Pods(ns).EvictV1(ctx, e)
if err == nil {
evictedPods.Insert(pod.Name)
}
}
gomega.Expect(evictedPods).Should(gomega.HaveLen(tc.expectedSuccessfulPodEvictions))
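// Every accepted eviction should result in a terminating pod; wait until
// none of the evicted pods is still missing a deletion timestamp.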
_, err = e2epod.WaitForPods(ctx, cs, ns, metav1.ListOptions{}, e2epod.Range{NoneMatching: true}, framework.PodDeleteTimeout, "evicted pods should be deleted", func(pod *v1.Pod) bool {
return evictedPods.Has(pod.Name) && pod.DeletionTimestamp == nil
})
framework.ExpectNoError(err)
})
}
})
func createPDBMinAvailableOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string, minAvailable intstr.IntOrString, labels map[string]string) {
@@ -433,15 +542,16 @@ func createPDBMinAvailableOrDie(ctx context.Context, cs kubernetes.Interface, ns
waitForPdbToBeProcessed(ctx, cs, ns, name)
}
-func createPDBMaxUnavailableOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string, maxUnavailable intstr.IntOrString) {
+func createPDBMaxUnavailableOrDie(ctx context.Context, cs kubernetes.Interface, ns string, name string, maxUnavailable intstr.IntOrString, unhealthyPodEvictionPolicy *policyv1.UnhealthyPodEvictionPolicyType) {
pdb := policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: policyv1.PodDisruptionBudgetSpec{
-Selector:       &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
-MaxUnavailable: &maxUnavailable,
+Selector:                   &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
+MaxUnavailable:             &maxUnavailable,
+UnhealthyPodEvictionPolicy: unhealthyPodEvictionPolicy,
},
}
_, err := cs.PolicyV1().PodDisruptionBudgets(ns).Create(ctx, &pdb, metav1.CreateOptions{})
@@ -670,7 +780,7 @@ func waitForPdbToBeDeleted(ctx context.Context, cs kubernetes.Interface, ns stri
framework.ExpectNoError(err, "Waiting for the pdb to be deleted in namespace %s", ns)
}
-func waitForPdbToObserveHealthyPods(ctx context.Context, cs kubernetes.Interface, ns string, healthyCount int32) {
+func waitForPdbToObserveHealthyPods(ctx context.Context, cs kubernetes.Interface, ns string, healthyCount, desiredHealthy int32) {
ginkgo.By("Waiting for the pdb to observed all healthy pods")
err := wait.PollUntilContextTimeout(ctx, framework.Poll, wait.ForeverTestTimeout, true, func(ctx context.Context) (bool, error) {
pdb, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, "foo", metav1.GetOptions{})
@@ -680,6 +790,9 @@ func waitForPdbToObserveHealthyPods(ctx context.Context, cs kubernetes.Interface
if pdb.Status.CurrentHealthy != healthyCount {
return false, nil
}
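// The disruption controller computes desiredHealthy from the PDB; with
// maxUnavailable=1 over 3 replicas it is 3-1=2, matching what the callers
// above pass in.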
if pdb.Status.DesiredHealthy != desiredHealthy {
return false, nil
}
return true, nil
})
framework.ExpectNoError(err, "Waiting for the pdb in namespace %s to observed %d healthy pods", ns, healthyCount)