E2E test for statefulset burst
@@ -265,14 +265,21 @@ func (s *StatefulSetTester) GetPodList(ss *apps.StatefulSet) *v1.PodList {
 // ConfirmStatefulPodCount asserts that the current number of Pods in ss is count waiting up to timeout for ss to
 // to scale to count.
-func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *apps.StatefulSet, timeout time.Duration) {
+func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *apps.StatefulSet, timeout time.Duration, hard bool) {
     start := time.Now()
     deadline := start.Add(timeout)
     for t := time.Now(); t.Before(deadline); t = time.Now() {
         podList := s.GetPodList(ss)
         statefulPodCount := len(podList.Items)
         if statefulPodCount != count {
-            Failf("StatefulSet %v scaled unexpectedly scaled to %d -> %d replicas: %+v", ss.Name, count, len(podList.Items), podList)
+            logPodStates(podList.Items)
+            if hard {
+                Failf("StatefulSet %v scaled unexpectedly scaled to %d -> %d replicas", ss.Name, count, len(podList.Items))
+            } else {
+                Logf("StatefulSet %v has not reached scale %d, at %d", ss.Name, count, statefulPodCount)
+            }
+            time.Sleep(1 * time.Second)
+            continue
         }
         Logf("Verifying statefulset %v doesn't scale past %d for another %+v", ss.Name, count, deadline.Sub(t))
         time.Sleep(1 * time.Second)
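The new hard parameter controls what a count mismatch means: with hard=true (the behavior every existing call site keeps) any deviation fails the test immediately, while with hard=false the mismatch is logged via logPodStates and the loop keeps polling until the deadline, tolerating the transient pod counts that parallel pod management produces. A minimal sketch of the two modes from a caller's point of view, assuming the usual e2e context (sst and ss set up as in the hunks below):

    // hard=true: the count must hold at 2 for the full 10s window;
    // any other count fails the test on the spot.
    sst.ConfirmStatefulPodCount(2, ss, 10*time.Second, true)

    // hard=false: a mismatch is only logged and polling resumes, so
    // transient counts during a burst scale-up do not abort the test.
    sst.ConfirmStatefulPodCount(3, ss, 10*time.Second, false)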
@@ -311,7 +311,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
     sst.UpdateReplicas(ss, 1)

     By("Verifying that the 2nd pod wont be removed if it is not running and ready")
-    sst.ConfirmStatefulPodCount(2, ss, 10*time.Second)
+    sst.ConfirmStatefulPodCount(2, ss, 10*time.Second, true)
     expectedPodName := ss.Name + "-1"
     expectedPod, err := f.ClientSet.Core().Pods(ns).Get(expectedPodName, metav1.GetOptions{})
     Expect(err).NotTo(HaveOccurred())
@@ -369,7 +369,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
     sst.WaitForRunningAndNotReady(*ss.Spec.Replicas, ss)
     sst.WaitForStatus(ss, 0)
     sst.UpdateReplicas(ss, 3)
-    sst.ConfirmStatefulPodCount(1, ss, 10*time.Second)
+    sst.ConfirmStatefulPodCount(1, ss, 10*time.Second, true)

     By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
     sst.RestoreProbe(ss, testProbe)
@@ -400,7 +400,7 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
     sst.WaitForStatus(ss, 0)
     sst.WaitForRunningAndNotReady(3, ss)
     sst.UpdateReplicas(ss, 0)
-    sst.ConfirmStatefulPodCount(3, ss, 10*time.Second)
+    sst.ConfirmStatefulPodCount(3, ss, 10*time.Second, true)

     By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
     sst.RestoreProbe(ss, testProbe)
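The three one-line updates above all pass hard=true because those tests run under the default OrderedReady policy and assert the opposite of burst behavior: an unhealthy pod must wedge the controller, so the pod count must not move at all during the window. The shared pattern, sketched here with hypothetical desired/current placeholders:

    // desired/current stand in for the test's target and expected counts.
    // Wedge the controller with a failing readiness probe, request a new
    // scale, then assert the count stays pinned (hard=true: any change fails).
    sst.BreakProbe(ss, testProbe)
    sst.UpdateReplicas(ss, desired)
    sst.ConfirmStatefulPodCount(current, ss, 10*time.Second, true)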
@@ -422,6 +422,47 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
     Expect(err).NotTo(HaveOccurred())
 })

+It("Burst scaling should run to completion even with unhealthy pods", func() {
+    psLabels := klabels.Set(labels)
+
+    By("Creating stateful set " + ssName + " in namespace " + ns)
+    testProbe := &v1.Probe{Handler: v1.Handler{HTTPGet: &v1.HTTPGetAction{
+        Path: "/index.html",
+        Port: intstr.IntOrString{IntVal: 80}}}}
+    ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
+    ss.Spec.PodManagementPolicy = apps.ParallelPodManagement
+    ss.Spec.Template.Spec.Containers[0].ReadinessProbe = testProbe
+    ss, err := c.Apps().StatefulSets(ns).Create(ss)
+    Expect(err).NotTo(HaveOccurred())
+
+    By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
+    sst := framework.NewStatefulSetTester(c)
+    sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
+
+    By("Confirming that stateful set scale up will not halt with unhealthy stateful pod")
+    sst.BreakProbe(ss, testProbe)
+    sst.WaitForRunningAndNotReady(*ss.Spec.Replicas, ss)
+    sst.WaitForStatus(ss, 0)
+    sst.UpdateReplicas(ss, 3)
+    sst.ConfirmStatefulPodCount(3, ss, 10*time.Second, false)
+
+    By("Scaling up stateful set " + ssName + " to 3 replicas and waiting until all of them will be running in namespace " + ns)
+    sst.RestoreProbe(ss, testProbe)
+    sst.WaitForRunningAndReady(3, ss)
+
+    By("Scale down will not halt with unhealthy stateful pod")
+    sst.BreakProbe(ss, testProbe)
+    sst.WaitForStatus(ss, 0)
+    sst.WaitForRunningAndNotReady(3, ss)
+    sst.UpdateReplicas(ss, 0)
+    sst.ConfirmStatefulPodCount(0, ss, 10*time.Second, false)
+
+    By("Scaling down stateful set " + ssName + " to 0 replicas and waiting until none of pods will run in namespace" + ns)
+    sst.RestoreProbe(ss, testProbe)
+    sst.Scale(ss, 0)
+    sst.WaitForStatus(ss, 0)
+})
+
 It("Should recreate evicted statefulset", func() {
     podName := "test-pod"
     statefulPodName := ssName + "-0"
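The new burst test hinges on a single field: apps.ParallelPodManagement tells the StatefulSet controller to create and delete pods in parallel rather than one ordinal at a time, so a not-ready pod never blocks scale-up or scale-down, which is why its ConfirmStatefulPodCount calls pass hard=false (counts legitimately fluctuate on the way to the target). A sketch of the two policy values, assuming the apps API group aliased as in the diff and ss a freshly built spec:

    // Default: pods are created ss-0, ss-1, ... and each must be Running
    // and Ready before the controller touches the next ordinal.
    ss.Spec.PodManagementPolicy = apps.OrderedReadyPodManagement

    // Burst mode: pods are launched and terminated in parallel; readiness
    // of one pod never gates operations on the others.
    ss.Spec.PodManagementPolicy = apps.ParallelPodManagement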