From 06424676e35956bfdacbc33f009ba694fdf4d653 Mon Sep 17 00:00:00 2001
From: Aleksandra Malinowska
Date: Thu, 8 Jun 2017 21:39:29 +0200
Subject: [PATCH] add e2e test for scale down to 1

---
 .../autoscaling/cluster_size_autoscaling.go | 33 +++++++++++--------
 1 file changed, 20 insertions(+), 13 deletions(-)

diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go
index af2439aa055..80a6f7c03c0 100644
--- a/test/e2e/autoscaling/cluster_size_autoscaling.go
+++ b/test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -392,7 +392,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 
 	It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func() {
-		runDrainTest(f, originalSizes, 1, 1, func(increasedSize int) {
+		runDrainTest(f, originalSizes, f.Namespace.Name, 1, 1, func(increasedSize int) {
 			By("Some node should be removed")
 			framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 				func(size int) bool { return size < increasedSize }, scaleDownTimeout))
@@ -400,7 +400,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 
 	It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]", func() {
-		runDrainTest(f, originalSizes, 1, 0, func(increasedSize int) {
+		runDrainTest(f, originalSizes, f.Namespace.Name, 1, 0, func(increasedSize int) {
 			By("No nodes should be removed")
 			time.Sleep(scaleDownTimeout)
 			nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
@@ -409,7 +409,15 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 
 	It("should be able to scale down by draining multiple pods one by one as dictated by pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
-		runDrainTest(f, originalSizes, 2, 1, func(increasedSize int) {
+		runDrainTest(f, originalSizes, f.Namespace.Name, 2, 1, func(increasedSize int) {
+			By("Some node should be removed")
+			framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
+				func(size int) bool { return size < increasedSize }, scaleDownTimeout))
+		})
+	})
+
+	It("should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown]", func() {
+		runDrainTest(f, originalSizes, "kube-system", 2, 1, func(increasedSize int) {
 			By("Some node should be removed")
 			framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 				func(size int) bool { return size < increasedSize }, scaleDownTimeout))
@@ -465,20 +473,19 @@ func execCmd(args ...string) *exec.Cmd {
 	return exec.Command(args[0], args[1:]...)
 }
 
-func runDrainTest(f *framework.Framework, migSizes map[string]int, podsPerNode, pdbSize int, verifyFunction func(int)) {
+func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace string, podsPerNode, pdbSize int, verifyFunction func(int)) {
 	increasedSize := manuallyIncreaseClusterSize(f, migSizes)
 
 	nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
 		"spec.unschedulable": "false",
 	}.AsSelector().String()})
 	framework.ExpectNoError(err)
-	namespace := f.Namespace.Name
 	numPods := len(nodes.Items) * podsPerNode
 	testId := string(uuid.NewUUID()) // So that we can label and find pods
 	labelMap := map[string]string{"test_id": testId}
-	framework.ExpectNoError(runReplicatedPodOnEachNode(f, nodes.Items, podsPerNode, "reschedulable-pods", labelMap))
+	framework.ExpectNoError(runReplicatedPodOnEachNode(f, nodes.Items, namespace, podsPerNode, "reschedulable-pods", labelMap))
 
-	defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "reschedulable-pods")
+	defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, namespace, "reschedulable-pods")
 
 	By("Create a PodDisruptionBudget")
 	minAvailable := intstr.FromInt(numPods - pdbSize)
@@ -871,7 +878,7 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node) error {
 // 3. for each node:
 // 3a. enable scheduling on that node
 // 3b. increase number of replicas in RC by podsPerNode
-func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, podsPerNode int, id string, labels map[string]string) error {
+func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string) error {
 	By("Run a pod on each node")
 	for _, node := range nodes {
 		err := makeNodeUnschedulable(f.ClientSet, &node)
@@ -888,7 +895,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, podsPer
 		Client:         f.ClientSet,
 		InternalClient: f.InternalClientset,
 		Name:           id,
-		Namespace:      f.Namespace.Name,
+		Namespace:      namespace,
 		Timeout:        defaultTimeout,
 		Image:          framework.GetPauseImageName(f.ClientSet),
 		Replicas:       0,
@@ -898,7 +905,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, podsPer
 	if err != nil {
 		return err
 	}
-	rc, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Get(id, metav1.GetOptions{})
+	rc, err := f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
@@ -912,7 +919,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, podsPer
 		// (we retry 409 errors in case rc reference got out of sync)
 		for j := 0; j < 3; j++ {
 			*rc.Spec.Replicas = int32((i + 1) * podsPerNode)
-			rc, err = f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Update(rc)
+			rc, err = f.ClientSet.Core().ReplicationControllers(namespace).Update(rc)
 			if err == nil {
 				break
 			}
@@ -920,14 +927,14 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, podsPer
 				return err
 			}
 			glog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j)
-			rc, err = f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Get(id, metav1.GetOptions{})
+			rc, err = f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
 			if err != nil {
 				return err
 			}
 		}
 
 		err = wait.PollImmediate(5*time.Second, podTimeout, func() (bool, error) {
-			rc, err = f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Get(id, metav1.GetOptions{})
+			rc, err = f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
 			if err != nil || rc.Status.ReadyReplicas < int32((i+1)*podsPerNode) {
 				return false, nil
 			}