diff --git a/test/e2e/autoscaling/BUILD b/test/e2e/autoscaling/BUILD
index b53160c6cd3..0159ea9909c 100644
--- a/test/e2e/autoscaling/BUILD
+++ b/test/e2e/autoscaling/BUILD
@@ -17,6 +17,7 @@ go_library(
     ],
     tags = ["automanaged"],
     deps = [
+        "//pkg/api:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go
index fb78e979141..f480cffddca 100644
--- a/test/e2e/autoscaling/cluster_size_autoscaling.go
+++ b/test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -39,6 +39,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/scheduling"
@@ -589,6 +590,51 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
 	})
 
+	It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func() {
+		framework.SkipUnlessAtLeast(len(originalSizes), 2, "At least 2 node groups are needed for scale-to-0 tests")
+		By("Find smallest node group and manually scale it to a single node")
+		minMig := ""
+		minSize := nodeCount
+		for mig, size := range originalSizes {
+			if size <= minSize {
+				minMig = mig
+				minSize = size
+			}
+		}
+		err := framework.ResizeGroup(minMig, int32(1))
+		framework.ExpectNoError(err)
+		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount-minSize+1, resizeTimeout))
+
+		By("Make the single node unschedulable")
+		allNodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
+			"spec.unschedulable": "false",
+		}.AsSelector().String()})
+		framework.ExpectNoError(err)
+		ngNodes, err := framework.GetGroupNodes(minMig)
+		framework.ExpectNoError(err)
+		By(fmt.Sprintf("Target nodes for scale-down: %s", ngNodes))
+		Expect(len(ngNodes) == 1).To(BeTrue())
+		node, err := f.ClientSet.Core().Nodes().Get(ngNodes[0], metav1.GetOptions{})
+		framework.ExpectNoError(err)
+		makeNodeUnschedulable(f.ClientSet, node)
+
+		By("Manually drain the single node")
+		podOpts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
+		pods, err := c.Core().Pods(metav1.NamespaceAll).List(podOpts)
+		framework.ExpectNoError(err)
+		for _, pod := range pods.Items {
+			err = f.ClientSet.Core().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
+			framework.ExpectNoError(err)
+		}
+
+		By("The node should be removed")
+		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
+			func(size int) bool { return size < len(allNodes.Items) }, scaleDownTimeout))
+		minSize, err = framework.GroupSize(minMig)
+		framework.ExpectNoError(err)
+		Expect(minSize).Should(Equal(0))
+	})
+
 	It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func() {
 		clusterSize := nodeCount
 		for clusterSize < unhealthyClusterThreshold+1 {
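
Note: the new test calls makeNodeUnschedulable, which is not defined in this diff. As a rough illustration only, a cordon helper along the following lines could back it, using the clientset types the file already imports; the name cordonNode is hypothetical, and the real helper may differ (for example, it may retry on update conflicts or apply a taint instead of the Unschedulable flag).

package autoscaling

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// cordonNode is a hypothetical sketch of a cordon helper: it marks the node
// unschedulable so no new pods land on it, which (together with the manual
// drain in the test) lets the autoscaler treat it as a scale-down candidate.
func cordonNode(c clientset.Interface, node *v1.Node) error {
	// Re-fetch the node to avoid updating a stale copy.
	fresh, err := c.Core().Nodes().Get(node.Name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	fresh.Spec.Unschedulable = true
	_, err = c.Core().Nodes().Update(fresh)
	return err
}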