Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #48280 from MaciekPytel/ca_scale_to_0_e2e_2
Automatic merge from submit-queue (batch tested with PRs 46091, 48280)

Added scale-down-to-0 e2e for cluster autoscaler
commit 396d8b1189
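The diff below touches three spots: the test BUILD file gains one dependency, the import block of the cluster-size autoscaling e2e file gains one matching import, and a new It block is added to the "Cluster size autoscaling [Slow]" suite. The new test picks the smallest node group, manually resizes it to a single node, cordons and drains that node, then waits for the cluster autoscaler to remove it and asserts the group size ends at 0. Two illustrative helper sketches follow the diff.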
@@ -17,6 +17,7 @@ go_library(
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/client/clientset_generated/clientset:go_default_library",
        "//test/e2e/common:go_default_library",
        "//test/e2e/framework:go_default_library",
@@ -39,6 +39,7 @@ import (
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/scheduling"
@@ -605,6 +606,51 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
	})

	It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func() {
		framework.SkipUnlessAtLeast(len(originalSizes), 2, "At least 2 node groups are needed for scale-to-0 tests")
		By("Find smallest node group and manually scale it to a single node")
		minMig := ""
		minSize := nodeCount
		for mig, size := range originalSizes {
			if size <= minSize {
				minMig = mig
				minSize = size
			}
		}
		err := framework.ResizeGroup(minMig, int32(1))
		framework.ExpectNoError(err)
		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount-minSize+1, resizeTimeout))

		By("Make the single node unschedulable")
		allNodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
			"spec.unschedulable": "false",
		}.AsSelector().String()})
		framework.ExpectNoError(err)
		ngNodes, err := framework.GetGroupNodes(minMig)
		framework.ExpectNoError(err)
		By(fmt.Sprintf("Target nodes for scale-down: %s", ngNodes))
		Expect(len(ngNodes) == 1).To(BeTrue())
		node, err := f.ClientSet.Core().Nodes().Get(ngNodes[0], metav1.GetOptions{})
		framework.ExpectNoError(err)
		makeNodeUnschedulable(f.ClientSet, node)

		By("Manually drain the single node")
		podOpts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
		pods, err := c.Core().Pods(metav1.NamespaceAll).List(podOpts)
		framework.ExpectNoError(err)
		for _, pod := range pods.Items {
			err = f.ClientSet.Core().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
			framework.ExpectNoError(err)
		}

		By("The node should be removed")
		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
			func(size int) bool { return size < len(allNodes.Items) }, scaleDownTimeout))
		minSize, err = framework.GroupSize(minMig)
		framework.ExpectNoError(err)
		Expect(minSize).Should(Equal(0))
	})

	It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func() {
		clusterSize := nodeCount
		for clusterSize < unhealthyClusterThreshold+1 {
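The new test cordons the target node through makeNodeUnschedulable, whose body is not part of this diff. For orientation only, here is a minimal Go sketch of what such a cordon helper could look like, assuming it simply flips spec.unschedulable on the Node object; the real helper in this file may instead apply a taint or retry on update conflicts. The function name cordonNode and the package clause are illustrative, not taken from the repository.

package e2e

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// cordonNode marks the named node unschedulable so the scheduler places no new pods on it.
func cordonNode(c clientset.Interface, nodeName string) error {
	// Re-read the node so we update a fresh object rather than a stale copy.
	node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	node.Spec.Unschedulable = true
	_, err = c.Core().Nodes().Update(node)
	return err
}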
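The final assertion waits via WaitForClusterSizeFunc, defined elsewhere in the e2e suite. Below is a minimal sketch of a polling helper in that spirit, assuming it repeatedly lists nodes and applies the caller's predicate to the node count until the predicate holds or the timeout expires; the real framework helper may additionally filter out unready or unschedulable nodes. The name waitForNodeCount is hypothetical.

package e2e

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// waitForNodeCount polls the API server until sizeFunc(node count) returns true or the timeout expires.
func waitForNodeCount(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
	return wait.PollImmediate(20*time.Second, timeout, func() (bool, error) {
		nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
		if err != nil {
			// Treat transient API errors as "not there yet" rather than failing the wait outright.
			return false, nil
		}
		return sizeFunc(len(nodes.Items)), nil
	})
}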