Increase cluster size by 2 in scale down test to bypass Heapster nanny issue

This commit is contained in:
Marcin Wielgus 2016-06-06 10:37:11 +02:00
parent 6460b34128
commit 95eaaeb883

View File

@@ -137,14 +137,24 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 	It("should correctly handle pending and scale down after deletion [Feature:ClusterSizeAutoscalingScaleDown]", func() {
-		By("Small pending pods increase cluster size")
-		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
-		// Verify, that cluster size is increased
+		By("Manually increase cluster size")
+		increasedSize := 0
+		newSizes := make(map[string]int)
+		for key, val := range originalSizes {
+			newSizes[key] = val + 2
+			increasedSize += val + 2
+		}
+		restoreSizes(newSizes)
 		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
-			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
-		framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "memory-reservation"))
+			func(size int) bool { return size >= increasedSize }, scaleUpTimeout))
 		By("Some node should be removed")
 		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
-			func(size int) bool { return size < nodeCount+1 }, scaleDownTimeout))
+			func(size int) bool { return size < increasedSize }, scaleDownTimeout))
+		restoreSizes(originalSizes)
+		framework.ExpectNoError(WaitForClusterSizeFunc(f.Client,
+			func(size int) bool { return size <= nodeCount }, scaleDownTimeout))
 	})
 	It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {