Fix Cluster-Autoscaler e2e failing on some node configs

This commit is contained in:
Maciej Pytel 2017-03-22 11:58:37 +01:00
parent ee255d09fa
commit 53df30f4c6


@@ -112,7 +112,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
By("Creating unschedulable pod")
ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false)
ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memCapacityMb)), false)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
By("Waiting for scale up hoping it won't happen")
@@ -281,7 +281,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
ReserveMemory(f, "memory-reservation", 2, 2*memCapacityMb, false)
ReserveMemory(f, "memory-reservation", 2, int(2.1*float64(memCapacityMb)), false)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
// Apparently GKE master is restarted couple minutes after the node pool is added
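A minimal sketch of the idea behind the change, assuming memCapacityMb is the per-node memory capacity used by the test (the helper below is illustrative and is not the e2e framework's ReserveMemory): on some node configurations a pod requesting exactly memCapacityMb can still be judged schedulable, so the "too large" pod is not guaranteed to stay pending and the no-scale-up expectation breaks. Bumping the request by 10% keeps it larger than any single node on every config; the second hunk applies the same over-reservation to two pods that must not fit default-pool nodes while still fitting the extra-pool.

package main

import "fmt"

// tooLargeRequestMb mirrors int(1.1*float64(memCapacityMb)): a per-pod memory
// request intended to exceed a single node's capacity on any node config.
func tooLargeRequestMb(memCapacityMb int) int {
	return int(1.1 * float64(memCapacityMb))
}

func main() {
	memCapacityMb := 7500 // illustrative per-node capacity in Mb
	fmt.Printf("pod request %d Mb > node capacity %d Mb\n",
		tooLargeRequestMb(memCapacityMb), memCapacityMb)
}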