adjust parameter in cluster autoscaling test

Aleksandra Malinowska 2017-09-20 11:40:49 +02:00
parent 02f48b6846
commit daf77aca1d
2 changed files with 3 additions and 3 deletions


@@ -79,7 +79,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 // Calculate the CPU request of the service.
 // This test expects that 8 pods will not fit in 'nodesNum' nodes, but will fit in >='nodesNum'+1 nodes.
-// Make it so that 'nodesNum' pods fit perfectly per node (in practice other things take space, so less than that will fit).
+// Make it so that 'nodesNum' pods fit perfectly per node.
 nodeCpus := nodes.Items[0].Status.Allocatable[v1.ResourceCPU]
 nodeCpuMillis := (&nodeCpus).MilliValue()
 cpuRequestMillis := int64(nodeCpuMillis / nodesNum)
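
Note on the sizing in the hunk above: the test expects 8 pods not to fit on 'nodesNum' nodes, and it arranges this by giving each pod a CPU request of one 'nodesNum'-th of a node's allocatable CPU. Because system pods already consume part of the allocatable CPU, fewer than 'nodesNum' such pods actually fit per node, so the 8 pods overflow onto an extra node. A rough, self-contained sketch of that arithmetic follows; the concrete numbers (nodesNum = 3, 2000m allocatable per node, ~200m of system overhead) are illustrative assumptions, not values taken from this diff.

package main

import "fmt"

// Rough sketch of the CPU sizing logic described in the comments above; this is
// not the test code. Assumptions (not taken from the diff): nodesNum = 3,
// 2000m of allocatable CPU per node, and ~200m of that already used by system pods.
func main() {
	const (
		nodesNum      = 3    // assumed node count before the scale-up
		nodeCpuMillis = 2000 // assumed allocatable CPU per node, in millicores
		systemPodsM   = 200  // assumed CPU already consumed by system pods
		pods          = 8
	)

	// Each test pod requests a 'nodesNum'-th of a node's allocatable CPU.
	cpuRequestMillis := nodeCpuMillis / nodesNum

	// In practice other pods take space, so fewer than nodesNum test pods fit per node.
	podsPerNode := (nodeCpuMillis - systemPodsM) / cpuRequestMillis

	fmt.Printf("per-pod request: %dm, test pods per node: %d\n", cpuRequestMillis, podsPerNode)
	fmt.Printf("%d nodes hold %d pods; %d nodes hold %d (need %d)\n",
		nodesNum, nodesNum*podsPerNode, nodesNum+1, (nodesNum+1)*podsPerNode, pods)
}

With these assumed numbers the integer division yields 2 test pods per node, so 3 nodes hold only 6 pods and the full set of 8 forces a scale-up to a 4th node.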
@@ -87,7 +87,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 // Start the service we want to scale and wait for it to be up and running.
 nodeMemoryBytes := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
 nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
-memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's total memory.
+memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
 replicas := 1
 resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f)
 defer resourceConsumer.CleanUp()
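
For reference, a minimal sketch of how the per-pod requests in this hunk are derived from a node's allocatable resources. It builds a Node object by hand instead of reading one from a live cluster, and it assumes the current k8s.io/api and k8s.io/apimachinery import paths plus an illustrative node size (2 CPUs, 7500Mi); none of those specifics come from the diff.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// Minimal sketch of the request derivation in the hunk above, run against a
// hand-built Node instead of one fetched from a cluster. The node size and
// nodesNum below are illustrative assumptions.
func main() {
	node := v1.Node{
		Status: v1.NodeStatus{
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("2"),      // assumed 2 CPUs
				v1.ResourceMemory: resource.MustParse("7500Mi"), // assumed 7500Mi of memory
			},
		},
	}

	nodeCpus := node.Status.Allocatable[v1.ResourceCPU]
	nodeCpuMillis := (&nodeCpus).MilliValue()

	nodeMemoryBytes := node.Status.Allocatable[v1.ResourceMemory]
	nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024

	const nodesNum = 3 // assumed; the real value comes from the surrounding test setup
	cpuRequestMillis := nodeCpuMillis / nodesNum
	memRequestMB := nodeMemoryMB / 10 // at most a tenth of the node's allocatable memory per pod

	fmt.Printf("cpu request: %dm, memory request: %d MB\n", cpuRequestMillis, memRequestMB)
}

The division by 10 keeps memory from being the binding constraint: each resource-consumer pod asks for at most a tenth of a node's allocatable memory, so the scale-up in this test is driven by the CPU request.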


@@ -477,7 +477,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
 By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
-ReserveMemory(f, "memory-reservation", 2, int(2.1*float64(memAllocatableMb)), false, defaultTimeout)
+ReserveMemory(f, "memory-reservation", 2, int(2.5*float64(memAllocatableMb)), false, defaultTimeout)
 defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
 // Apparently GKE master is restarted couple minutes after the node pool is added
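
The parameter adjusted here is the total memory reserved by the two pods, raised from 2.1 to 2.5 times a default-pool node's allocatable memory (memAllocatableMb). Assuming the ReserveMemory helper splits that total evenly across its replicas, each pod's request goes from roughly 1.05x to 1.25x of a default-pool node, which makes "too big to fit default-pool" hold with a much larger margin while still fitting the larger extra-pool nodes. A small sketch of that arithmetic, with an illustrative memAllocatableMb:

package main

import "fmt"

// Sketch of the effect of raising the factor from 2.1 to 2.5.
// Assumptions (not from the diff): the memAllocatableMb value of a default-pool
// node, and that ReserveMemory divides the requested total evenly over its 2 replicas.
func main() {
	const memAllocatableMb = 5000 // assumed default-pool allocatable memory per node, in MB

	for _, factor := range []float64{2.1, 2.5} {
		totalMb := int(factor * float64(memAllocatableMb))
		perPodMb := totalMb / 2 // the test runs 2 replicas
		fmt.Printf("factor %.1f: total %d MB, per pod %d MB (%.0f%% of a default-pool node)\n",
			factor, totalMb, perPodMb, 100*float64(perPodMb)/memAllocatableMb)
	}
}

At 2.1 each pod exceeds a default-pool node by only about 5%, leaving little margin for rounding and per-node variation; 2.5 removes that ambiguity.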