diff --git a/test/e2e/autoscaling/autoscaling_timer.go b/test/e2e/autoscaling/autoscaling_timer.go
index 087407382f9..ae2b164b7d8 100644
--- a/test/e2e/autoscaling/autoscaling_timer.go
+++ b/test/e2e/autoscaling/autoscaling_timer.go
@@ -79,7 +79,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 
 		// Calculate the CPU request of the service.
 		// This test expects that 8 pods will not fit in 'nodesNum' nodes, but will fit in >='nodesNum'+1 nodes.
-		// Make it so that 'nodesNum' pods fit perfectly per node (in practice other things take space, so less than that will fit).
+		// Make it so that 'nodesNum' pods fit perfectly per node.
 		nodeCpus := nodes.Items[0].Status.Allocatable[v1.ResourceCPU]
 		nodeCpuMillis := (&nodeCpus).MilliValue()
 		cpuRequestMillis := int64(nodeCpuMillis / nodesNum)
@@ -87,7 +87,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 		// Start the service we want to scale and wait for it to be up and running.
 		nodeMemoryBytes := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
 		nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
-		memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's total memory.
+		memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
 		replicas := 1
 		resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f)
 		defer resourceConsumer.CleanUp()
diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go
index dbaa43ed840..77852f6fab4 100644
--- a/test/e2e/autoscaling/cluster_size_autoscaling.go
+++ b/test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -477,7 +477,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
 
 		By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
-		ReserveMemory(f, "memory-reservation", 2, int(2.1*float64(memAllocatableMb)), false, defaultTimeout)
+		ReserveMemory(f, "memory-reservation", 2, int(2.5*float64(memAllocatableMb)), false, defaultTimeout)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
 
 		// Apparently GKE master is restarted couple minutes after the node pool is added
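
For context, a minimal sketch (not part of the patch) of the sizing arithmetic these hunks touch. The allocatable figures below are assumed for illustration and the program is hypothetical, not e2e framework code; it only mirrors how the tests derive per-pod requests from a node's allocatable resources, and why a reservation of 2.5x allocatable memory split across 2 pods cannot fit on a default-pool node.

```go
package main

import "fmt"

func main() {
	// Assumed allocatable capacity for a single default-pool node (illustrative only).
	const (
		nodesNum         = 3    // nodes in the default pool, as in the timer test
		nodeCPUMillis    = 2000 // assumed allocatable CPU per node, in millicores
		nodeMemoryMB     = 7000 // assumed allocatable memory per node, in MB
		memAllocatableMb = 7000 // same figure, as passed to ReserveMemory in the size test
	)

	// autoscaling_timer.go: each pod requests 1/nodesNum of a node's CPU
	// (so nominally 'nodesNum' pods fit per node) and at most 10% of its
	// allocatable memory.
	cpuRequestMillis := int64(nodeCPUMillis / nodesNum)
	memRequestMB := int64(nodeMemoryMB / 10)
	fmt.Printf("per-pod request: %dm CPU, %dMB memory\n", cpuRequestMillis, memRequestMB)

	// cluster_size_autoscaling.go: reserving 2.5x allocatable memory across
	// 2 pods means each pod asks for ~1.25x a default-pool node's allocatable
	// memory, so neither pod can schedule there and the extra pool must be used.
	totalReservationMB := int(2.5 * float64(memAllocatableMb))
	perPodMB := totalReservationMB / 2
	fmt.Printf("reservation: %dMB total, %dMB per pod (node allocatable: %dMB)\n",
		totalReservationMB, perPodMB, memAllocatableMb)
}
```

Assuming ReserveMemory spreads the total evenly across its replicas, the old 2.1 factor left each pod at only about 1.05x a node's allocatable memory; 2.5 gives a wider margin for the "too big to fit default-pool" condition.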