diff --git a/test/e2e/autoscaling/horizontal_pod_autoscaling.go b/test/e2e/autoscaling/horizontal_pod_autoscaling.go
index 826215ca803..1e76cea54f1 100644
--- a/test/e2e/autoscaling/horizontal_pod_autoscaling.go
+++ b/test/e2e/autoscaling/horizontal_pod_autoscaling.go
@@ -114,17 +114,18 @@ type HPAScaleTest struct {
 // The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
 // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
 func (scaleTest *HPAScaleTest) run(name, kind string, rc *common.ResourceConsumer, f *framework.Framework) {
+	const timeToWait = 15 * time.Minute
 	rc = common.NewDynamicResourceConsumer(name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f)
 	defer rc.CleanUp()
 	hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
 	defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
-	rc.WaitForReplicas(int(scaleTest.firstScale))
+	rc.WaitForReplicas(int(scaleTest.firstScale), timeToWait)
 	if scaleTest.firstScaleStasis > 0 {
 		rc.EnsureDesiredReplicas(int(scaleTest.firstScale), scaleTest.firstScaleStasis)
 	}
 	if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
 		rc.ConsumeCPU(scaleTest.cpuBurst)
-		rc.WaitForReplicas(int(scaleTest.secondScale))
+		rc.WaitForReplicas(int(scaleTest.secondScale), timeToWait)
 	}
 }
 
diff --git a/test/e2e/common/autoscaling_utils.go b/test/e2e/common/autoscaling_utils.go
index aa16ef4e207..8d15bc0295a 100644
--- a/test/e2e/common/autoscaling_utils.go
+++ b/test/e2e/common/autoscaling_utils.go
@@ -342,8 +342,7 @@ func (rc *ResourceConsumer) GetReplicas() int {
 	return 0
 }
 
-func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int) {
-	duration := 15 * time.Minute
+func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) {
 	interval := 20 * time.Second
 	err := wait.PollImmediate(interval, duration, func() (bool, error) {
 		replicas := rc.GetReplicas()
diff --git a/test/e2e/stackdriver_monitoring.go b/test/e2e/stackdriver_monitoring.go
index f8060d90854..f175c13e7d3 100644
--- a/test/e2e/stackdriver_monitoring.go
+++ b/test/e2e/stackdriver_monitoring.go
@@ -89,7 +89,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
 	rc := common.NewDynamicResourceConsumer(rcName, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f)
 	defer rc.CleanUp()
 
-	rc.WaitForReplicas(pods)
+	rc.WaitForReplicas(pods, 15*time.Minute)
 
 	metricsMap := map[string]bool{}
 	pollingFunction := checkForMetrics(projectId, gcmService, time.Now(), metricsMap, allPodsCPU, perPodCPU)
diff --git a/test/e2e/upgrades/horizontal_pod_autoscalers.go b/test/e2e/upgrades/horizontal_pod_autoscalers.go
index 51074e91a98..bb5110ebb38 100644
--- a/test/e2e/upgrades/horizontal_pod_autoscalers.go
+++ b/test/e2e/upgrades/horizontal_pod_autoscalers.go
@@ -18,6 +18,7 @@ package upgrades
 
 import (
 	"fmt"
+	"time"
 
 	autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
 	"k8s.io/kubernetes/test/e2e/common"
@@ -72,22 +73,23 @@ func (t *HPAUpgradeTest) Teardown(f *framework.Framework) {
 }
 
 func (t *HPAUpgradeTest) test() {
+	const timeToWait = 15 * time.Minute
 	t.rc.Resume()
 	By(fmt.Sprintf("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1."))
 	t.rc.ConsumeCPU(10) /* millicores */
 	By(fmt.Sprintf("HPA waits for 1 replica"))
-	t.rc.WaitForReplicas(1)
+	t.rc.WaitForReplicas(1, timeToWait)
 	By(fmt.Sprintf("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores."))
 	t.rc.ConsumeCPU(250) /* millicores */
 	By(fmt.Sprintf("HPA waits for 3 replicas"))
-	t.rc.WaitForReplicas(3)
+	t.rc.WaitForReplicas(3, timeToWait)
 	By(fmt.Sprintf("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5."))
 	t.rc.ConsumeCPU(700) /* millicores */
 	By(fmt.Sprintf("HPA waits for 5 replicas"))
-	t.rc.WaitForReplicas(5)
+	t.rc.WaitForReplicas(5, timeToWait)
 
 	// We need to pause background goroutines as during upgrade master is unavailable and requests issued by them fail.
 	t.rc.Pause()
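
For context, a minimal sketch of how a caller supplies the timeout explicitly now that WaitForReplicas takes a time.Duration. The argument values, the 10-minute timeout, and the waitExample helper are illustrative only (not part of this change); f is the test's *framework.Framework, as in the files touched above.

// Sketch only: illustrates the new two-argument WaitForReplicas call.
package example

import (
	"time"

	"k8s.io/kubernetes/test/e2e/common"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitExample is a hypothetical helper; the consumer arguments below are illustrative.
func waitExample(f *framework.Framework) {
	// One pod consuming 250 millicores in total, mirroring the NewDynamicResourceConsumer
	// calls visible in the diff above.
	rc := common.NewDynamicResourceConsumer("wait-example", common.KindDeployment, 1, 250, 0, 0, 500, 200, f)
	defer rc.CleanUp()

	// The 15-minute wait that used to be hard-coded inside WaitForReplicas is now
	// chosen by the caller, so individual tests can pick a tighter or looser bound.
	rc.WaitForReplicas(3, 10*time.Minute)
}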