From cb6a8a25644729b6b28db084482eaa33c23abd71 Mon Sep 17 00:00:00 2001
From: Joachim Bartosik
Date: Tue, 30 Oct 2018 17:50:54 +0100
Subject: [PATCH] Deflake HPA tests

- Scale down based on custom metric was flaking. Increase target value
  of the metric.
- Scale down based on CPU was flaking during stabilization. Increase
  tolerance of stabilization (caused by resource consumer using more
  CPU than requested).
---
 .../custom_metrics_stackdriver_autoscaling.go      |  2 +-
 test/e2e/autoscaling/horizontal_pod_autoscaling.go |  6 +++---
 test/e2e/common/autoscaling_utils.go               | 12 +++++++++---
 3 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
index 5277cb9c5df..c562b056229 100644
--- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
+++ b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
@@ -109,7 +109,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
 		initialReplicas := 2
 		// metric should cause scale down
 		metricValue := externalMetricValue
-		metricAverageTarget := 2 * metricValue
+		metricAverageTarget := (3 * metricValue) / 2
 		metricTargets := map[string]externalMetricTarget{
 			"target_average": {
 				value: metricAverageTarget,
diff --git a/test/e2e/autoscaling/horizontal_pod_autoscaling.go b/test/e2e/autoscaling/horizontal_pod_autoscaling.go
index 651ece81af2..0a250d750e6 100644
--- a/test/e2e/autoscaling/horizontal_pod_autoscaling.go
+++ b/test/e2e/autoscaling/horizontal_pod_autoscaling.go
@@ -102,7 +102,7 @@ type HPAScaleTest struct {
 	targetCPUUtilizationPercent int32
 	minPods                     int32
 	maxPods                     int32
-	firstScale                  int32
+	firstScale                  int
 	firstScaleStasis            time.Duration
 	cpuBurst                    int
 	secondScale                 int32
@@ -120,9 +120,9 @@ func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc
 	defer rc.CleanUp()
 	hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
 	defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
-	rc.WaitForReplicas(int(scaleTest.firstScale), timeToWait)
+	rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
 	if scaleTest.firstScaleStasis > 0 {
-		rc.EnsureDesiredReplicas(int(scaleTest.firstScale), scaleTest.firstScaleStasis)
+		rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis)
 	}
 	if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
 		rc.ConsumeCPU(scaleTest.cpuBurst)
diff --git a/test/e2e/common/autoscaling_utils.go b/test/e2e/common/autoscaling_utils.go
index 0e0d202b8d4..36f5cd8d0da 100644
--- a/test/e2e/common/autoscaling_utils.go
+++ b/test/e2e/common/autoscaling_utils.go
@@ -370,12 +370,18 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.D
 }
 
 func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration) {
+	rc.EnsureDesiredReplicasInRange(desiredReplicas, desiredReplicas, duration)
+}
+
+func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, maxDesiredReplicas int, duration time.Duration) {
 	interval := 10 * time.Second
 	err := wait.PollImmediate(interval, duration, func() (bool, error) {
 		replicas := rc.GetReplicas()
-		framework.Logf("expecting there to be %d replicas (are: %d)", desiredReplicas, replicas)
-		if replicas != desiredReplicas {
-			return false, fmt.Errorf("number of replicas changed unexpectedly")
+		framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
+		if replicas < minDesiredReplicas {
+			return false, fmt.Errorf("number of replicas below target")
+		} else if replicas > maxDesiredReplicas {
+			return false, fmt.Errorf("number of replicas above target")
 		} else {
 			return false, nil // Expected number of replicas found. Continue polling until timeout.
 		}