From e2394bd3b8f5f0d3c2ef0404a016ac18119a8427 Mon Sep 17 00:00:00 2001
From: Joachim Bartosik
Date: Fri, 19 Oct 2018 14:59:24 +0200
Subject: [PATCH] Deflake e2e tests of HPA

Resource consumer might use slightly more CPU than requested. That
resulted in HPA sometimes increasing the size of deployments during e2e
tests.

Deflake the tests by:
- Scaling up CPU requests in those tests. The resource consumer might go
  a fixed number of milli CPU seconds above target, so having higher
  requests makes the test less sensitive.
- On scale down, consuming CPU in the middle between what would generate
  a recommendation of the expected size and one pod fewer (instead of
  right on the edge between expected and expected + 1).

Some variables were int32 but always cast to int before use. Make them
int.
---
 .../autoscaling/horizontal_pod_autoscaling.go | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/test/e2e/autoscaling/horizontal_pod_autoscaling.go b/test/e2e/autoscaling/horizontal_pod_autoscaling.go
index f62f3757c25..37efa457428 100644
--- a/test/e2e/autoscaling/horizontal_pod_autoscaling.go
+++ b/test/e2e/autoscaling/horizontal_pod_autoscaling.go
@@ -96,8 +96,8 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
 
 // HPAScaleTest struct is used by the scale(...) function.
 type HPAScaleTest struct {
-	initPods                    int32
-	totalInitialCPUUsage        int32
+	initPods                    int
+	totalInitialCPUUsage        int
 	perPodCPURequest            int64
 	targetCPUUtilizationPercent int32
 	minPods                     int32
@@ -116,7 +116,7 @@ type HPAScaleTest struct {
 // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
 func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
 	const timeToWait = 15 * time.Minute
-	rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter)
+	rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter)
 	defer rc.CleanUp()
 	hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
 	defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
@@ -137,14 +137,14 @@ func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc
 	}
 	scaleTest := &HPAScaleTest{
 		initPods:                    1,
-		totalInitialCPUUsage:        250,
-		perPodCPURequest:            500,
+		totalInitialCPUUsage:        500,
+		perPodCPURequest:            1000,
 		targetCPUUtilizationPercent: 20,
 		minPods:                     1,
 		maxPods:                     5,
 		firstScale:                  3,
 		firstScaleStasis:            stasis,
-		cpuBurst:                    700,
+		cpuBurst:                    1400,
 		secondScale:                 5,
 	}
 	scaleTest.run(name, kind, rc, f)
@@ -157,8 +157,8 @@ func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, r
 	}
 	scaleTest := &HPAScaleTest{
 		initPods:                    5,
-		totalInitialCPUUsage:        375,
-		perPodCPURequest:            500,
+		totalInitialCPUUsage:        650,
+		perPodCPURequest:            1000,
 		targetCPUUtilizationPercent: 30,
 		minPods:                     1,
 		maxPods:                     5,
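
Illustrative sketch, not part of the patch: the headroom that the new scale-down
constants buy can be sanity-checked with the simplified HPA rule
desiredReplicas = ceil(totalCPUUsage / (perPodCPURequest * targetUtilization)),
assuming all pods have identical requests. The real controller also applies a
tolerance band and readiness handling, which this ignores; recommendedReplicas
is a hypothetical helper name used only for this example.

package main

import (
	"fmt"
	"math"
)

// recommendedReplicas is a simplified stand-in for the HPA recommendation:
// ceil(totalUsage / (perPodRequest * targetUtilization)). It only serves to
// illustrate the test constants above; it is not the controller's actual code.
func recommendedReplicas(totalUsageMilli, perPodRequestMilli int64, targetPercent int32) int {
	perPodTargetMilli := float64(perPodRequestMilli) * float64(targetPercent) / 100.0
	return int(math.Ceil(float64(totalUsageMilli) / perPodTargetMilli))
}

func main() {
	// Old scale-down constants: 375m of usage, 500m requests, 30% target.
	// The recommendation is 3 replicas, but anything above 450m already
	// flips it to 4, leaving only ~75m of headroom for the resource
	// consumer overshooting its target.
	fmt.Println(recommendedReplicas(375, 500, 30)) // 3
	fmt.Println(recommendedReplicas(451, 500, 30)) // 4

	// New constants: 650m of usage, 1000m requests, 30% target.
	// The recommendation is still 3 replicas and stays there until usage
	// exceeds 900m, so the resource consumer can overshoot by up to ~250m
	// without the test flaking.
	fmt.Println(recommendedReplicas(650, 1000, 30)) // 3
	fmt.Println(recommendedReplicas(901, 1000, 30)) // 4
}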