From 98153fcedc27b69d99c43ddafc3ca98a405898ea Mon Sep 17 00:00:00 2001
From: Jay Vyas
Date: Wed, 2 Dec 2015 17:51:34 -0500
Subject: [PATCH] HPA: parameterize stasis, minReplicas, maxReplicas as struct.

---
 test/e2e/horizontal_pod_autoscaling.go | 82 +++++++++++++++++++-------
 1 file changed, 61 insertions(+), 21 deletions(-)

diff --git a/test/e2e/horizontal_pod_autoscaling.go b/test/e2e/horizontal_pod_autoscaling.go
index 3205ae6b843..b55e593a063 100644
--- a/test/e2e/horizontal_pod_autoscaling.go
+++ b/test/e2e/horizontal_pod_autoscaling.go
@@ -26,10 +26,9 @@ import (
 )
 
 const (
-	kindRC           = "replicationController"
-	kindDeployment   = "deployment"
-	subresource      = "scale"
-	stabilityTimeout = 10 * time.Minute
+	kindRC         = "replicationController"
+	kindDeployment = "deployment"
+	subresource    = "scale"
 )
 
 var _ = Describe("Horizontal pod autoscaling (scale resource: CPU) [Skipped]", func() {
@@ -60,28 +59,69 @@ var _ = Describe("Horizontal pod autoscaling (scale resource: CPU) [Skipped]", f
 	})
 })
 
-func scaleUp(name, kind string, rc *ResourceConsumer, f *Framework) {
-	rc = NewDynamicResourceConsumer(name, kind, 1, 250, 0, 500, 100, f)
+// HPAScaleTest struct is used by the run(...) method below.
+type HPAScaleTest struct {
+	initPods          int
+	cpuStart          int
+	maxCPU            int64
+	idealCPU          int
+	minPods           int
+	maxPods           int
+	firstScale        int
+	firstScaleStasis  time.Duration
+	cpuBurst          int
+	secondScale       int
+	secondScaleStasis time.Duration
+}
+
+// run runs an HPA lifecycle, from a starting state to an expected final state.
+// The initial state is defined by the initPods parameter.
+// The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
+// The second state change is due to the CPU burst parameter, which HPA again responds to.
+// TODO: the use of 3 states is arbitrary; we could eventually make this test handle "n" states once it stabilizes.
+func (scaleTest *HPAScaleTest) run(name, kind string, rc *ResourceConsumer, f *Framework) {
+	rc = NewDynamicResourceConsumer(name, kind, scaleTest.initPods, scaleTest.cpuStart, 0, scaleTest.maxCPU, 100, f)
 	defer rc.CleanUp()
-	createCPUHorizontalPodAutoscaler(rc, 20)
-	rc.WaitForReplicas(3)
-	rc.EnsureDesiredReplicas(3, stabilityTimeout)
-	rc.ConsumeCPU(700)
-	rc.WaitForReplicas(5)
+	createCPUHorizontalPodAutoscaler(rc, scaleTest.idealCPU, scaleTest.minPods, scaleTest.maxPods)
+	rc.WaitForReplicas(scaleTest.firstScale)
+	rc.EnsureDesiredReplicas(scaleTest.firstScale, scaleTest.firstScaleStasis)
+	rc.ConsumeCPU(scaleTest.cpuBurst)
+	rc.WaitForReplicas(scaleTest.secondScale)
+}
+
+func scaleUp(name, kind string, rc *ResourceConsumer, f *Framework) {
+	scaleTest := &HPAScaleTest{
+		initPods:         1,
+		cpuStart:         250,
+		maxCPU:           500,
+		idealCPU:         .2 * 100,
+		minPods:          1,
+		maxPods:          5,
+		firstScale:       3,
+		firstScaleStasis: 10 * time.Minute,
+		cpuBurst:         700,
+		secondScale:      5,
+	}
+	scaleTest.run(name, kind, rc, f)
 }
 
 func scaleDown(name, kind string, rc *ResourceConsumer, f *Framework) {
-	rc = NewDynamicResourceConsumer(name, kind, 5, 400, 0, 500, 100, f)
-	defer rc.CleanUp()
-	createCPUHorizontalPodAutoscaler(rc, 30)
-	rc.WaitForReplicas(3)
-	rc.EnsureDesiredReplicas(3, stabilityTimeout)
-	rc.ConsumeCPU(100)
-	rc.WaitForReplicas(1)
+	scaleTest := &HPAScaleTest{
+		initPods:         5,
+		cpuStart:         400,
+		maxCPU:           500,
+		idealCPU:         .3 * 100,
+		minPods:          1,
+		maxPods:          5,
+		firstScale:       3,
+		firstScaleStasis: 10 * time.Minute,
+		cpuBurst:         100,
+		secondScale:      1,
+	}
+	scaleTest.run(name, kind, rc, f)
 }
 
-func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu int) {
-	minReplicas := 1
+func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int) {
 	hpa := &extensions.HorizontalPodAutoscaler{
 		ObjectMeta: api.ObjectMeta{
 			Name:      rc.name,
@@ -94,7 +134,7 @@ func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu int) {
 				Subresource: subresource,
 			},
 			MinReplicas:    &minReplicas,
-			MaxReplicas:    5,
+			MaxReplicas:    maxRepl,
 			CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: cpu},
 		},
 	}
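
With the parameters pulled into HPAScaleTest, a further scale scenario only needs a new struct literal plus a call to run. Below is a minimal sketch of such a variant; it is not part of the patch, the name scaleUpWide and its values are hypothetical, and it assumes it lives in the same e2e package so that HPAScaleTest, ResourceConsumer, and Framework are visible and time is imported.

// Hypothetical variant (illustration only, not in this patch): a scale-up test
// with a wider replica range, expressed through the new HPAScaleTest struct.
func scaleUpWide(name, kind string, rc *ResourceConsumer, f *Framework) {
	scaleTest := &HPAScaleTest{
		initPods:         2,                // initial replica count handed to NewDynamicResourceConsumer
		cpuStart:         300,              // CPU consumed from the start
		maxCPU:           500,              // CPU limit handed to NewDynamicResourceConsumer
		idealCPU:         .2 * 100,         // target CPU utilization percentage for the HPA
		minPods:          1,                // HPA MinReplicas
		maxPods:          10,               // HPA MaxReplicas
		firstScale:       4,                // replica count expected after the initial load
		firstScaleStasis: 10 * time.Minute, // how long firstScale must hold steady
		cpuBurst:         900,              // CPU consumed after the stasis window
		secondScale:      8,                // replica count expected after the burst
	}
	scaleTest.run(name, kind, rc, f)
}

Keeping the expected replica counts, stasis window, and HPA bounds as data rather than hard-coded constants is what lets scaleUp and scaleDown above share a single lifecycle implementation.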