Added HPA lightweight e2e test

This commit is contained in:
Piotr Szczesniak 2016-03-03 11:53:54 +01:00
parent 679e27c157
commit 60430ca1fb

View File

@ -33,16 +33,17 @@ const (
// These tests don't seem to be running properly in parallel: issue: #20338. // These tests don't seem to be running properly in parallel: issue: #20338.
// //
// These tests take ~20 minutes each.
var _ = Describe("Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slow]", func() { var _ = Describe("Horizontal pod autoscaling (scale resource: CPU)", func() {
var rc *ResourceConsumer var rc *ResourceConsumer
f := NewDefaultFramework("horizontal-pod-autoscaling") f := NewDefaultFramework("horizontal-pod-autoscaling")
titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5" titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability"
titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1" titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability"
// TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528). // TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
// Describe("Deployment", func() { // These tests take ~20 minutes each.
// Describe("[Serial] [Slow] Deployment", func() {
// // CPU tests via deployments // // CPU tests via deployments
// It(titleUp, func() { // It(titleUp, func() {
// scaleUp("deployment", kindDeployment, rc, f) // scaleUp("deployment", kindDeployment, rc, f)
@ -52,7 +53,8 @@ var _ = Describe("Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slo
// }) // })
// }) // })
Describe("ReplicationController", func() { // These tests take ~20 minutes each.
Describe("[Serial] [Slow] ReplicationController", func() {
// CPU tests via replication controllers // CPU tests via replication controllers
It(titleUp, func() { It(titleUp, func() {
scaleUp("rc", kindRC, rc, f) scaleUp("rc", kindRC, rc, f)
@ -61,71 +63,104 @@ var _ = Describe("Horizontal pod autoscaling (scale resource: CPU) [Serial] [Slo
scaleDown("rc", kindRC, rc, f) scaleDown("rc", kindRC, rc, f)
}) })
}) })
Describe("ReplicationController light", func() {
It("Should scale from 1 pod to 2 pods", func() {
scaleTest := &HPAScaleTest{
initPods: 1,
totalInitialCPUUsage: 150,
perPodCPURequest: 200,
targetCPUUtilizationPercent: 50,
minPods: 1,
maxPods: 2,
firstScale: 2,
}
scaleTest.run("rc-light", kindRC, rc, f)
})
It("Should scale from 2 pods to 1 pod using HPA version v1", func() {
scaleTest := &HPAScaleTest{
initPods: 2,
totalInitialCPUUsage: 50,
perPodCPURequest: 200,
targetCPUUtilizationPercent: 50,
minPods: 1,
maxPods: 2,
firstScale: 1,
useV1: true,
}
scaleTest.run("rc-light", kindRC, rc, f)
})
})
}) })
// HPAScaleTest struct is used by the run(...) method. // HPAScaleTest struct is used by the run(...) method.
type HPAScaleTest struct { type HPAScaleTest struct {
initPods int initPods int
cpuStart int totalInitialCPUUsage int
maxCPU int64 perPodCPURequest int64
idealCPU int targetCPUUtilizationPercent int
minPods int minPods int
maxPods int maxPods int
firstScale int firstScale int
firstScaleStasis time.Duration firstScaleStasis time.Duration
cpuBurst int cpuBurst int
secondScale int secondScale int
secondScaleStasis time.Duration secondScaleStasis time.Duration
useV1 bool
} }
// run is a method which runs an HPA lifecycle, from a starting state, to an expected final state. // run is a method which runs an HPA lifecycle, from a starting state, to an expected final state.
// The initial state is defined by the initPods parameter. // The initial state is defined by the initPods parameter.
// The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts. // The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
// The second state change is due to the CPU burst parameter, which HPA again responds to. // The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes. // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
func (scaleTest *HPAScaleTest) run(name, kind string, rc *ResourceConsumer, f *Framework) { func (scaleTest *HPAScaleTest) run(name, kind string, rc *ResourceConsumer, f *Framework) {
rc = NewDynamicResourceConsumer(name, kind, scaleTest.initPods, scaleTest.cpuStart, 0, scaleTest.maxCPU, 100, f) rc = NewDynamicResourceConsumer(name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, scaleTest.perPodCPURequest, 100, f)
defer rc.CleanUp() defer rc.CleanUp()
createCPUHorizontalPodAutoscaler(rc, scaleTest.idealCPU, scaleTest.minPods, scaleTest.maxPods) createCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods, scaleTest.useV1)
rc.WaitForReplicas(scaleTest.firstScale) rc.WaitForReplicas(scaleTest.firstScale)
rc.EnsureDesiredReplicas(scaleTest.firstScale, scaleTest.firstScaleStasis) if scaleTest.firstScaleStasis > 0 {
rc.ConsumeCPU(scaleTest.cpuBurst) rc.EnsureDesiredReplicas(scaleTest.firstScale, scaleTest.firstScaleStasis)
rc.WaitForReplicas(scaleTest.secondScale) }
if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
rc.ConsumeCPU(scaleTest.cpuBurst)
rc.WaitForReplicas(scaleTest.secondScale)
}
} }
func scaleUp(name, kind string, rc *ResourceConsumer, f *Framework) { func scaleUp(name, kind string, rc *ResourceConsumer, f *Framework) {
scaleTest := &HPAScaleTest{ scaleTest := &HPAScaleTest{
initPods: 1, initPods: 1,
cpuStart: 250, totalInitialCPUUsage: 250,
maxCPU: 500, perPodCPURequest: 500,
idealCPU: .2 * 100, targetCPUUtilizationPercent: 20,
minPods: 1, minPods: 1,
maxPods: 5, maxPods: 5,
firstScale: 3, firstScale: 3,
firstScaleStasis: 10 * time.Minute, firstScaleStasis: 10 * time.Minute,
cpuBurst: 700, cpuBurst: 700,
secondScale: 5, secondScale: 5,
} }
scaleTest.run(name, kind, rc, f) scaleTest.run(name, kind, rc, f)
} }
func scaleDown(name, kind string, rc *ResourceConsumer, f *Framework) { func scaleDown(name, kind string, rc *ResourceConsumer, f *Framework) {
scaleTest := &HPAScaleTest{ scaleTest := &HPAScaleTest{
initPods: 5, initPods: 5,
cpuStart: 400, totalInitialCPUUsage: 400,
maxCPU: 500, perPodCPURequest: 500,
idealCPU: .3 * 100, targetCPUUtilizationPercent: 30,
minPods: 1, minPods: 1,
maxPods: 5, maxPods: 5,
firstScale: 3, firstScale: 3,
firstScaleStasis: 10 * time.Minute, firstScaleStasis: 10 * time.Minute,
cpuBurst: 100, cpuBurst: 100,
secondScale: 1, secondScale: 1,
} }
scaleTest.run(name, kind, rc, f) scaleTest.run(name, kind, rc, f)
} }
func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int) { func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int, useV1 bool) {
hpa := &extensions.HorizontalPodAutoscaler{ hpa := &extensions.HorizontalPodAutoscaler{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: rc.name, Name: rc.name,
@ -142,6 +177,11 @@ func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma
CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: cpu}, CPUUtilization: &extensions.CPUTargetUtilization{TargetPercentage: cpu},
}, },
} }
_, errHPA := rc.framework.Client.Extensions().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa) var errHPA error
if useV1 {
_, errHPA = rc.framework.Client.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
} else {
_, errHPA = rc.framework.Client.Extensions().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
}
expectNoError(errHPA) expectNoError(errHPA)
} }