Merge pull request #112252 from piotrnosek/hpa-average-metrics

Add e2e HPA Tests: CPU and Memory Average with different aggregation methods: Average Value and Utilization
Kubernetes Prow Robot 2022-09-12 09:41:24 -07:00 committed by GitHub
commit d7d82ad972
5 changed files with 336 additions and 205 deletions
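For orientation, the two aggregation methods exercised by these tests correspond to the two autoscaling/v2 MetricTarget shapes sketched below. This is an illustrative snippet, not code from the diff; the values are invented:

    package main

    import (
    	"fmt"

    	autoscalingv2 "k8s.io/api/autoscaling/v2"
    	"k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
    	// Utilization: the target is a percentage of the pods' resource requests.
    	targetPercent := int32(30)
    	utilization := autoscalingv2.MetricTarget{
    		Type:               autoscalingv2.UtilizationMetricType,
    		AverageUtilization: &targetPercent,
    	}

    	// AverageValue: the target is an absolute per-pod quantity (here 150 millicores).
    	averageValue := autoscalingv2.MetricTarget{
    		Type:         autoscalingv2.AverageValueMetricType,
    		AverageValue: resource.NewMilliQuantity(150, resource.DecimalSI),
    	}

    	fmt.Println(utilization.Type, averageValue.Type)
    }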

View File

@@ -109,7 +109,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 		// Enable Horizontal Pod Autoscaler with 50% target utilization and
 		// scale up the CPU usage to trigger autoscaling to 8 pods for target to be satisfied.
 		targetCPUUtilizationPercent := int32(50)
-		hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10)
+		hpa := e2eautoscaling.CreateCPUResourceHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10)
 		defer e2eautoscaling.DeleteHorizontalPodAutoscaler(resourceConsumer, hpa.Name)
 		cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100 // 8 pods utilized to the target level
 		resourceConsumer.ConsumeCPU(int(cpuLoad))
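As a worked example of the cpuLoad arithmetic above, assuming cpuRequestMillis = 500 (that constant is defined outside this hunk, so the value here is illustrative only): 8 pods × 500 mCPU × 50 / 100 = 2000 mCPU of total load, i.e. exactly the 50% utilization target spread across 8 pods.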

View File

@@ -19,14 +19,25 @@ package autoscaling
 import (
 	"time"
 
+	"github.com/onsi/ginkgo/v2"
 	"k8s.io/pod-security-admission/api"
 
 	autoscalingv2 "k8s.io/api/autoscaling/v2"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
-
-	"github.com/onsi/ginkgo/v2"
 )
+
+const (
+	titleUp                 = "Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods"
+	titleDown               = "Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod"
+	titleAverageUtilization = " using Average Utilization for aggregation"
+	titleAverageValue       = " using Average Value for aggregation"
+	valueMetricType         = autoscalingv2.AverageValueMetricType
+	utilizationMetricType   = autoscalingv2.UtilizationMetricType
+	cpuResource             = v1.ResourceCPU
+	memResource             = v1.ResourceMemory
+)
 
 // These tests don't seem to be running properly in parallel: issue: #20338.
@@ -34,109 +45,143 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
 	f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
 	f.NamespacePodSecurityEnforceLevel = api.LevelBaseline
 
-	titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
-	titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1"
-
-	ginkgo.Describe("[Serial] [Slow] Deployment", func() {
-		// CPU tests via deployments
-		ginkgo.It(titleUp, func() {
-			scaleUp("test-deployment", e2eautoscaling.KindDeployment, false, f)
+	ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() {
+		ginkgo.It(titleUp+titleAverageUtilization, func() {
+			scaleUp("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f)
 		})
-		ginkgo.It(titleDown, func() {
-			scaleDown("test-deployment", e2eautoscaling.KindDeployment, false, f)
+		ginkgo.It(titleDown+titleAverageUtilization, func() {
+			scaleDown("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f)
+		})
+		ginkgo.It(titleUp+titleAverageValue, func() {
+			scaleUp("test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, false, f)
+		})
+	})
+
+	ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() {
+		ginkgo.It(titleUp+titleAverageUtilization, func() {
+			scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, f)
+		})
+		ginkgo.It(titleUp+titleAverageValue, func() {
+			scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, f)
 		})
 	})
 
 	ginkgo.Describe("[Serial] [Slow] ReplicaSet", func() {
-		// CPU tests via ReplicaSets
 		ginkgo.It(titleUp, func() {
-			scaleUp("rs", e2eautoscaling.KindReplicaSet, false, f)
+			scaleUp("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f)
 		})
 		ginkgo.It(titleDown, func() {
-			scaleDown("rs", e2eautoscaling.KindReplicaSet, false, f)
+			scaleDown("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f)
 		})
 	})
 
 	// These tests take ~20 minutes each.
 	ginkgo.Describe("[Serial] [Slow] ReplicationController", func() {
-		// CPU tests via replication controllers
 		ginkgo.It(titleUp+" and verify decision stability", func() {
-			scaleUp("rc", e2eautoscaling.KindRC, true, f)
+			scaleUp("rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f)
 		})
 		ginkgo.It(titleDown+" and verify decision stability", func() {
-			scaleDown("rc", e2eautoscaling.KindRC, true, f)
+			scaleDown("rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f)
 		})
 	})
 
 	ginkgo.Describe("ReplicationController light", func() {
 		ginkgo.It("Should scale from 1 pod to 2 pods", func() {
-			scaleTest := &HPAScaleTest{
-				initPods:                    1,
-				totalInitialCPUUsage:        150,
-				perPodCPURequest:            200,
-				targetCPUUtilizationPercent: 50,
-				minPods:                     1,
-				maxPods:                     2,
-				firstScale:                  2,
+			st := &HPAScaleTest{
+				initPods:         1,
+				initCPUTotal:     150,
+				perPodCPURequest: 200,
+				targetValue:      50,
+				minPods:          1,
+				maxPods:          2,
+				firstScale:       2,
 			}
-			scaleTest.run("rc-light", e2eautoscaling.KindRC, f)
+			st.run("rc-light", e2eautoscaling.KindRC, f)
 		})
-		ginkgo.It("Should scale from 2 pods to 1 pod [Slow]", func() {
-			scaleTest := &HPAScaleTest{
-				initPods:                    2,
-				totalInitialCPUUsage:        50,
-				perPodCPURequest:            200,
-				targetCPUUtilizationPercent: 50,
-				minPods:                     1,
-				maxPods:                     2,
-				firstScale:                  1,
+		ginkgo.It("[Slow] Should scale from 2 pods to 1 pod", func() {
+			st := &HPAScaleTest{
+				initPods:         2,
+				initCPUTotal:     50,
+				perPodCPURequest: 200,
+				targetValue:      50,
+				minPods:          1,
+				maxPods:          2,
+				firstScale:       1,
 			}
-			scaleTest.run("rc-light", e2eautoscaling.KindRC, f)
+			st.run("rc-light", e2eautoscaling.KindRC, f)
 		})
 	})
 
 	ginkgo.Describe("[Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case)", func() {
 		// ContainerResource CPU autoscaling on idle sidecar
 		ginkgo.It(titleUp+" on a busy application with an idle sidecar container", func() {
-			scaleOnIdleSideCar("rs", e2eautoscaling.KindReplicaSet, false, f)
+			scaleOnIdleSideCar("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f)
 		})
 
 		// ContainerResource CPU autoscaling on busy sidecar
 		ginkgo.It("Should not scale up on a busy sidecar with an idle application", func() {
-			doNotScaleOnBusySidecar("rs", e2eautoscaling.KindReplicaSet, true, f)
+			doNotScaleOnBusySidecar("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, true, f)
 		})
 	})
 
 	ginkgo.Describe("CustomResourceDefinition", func() {
 		ginkgo.It("Should scale with a CRD targetRef", func() {
-			scaleTest := &HPAScaleTest{
-				initPods:                    1,
-				totalInitialCPUUsage:        150,
-				perPodCPURequest:            200,
-				targetCPUUtilizationPercent: 50,
-				minPods:                     1,
-				maxPods:                     2,
-				firstScale:                  2,
-				targetRef:                   e2eautoscaling.CustomCRDTargetRef(),
+			st := &HPAScaleTest{
+				initPods:         1,
+				initCPUTotal:     150,
+				perPodCPURequest: 200,
+				targetValue:      50,
+				minPods:          1,
+				maxPods:          2,
+				firstScale:       2,
+				targetRef:        e2eautoscaling.CustomCRDTargetRef(),
 			}
-			scaleTest.run("crd-light", e2eautoscaling.KindCRD, f)
+			st.run("crd-light", e2eautoscaling.KindCRD, f)
+		})
+	})
+})
+
+var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: Memory)", func() {
+	f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
+	f.NamespacePodSecurityEnforceLevel = api.LevelBaseline
+
+	ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() {
+		ginkgo.It(titleUp+titleAverageUtilization, func() {
+			scaleUp("test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, false, f)
+		})
+		ginkgo.It(titleUp+titleAverageValue, func() {
+			scaleUp("test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, false, f)
+		})
+	})
+
+	ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() {
+		ginkgo.It(titleUp+titleAverageUtilization, func() {
+			scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, f)
+		})
+		ginkgo.It(titleUp+titleAverageValue, func() {
+			scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, f)
 		})
 	})
 })
 // HPAScaleTest struct is used by the scale(...) function.
 type HPAScaleTest struct {
-	initPods                    int
-	totalInitialCPUUsage        int
-	perPodCPURequest            int64
-	targetCPUUtilizationPercent int32
-	minPods                     int32
-	maxPods                     int32
-	firstScale                  int
-	firstScaleStasis            time.Duration
-	cpuBurst                    int
-	secondScale                 int32
-	targetRef                   autoscalingv2.CrossVersionObjectReference
+	initPods         int
+	initCPUTotal     int
+	initMemTotal     int
+	perPodCPURequest int64
+	perPodMemRequest int64
+	targetValue      int32
+	minPods          int32
+	maxPods          int32
+	firstScale       int
+	firstScaleStasis time.Duration
+	cpuBurst         int
+	memBurst         int
+	secondScale      int32
+	targetRef        autoscalingv2.CrossVersionObjectReference
+	resourceType     v1.ResourceName
+	metricTargetType autoscalingv2.MetricTargetType
 }
 // run is a method which runs an HPA lifecycle, from a starting state, to an expected
@@ -144,145 +189,226 @@ type HPAScaleTest struct {
 // The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
 // The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
 // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
-func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) {
+func (st *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) {
 	const timeToWait = 15 * time.Minute
-	rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
+	initCPUTotal, initMemTotal := 0, 0
+	if st.resourceType == cpuResource {
+		initCPUTotal = st.initCPUTotal
+	} else if st.resourceType == memResource {
+		initMemTotal = st.initMemTotal
+	}
+	rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perPodCPURequest, st.perPodMemRequest, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
 	defer rc.CleanUp()
-	var hpa *autoscalingv2.HorizontalPodAutoscaler
-	hpa = e2eautoscaling.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
+	hpa := e2eautoscaling.CreateResourceHorizontalPodAutoscaler(rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods)
 	defer e2eautoscaling.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
-	rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
-	if scaleTest.firstScaleStasis > 0 {
-		rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis, hpa.Name)
+	rc.WaitForReplicas(st.firstScale, timeToWait)
+	if st.firstScaleStasis > 0 {
+		rc.EnsureDesiredReplicasInRange(st.firstScale, st.firstScale+1, st.firstScaleStasis, hpa.Name)
 	}
-	if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
-		rc.ConsumeCPU(scaleTest.cpuBurst)
-		rc.WaitForReplicas(int(scaleTest.secondScale), timeToWait)
+	if st.resourceType == cpuResource && st.cpuBurst > 0 && st.secondScale > 0 {
+		rc.ConsumeCPU(st.cpuBurst)
+		rc.WaitForReplicas(int(st.secondScale), timeToWait)
+	}
+	if st.resourceType == memResource && st.memBurst > 0 && st.secondScale > 0 {
+		rc.ConsumeMem(st.memBurst)
+		rc.WaitForReplicas(int(st.secondScale), timeToWait)
 	}
 }
-func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
+func scaleUp(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) {
 	stasis := 0 * time.Minute
 	if checkStability {
 		stasis = 10 * time.Minute
 	}
-	scaleTest := &HPAScaleTest{
-		initPods:                    1,
-		totalInitialCPUUsage:        250,
-		perPodCPURequest:            500,
-		targetCPUUtilizationPercent: 20,
-		minPods:                     1,
-		maxPods:                     5,
-		firstScale:                  3,
-		firstScaleStasis:            stasis,
-		cpuBurst:                    700,
-		secondScale:                 5,
+	st := &HPAScaleTest{
+		initPods:         1,
+		perPodCPURequest: 500,
+		perPodMemRequest: 500,
+		targetValue:      getTargetValueByType(150, 30, metricTargetType),
+		minPods:          1,
+		maxPods:          5,
+		firstScale:       3,
+		firstScaleStasis: stasis,
+		secondScale:      5,
+		resourceType:     resourceType,
+		metricTargetType: metricTargetType,
 	}
-	scaleTest.run(name, kind, f)
+	if resourceType == cpuResource {
+		st.initCPUTotal = 250
+		st.cpuBurst = 700
+	}
+	if resourceType == memResource {
+		st.initMemTotal = 250
+		st.memBurst = 700
+	}
+	st.run(name, kind, f)
 }
-func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
+func scaleDown(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) {
 	stasis := 0 * time.Minute
 	if checkStability {
 		stasis = 10 * time.Minute
 	}
-	scaleTest := &HPAScaleTest{
-		initPods:                    5,
-		totalInitialCPUUsage:        325,
-		perPodCPURequest:            500,
-		targetCPUUtilizationPercent: 30,
-		minPods:                     1,
-		maxPods:                     5,
-		firstScale:                  3,
-		firstScaleStasis:            stasis,
-		cpuBurst:                    10,
-		secondScale:                 1,
+	st := &HPAScaleTest{
+		initPods:         5,
+		perPodCPURequest: 500,
+		perPodMemRequest: 500,
+		targetValue:      getTargetValueByType(150, 30, metricTargetType),
+		minPods:          1,
+		maxPods:          5,
+		firstScale:       3,
+		firstScaleStasis: stasis,
+		cpuBurst:         10,
+		secondScale:      1,
+		resourceType:     resourceType,
+		metricTargetType: metricTargetType,
 	}
-	scaleTest.run(name, kind, f)
+	if resourceType == cpuResource {
+		st.initCPUTotal = 325
+		st.cpuBurst = 10
+	}
+	if resourceType == memResource {
+		st.initMemTotal = 325
+		st.memBurst = 10
+	}
+	st.run(name, kind, f)
 }
 type HPAContainerResourceScaleTest struct {
-	initPods                    int
-	totalInitialCPUUsage        int
-	perContainerCPURequest      int64
-	targetCPUUtilizationPercent int32
-	minPods                     int32
-	maxPods                     int32
-	noScale                     bool
-	noScaleStasis               time.Duration
-	firstScale                  int
-	firstScaleStasis            time.Duration
-	cpuBurst                    int
-	secondScale                 int32
-	sidecarStatus               e2eautoscaling.SidecarStatusType
-	sidecarType                 e2eautoscaling.SidecarWorkloadType
+	initPods               int
+	initCPUTotal           int
+	initMemTotal           int
+	perContainerCPURequest int64
+	perContainerMemRequest int64
+	targetValue            int32
+	minPods                int32
+	maxPods                int32
+	noScale                bool
+	noScaleStasis          time.Duration
+	firstScale             int
+	firstScaleStasis       time.Duration
+	cpuBurst               int
+	memBurst               int
+	secondScale            int32
+	sidecarStatus          e2eautoscaling.SidecarStatusType
+	sidecarType            e2eautoscaling.SidecarWorkloadType
+	resourceType           v1.ResourceName
+	metricTargetType       autoscalingv2.MetricTargetType
 }
-func (scaleTest *HPAContainerResourceScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) {
+func (st *HPAContainerResourceScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) {
 	const timeToWait = 15 * time.Minute
-	rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perContainerCPURequest, 200, f.ClientSet, f.ScalesGetter, scaleTest.sidecarStatus, scaleTest.sidecarType)
+	initCPUTotal, initMemTotal := 0, 0
+	if st.resourceType == cpuResource {
+		initCPUTotal = st.initCPUTotal
+	} else if st.resourceType == memResource {
+		initMemTotal = st.initMemTotal
+	}
+	rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perContainerCPURequest, st.perContainerMemRequest, f.ClientSet, f.ScalesGetter, st.sidecarStatus, st.sidecarType)
 	defer rc.CleanUp()
-	hpa := e2eautoscaling.CreateContainerResourceCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
+	hpa := e2eautoscaling.CreateContainerResourceHorizontalPodAutoscaler(rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods)
 	defer e2eautoscaling.DeleteContainerResourceHPA(rc, hpa.Name)
-	if scaleTest.noScale {
-		if scaleTest.noScaleStasis > 0 {
-			rc.EnsureDesiredReplicasInRange(scaleTest.initPods, scaleTest.initPods, scaleTest.noScaleStasis, hpa.Name)
+	if st.noScale {
+		if st.noScaleStasis > 0 {
+			rc.EnsureDesiredReplicasInRange(st.initPods, st.initPods, st.noScaleStasis, hpa.Name)
 		}
 	} else {
-		rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
-		if scaleTest.firstScaleStasis > 0 {
-			rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis, hpa.Name)
+		rc.WaitForReplicas(st.firstScale, timeToWait)
+		if st.firstScaleStasis > 0 {
+			rc.EnsureDesiredReplicasInRange(st.firstScale, st.firstScale+1, st.firstScaleStasis, hpa.Name)
 		}
-		if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
-			rc.ConsumeCPU(scaleTest.cpuBurst)
-			rc.WaitForReplicas(int(scaleTest.secondScale), timeToWait)
+		if st.resourceType == cpuResource && st.cpuBurst > 0 && st.secondScale > 0 {
+			rc.ConsumeCPU(st.cpuBurst)
+			rc.WaitForReplicas(int(st.secondScale), timeToWait)
+		}
+		if st.resourceType == memResource && st.memBurst > 0 && st.secondScale > 0 {
+			rc.ConsumeMem(st.memBurst)
+			rc.WaitForReplicas(int(st.secondScale), timeToWait)
 		}
 	}
 }
-func scaleOnIdleSideCar(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
+func scaleUpContainerResource(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, f *framework.Framework) {
+	st := &HPAContainerResourceScaleTest{
+		initPods:               1,
+		perContainerCPURequest: 500,
+		perContainerMemRequest: 500,
+		targetValue:            getTargetValueByType(100, 20, metricTargetType),
+		minPods:                1,
+		maxPods:                5,
+		firstScale:             3,
+		firstScaleStasis:       0,
+		secondScale:            5,
+		resourceType:           resourceType,
+		metricTargetType:       metricTargetType,
+		sidecarStatus:          e2eautoscaling.Disable,
+		sidecarType:            e2eautoscaling.Idle,
+	}
+	if resourceType == cpuResource {
+		st.initCPUTotal = 250
+		st.cpuBurst = 700
+	}
+	if resourceType == memResource {
+		st.initMemTotal = 250
+		st.memBurst = 700
+	}
+	st.run(name, kind, f)
+}
+
+func scaleOnIdleSideCar(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) {
 	// Scale up on a busy application with an idle sidecar container
 	stasis := 0 * time.Minute
 	if checkStability {
 		stasis = 10 * time.Minute
 	}
-	scaleTest := &HPAContainerResourceScaleTest{
-		initPods:                    1,
-		totalInitialCPUUsage:        125,
-		perContainerCPURequest:      250,
-		targetCPUUtilizationPercent: 20,
-		minPods:                     1,
-		maxPods:                     5,
-		firstScale:                  3,
-		firstScaleStasis:            stasis,
-		cpuBurst:                    500,
-		secondScale:                 5,
-		sidecarStatus:               e2eautoscaling.Enable,
-		sidecarType:                 e2eautoscaling.Idle,
+	st := &HPAContainerResourceScaleTest{
+		initPods:               1,
+		initCPUTotal:           125,
+		perContainerCPURequest: 250,
+		targetValue:            20,
+		minPods:                1,
+		maxPods:                5,
+		firstScale:             3,
+		firstScaleStasis:       stasis,
+		cpuBurst:               500,
+		secondScale:            5,
+		resourceType:           resourceType,
+		metricTargetType:       metricTargetType,
+		sidecarStatus:          e2eautoscaling.Enable,
+		sidecarType:            e2eautoscaling.Idle,
 	}
-	scaleTest.run(name, kind, f)
+	st.run(name, kind, f)
 }
-func doNotScaleOnBusySidecar(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
+func doNotScaleOnBusySidecar(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) {
 	// Do not scale up on a busy sidecar with an idle application
 	stasis := 0 * time.Minute
 	if checkStability {
 		stasis = 1 * time.Minute
 	}
-	scaleTest := &HPAContainerResourceScaleTest{
-		initPods:                    1,
-		totalInitialCPUUsage:        250,
-		perContainerCPURequest:      500,
-		targetCPUUtilizationPercent: 20,
-		minPods:                     1,
-		maxPods:                     5,
-		cpuBurst:                    700,
-		sidecarStatus:               e2eautoscaling.Enable,
-		sidecarType:                 e2eautoscaling.Busy,
-		noScale:                     true,
-		noScaleStasis:               stasis,
+	st := &HPAContainerResourceScaleTest{
+		initPods:               1,
+		initCPUTotal:           250,
+		perContainerCPURequest: 500,
+		targetValue:            20,
+		minPods:                1,
+		maxPods:                5,
+		cpuBurst:               700,
+		sidecarStatus:          e2eautoscaling.Enable,
+		sidecarType:            e2eautoscaling.Busy,
+		resourceType:           resourceType,
+		metricTargetType:       metricTargetType,
+		noScale:                true,
+		noScaleStasis:          stasis,
 	}
-	scaleTest.run(name, kind, f)
+	st.run(name, kind, f)
+}
+
+func getTargetValueByType(averageValueTarget, averageUtilizationTarget int, targetType autoscalingv2.MetricTargetType) int32 {
+	if targetType == utilizationMetricType {
+		return int32(averageUtilizationTarget)
+	}
+	return int32(averageValueTarget)
 }
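A note on the units in getTargetValueByType above: the first argument is an absolute AverageValue target and the second a Utilization percentage; the unit of the returned number is only fixed later by CreateMetricTargetWithType in the framework (millicores for CPU, mebibytes for memory). A hypothetical call mirroring scaleUp, assuming the identifiers from this patch are in scope:

    // Returns 30 (percent of the pod request) for Utilization targets;
    // returns 150 for AverageValue targets, later interpreted as
    // 150 millicores (CPU) or 150 MiB (memory) by CreateMetricTargetWithType.
    utilization := getTargetValueByType(150, 30, utilizationMetricType) // -> 30
    averageValue := getTargetValueByType(150, 30, valueMetricType)      // -> 150
    _, _ = utilization, averageValue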

View File

@@ -27,7 +27,7 @@ import (
 	"github.com/onsi/ginkgo/v2"
 )
 
-var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] [Feature:BHPA] Horizontal pod autoscaling (non-default behavior)", func() {
+var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior)", func() {
 	f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged

View File

@@ -63,6 +63,7 @@ const (
 	customMetricName              = "QPS"
 	serviceInitializationTimeout  = 2 * time.Minute
 	serviceInitializationInterval = 15 * time.Second
+	megabytes                     = 1024 * 1024
 )
 
 var (
@@ -90,8 +91,8 @@ const (
 )
 
 /*
-ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory (Warning: memory not supported)
-typical use case:
+ResourceConsumer is a tool for testing. It helps to create a specified usage of CPU or memory.
+Typical use case:
 rc.ConsumeCPU(600)
 // ... check your assumption here
 rc.ConsumeCPU(300)
@@ -146,8 +147,8 @@ func getSidecarContainer(name string, cpuLimit, memLimit int64) v1.Container {
 	}
 
 	if memLimit > 0 {
-		container.Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(memLimit*1024*1024, resource.DecimalSI)
-		container.Resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(memLimit*1024*1024, resource.DecimalSI)
+		container.Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(memLimit*megabytes, resource.DecimalSI)
+		container.Resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(memLimit*megabytes, resource.DecimalSI)
 	}
 
 	return container
@@ -617,7 +618,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
 		c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
 }
 
-func CreateCpuHorizontalPodAutoscalerWithCustomTargetRef(rc *ResourceConsumer, targetRef autoscalingv2.CrossVersionObjectReference, namespace string, cpu, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler {
+func CreateHorizontalPodAutoscaler(rc *ResourceConsumer, targetRef autoscalingv2.CrossVersionObjectReference, namespace string, metrics []autoscalingv2.MetricSpec, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler {
 	hpa := &autoscalingv2.HorizontalPodAutoscaler{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      targetRef.Name,
@@ -627,35 +628,34 @@ func CreateCpuHorizontalPodAutoscalerWithCustomTargetRef(rc *ResourceConsumer, t
 			ScaleTargetRef: targetRef,
 			MinReplicas:    &minReplicas,
 			MaxReplicas:    maxReplicas,
-			Metrics: []autoscalingv2.MetricSpec{
-				{
-					Type: autoscalingv2.ResourceMetricSourceType,
-					Resource: &autoscalingv2.ResourceMetricSource{
-						Name: v1.ResourceCPU,
-						Target: autoscalingv2.MetricTarget{
-							Type:               autoscalingv2.UtilizationMetricType,
-							AverageUtilization: &cpu,
-						},
-					},
-				},
-			},
+			Metrics:        metrics,
 		},
 	}
-	hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa, metav1.CreateOptions{})
+	hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(namespace).Create(context.TODO(), hpa, metav1.CreateOptions{})
 	framework.ExpectNoError(errHPA)
 	return hpa
 }
 
-// CreateCPUHorizontalPodAutoscaler create a horizontalPodAutoscaler with CPU target
-// for consuming resources.
-func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler {
+func CreateResourceHorizontalPodAutoscaler(rc *ResourceConsumer, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler {
 	targetRef := autoscalingv2.CrossVersionObjectReference{
 		APIVersion: rc.kind.GroupVersion().String(),
 		Kind:       rc.kind.Kind,
 		Name:       rc.name,
 	}
-	return CreateCpuHorizontalPodAutoscalerWithCustomTargetRef(rc, targetRef, rc.nsName, cpu, minReplicas, maxReplicas)
+	metrics := []autoscalingv2.MetricSpec{
+		{
+			Type: autoscalingv2.ResourceMetricSourceType,
+			Resource: &autoscalingv2.ResourceMetricSource{
+				Name:   resourceType,
+				Target: CreateMetricTargetWithType(resourceType, metricTargetType, metricTargetValue),
+			},
+		},
+	}
+	return CreateHorizontalPodAutoscaler(rc, targetRef, rc.nsName, metrics, resourceType, metricTargetType, metricTargetValue, minReplicas, maxReplicas)
+}
+
+func CreateCPUResourceHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler {
+	return CreateResourceHorizontalPodAutoscaler(rc, v1.ResourceCPU, autoscalingv2.UtilizationMetricType, cpu, minReplicas, maxReplicas)
 }
 // DeleteHorizontalPodAutoscaler delete the horizontalPodAutoscaler for consuming resources.
@@ -679,40 +679,23 @@ func runReplicaSet(config testutils.ReplicaSetConfig) error {
 	return testutils.RunReplicaSet(config)
 }
 
-// CreateContainerResourceCPUHorizontalPodAutoscaler create a horizontal pod autoscaler with container resource target
-// for consuming resources.
-func CreateContainerResourceCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) *autoscalingv2.HorizontalPodAutoscaler {
-	hpa := &autoscalingv2.HorizontalPodAutoscaler{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      rc.name,
-			Namespace: rc.nsName,
-		},
-		Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
-			ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
-				APIVersion: rc.kind.GroupVersion().String(),
-				Kind:       rc.kind.Kind,
-				Name:       rc.name,
-			},
-			MinReplicas: &minReplicas,
-			MaxReplicas: maxRepl,
-			Metrics: []autoscalingv2.MetricSpec{
-				{
-					Type: "ContainerResource",
-					ContainerResource: &autoscalingv2.ContainerResourceMetricSource{
-						Name:      "cpu",
-						Container: rc.name,
-						Target: autoscalingv2.MetricTarget{
-							Type:               "Utilization",
-							AverageUtilization: &cpu,
-						},
-					},
-				},
-			},
-		},
-	}
-	hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa, metav1.CreateOptions{})
-	framework.ExpectNoError(errHPA)
-	return hpa
+func CreateContainerResourceHorizontalPodAutoscaler(rc *ResourceConsumer, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler {
+	targetRef := autoscalingv2.CrossVersionObjectReference{
+		APIVersion: rc.kind.GroupVersion().String(),
+		Kind:       rc.kind.Kind,
+		Name:       rc.name,
+	}
+	metrics := []autoscalingv2.MetricSpec{
+		{
+			Type: autoscalingv2.ContainerResourceMetricSourceType,
+			ContainerResource: &autoscalingv2.ContainerResourceMetricSource{
+				Name:      resourceType,
+				Container: rc.name,
+				Target:    CreateMetricTargetWithType(resourceType, metricTargetType, metricTargetValue),
+			},
+		},
+	}
+	return CreateHorizontalPodAutoscaler(rc, targetRef, rc.nsName, metrics, resourceType, metricTargetType, metricTargetValue, minReplicas, maxReplicas)
 }
 // DeleteContainerResourceHPA delete the horizontalPodAutoscaler for consuming resources.
@@ -720,6 +703,28 @@ func DeleteContainerResourceHPA(rc *ResourceConsumer, autoscalerName string) {
 	rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{})
 }
 
+func CreateMetricTargetWithType(resourceType v1.ResourceName, targetType autoscalingv2.MetricTargetType, targetValue int32) autoscalingv2.MetricTarget {
+	var metricTarget autoscalingv2.MetricTarget
+	if targetType == autoscalingv2.UtilizationMetricType {
+		metricTarget = autoscalingv2.MetricTarget{
+			Type:               targetType,
+			AverageUtilization: &targetValue,
+		}
+	} else if targetType == autoscalingv2.AverageValueMetricType {
+		var averageValue *resource.Quantity
+		if resourceType == v1.ResourceCPU {
+			averageValue = resource.NewMilliQuantity(int64(targetValue), resource.DecimalSI)
+		} else {
+			averageValue = resource.NewQuantity(int64(targetValue*megabytes), resource.DecimalSI)
+		}
+		metricTarget = autoscalingv2.MetricTarget{
+			Type:         targetType,
+			AverageValue: averageValue,
+		}
+	}
+	return metricTarget
+}
+
 func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu int32, minReplicas int32, maxRepl int32, behavior *autoscalingv2.HorizontalPodAutoscalerBehavior) *autoscalingv2.HorizontalPodAutoscaler {
 	hpa := &autoscalingv2.HorizontalPodAutoscaler{
 		ObjectMeta: metav1.ObjectMeta{
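As a worked example of CreateMetricTargetWithType above (hypothetical calls, values invented, assuming the package's imports are in scope): an AverageValue target of 150 becomes 150 millicores for CPU, but 150 * 1024 * 1024 = 157286400 bytes (150 MiB) for memory:

    cpuTarget := CreateMetricTargetWithType(v1.ResourceCPU, autoscalingv2.AverageValueMetricType, 150)
    // cpuTarget.AverageValue holds 150m, i.e. 150 millicores.
    memTarget := CreateMetricTargetWithType(v1.ResourceMemory, autoscalingv2.AverageValueMetricType, 150)
    // memTarget.AverageValue holds 157286400 bytes (150 * megabytes).
    _, _ = cpuTarget, memTarget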

View File

@@ -53,7 +53,7 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
 		f.ScalesGetter,
 		e2eautoscaling.Disable,
 		e2eautoscaling.Idle)
-	t.hpa = e2eautoscaling.CreateCPUHorizontalPodAutoscaler(
+	t.hpa = e2eautoscaling.CreateCPUResourceHorizontalPodAutoscaler(
 		t.rc,
 		20, /* targetCPUUtilizationPercent */
 		1,  /* minPods */
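Since CreateCPUResourceHorizontalPodAutoscaler is now thin sugar over the generalized helper (see the framework change above), the call in Setup is equivalent to the following sketch; maxPods here is a stand-in for the argument elided in this excerpt:

    t.hpa = e2eautoscaling.CreateResourceHorizontalPodAutoscaler(
    	t.rc,
    	v1.ResourceCPU,
    	autoscalingv2.UtilizationMetricType,
    	20, /* targetCPUUtilizationPercent */
    	1,  /* minPods */
    	maxPods)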