Merge pull request #112252 from piotrnosek/hpa-average-metrics

Add e2e HPA tests: CPU and Memory scaling with different aggregation methods (Average Value and Utilization)
commit d7d82ad972
Kubernetes Prow Robot, 2022-09-12 09:41:24 -07:00 (committed by GitHub)
5 changed files with 336 additions and 205 deletions
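The two aggregation methods named in the title correspond to the two MetricTarget types in the autoscaling/v2 API. As a minimal sketch (not part of this diff; the 30% and 150m values are assumed for illustration), the targets the new tests exercise look like this:

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Utilization: target a percentage of the pod's resource request.
	utilization := int32(30)
	utilizationTarget := autoscalingv2.MetricTarget{
		Type:               autoscalingv2.UtilizationMetricType,
		AverageUtilization: &utilization,
	}

	// AverageValue: target an absolute per-pod quantity (here 150m of CPU).
	averageValueTarget := autoscalingv2.MetricTarget{
		Type:         autoscalingv2.AverageValueMetricType,
		AverageValue: resource.NewMilliQuantity(150, resource.DecimalSI),
	}

	fmt.Println(utilizationTarget.Type, averageValueTarget.Type)
}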


@@ -109,7 +109,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
// Enable Horizontal Pod Autoscaler with 50% target utilization and
// scale up the CPU usage to trigger autoscaling to 8 pods for target to be satisfied.
targetCPUUtilizationPercent := int32(50)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10)
hpa := e2eautoscaling.CreateCPUResourceHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10)
defer e2eautoscaling.DeleteHorizontalPodAutoscaler(resourceConsumer, hpa.Name)
cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100 // 8 pods utilized to the target level
resourceConsumer.ConsumeCPU(int(cpuLoad))
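For concreteness (the per-pod request value is assumed here, not taken from this hunk): with cpuRequestMillis = 500 and a 50% target, cpuLoad = 8 * 500 * 50 / 100 = 2000 millicores of total consumption, i.e. 250m for each of the 8 expected pods, which is exactly the target utilization of each pod's 500m request.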


@@ -19,14 +19,25 @@ package autoscaling
import (
"time"
"github.com/onsi/ginkgo/v2"
"k8s.io/pod-security-admission/api"
autoscalingv2 "k8s.io/api/autoscaling/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/test/e2e/framework"
e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
)
"github.com/onsi/ginkgo/v2"
const (
titleUp = "Should scale from 1 pod to 3 pods and then from 3 pods to 5 pods"
titleDown = "Should scale from 5 pods to 3 pods and then from 3 pods to 1 pod"
titleAverageUtilization = " using Average Utilization for aggregation"
titleAverageValue = " using Average Value for aggregation"
valueMetricType = autoscalingv2.AverageValueMetricType
utilizationMetricType = autoscalingv2.UtilizationMetricType
cpuResource = v1.ResourceCPU
memResource = v1.ResourceMemory
)
// These tests don't seem to be running properly in parallel: issue: #20338.
@@ -34,109 +45,143 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
f.NamespacePodSecurityEnforceLevel = api.LevelBaseline
titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1"
ginkgo.Describe("[Serial] [Slow] Deployment", func() {
// CPU tests via deployments
ginkgo.It(titleUp, func() {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, false, f)
ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() {
ginkgo.It(titleUp+titleAverageUtilization, func() {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f)
})
ginkgo.It(titleDown, func() {
scaleDown("test-deployment", e2eautoscaling.KindDeployment, false, f)
ginkgo.It(titleDown+titleAverageUtilization, func() {
scaleDown("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f)
})
ginkgo.It(titleUp+titleAverageValue, func() {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, false, f)
})
})
ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() {
ginkgo.It(titleUp+titleAverageUtilization, func() {
scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, f)
})
ginkgo.It(titleUp+titleAverageValue, func() {
scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, cpuResource, valueMetricType, f)
})
})
ginkgo.Describe("[Serial] [Slow] ReplicaSet", func() {
// CPU tests via ReplicaSets
ginkgo.It(titleUp, func() {
scaleUp("rs", e2eautoscaling.KindReplicaSet, false, f)
scaleUp("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f)
})
ginkgo.It(titleDown, func() {
scaleDown("rs", e2eautoscaling.KindReplicaSet, false, f)
scaleDown("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f)
})
})
// These tests take ~20 minutes each.
ginkgo.Describe("[Serial] [Slow] ReplicationController", func() {
// CPU tests via replication controllers
ginkgo.It(titleUp+" and verify decision stability", func() {
scaleUp("rc", e2eautoscaling.KindRC, true, f)
scaleUp("rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f)
})
ginkgo.It(titleDown+" and verify decision stability", func() {
scaleDown("rc", e2eautoscaling.KindRC, true, f)
scaleDown("rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f)
})
})
ginkgo.Describe("ReplicationController light", func() {
ginkgo.It("Should scale from 1 pod to 2 pods", func() {
scaleTest := &HPAScaleTest{
initPods: 1,
totalInitialCPUUsage: 150,
perPodCPURequest: 200,
targetCPUUtilizationPercent: 50,
minPods: 1,
maxPods: 2,
firstScale: 2,
st := &HPAScaleTest{
initPods: 1,
initCPUTotal: 150,
perPodCPURequest: 200,
targetValue: 50,
minPods: 1,
maxPods: 2,
firstScale: 2,
}
scaleTest.run("rc-light", e2eautoscaling.KindRC, f)
st.run("rc-light", e2eautoscaling.KindRC, f)
})
ginkgo.It("Should scale from 2 pods to 1 pod [Slow]", func() {
scaleTest := &HPAScaleTest{
initPods: 2,
totalInitialCPUUsage: 50,
perPodCPURequest: 200,
targetCPUUtilizationPercent: 50,
minPods: 1,
maxPods: 2,
firstScale: 1,
ginkgo.It("[Slow] Should scale from 2 pods to 1 pod", func() {
st := &HPAScaleTest{
initPods: 2,
initCPUTotal: 50,
perPodCPURequest: 200,
targetValue: 50,
minPods: 1,
maxPods: 2,
firstScale: 1,
}
scaleTest.run("rc-light", e2eautoscaling.KindRC, f)
st.run("rc-light", e2eautoscaling.KindRC, f)
})
})
ginkgo.Describe("[Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case)", func() {
// ContainerResource CPU autoscaling on idle sidecar
ginkgo.It(titleUp+" on a busy application with an idle sidecar container", func() {
scaleOnIdleSideCar("rs", e2eautoscaling.KindReplicaSet, false, f)
scaleOnIdleSideCar("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f)
})
// ContainerResource CPU autoscaling on busy sidecar
ginkgo.It("Should not scale up on a busy sidecar with an idle application", func() {
doNotScaleOnBusySidecar("rs", e2eautoscaling.KindReplicaSet, true, f)
doNotScaleOnBusySidecar("rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, true, f)
})
})
ginkgo.Describe("CustomResourceDefinition", func() {
ginkgo.It("Should scale with a CRD targetRef", func() {
scaleTest := &HPAScaleTest{
initPods: 1,
totalInitialCPUUsage: 150,
perPodCPURequest: 200,
targetCPUUtilizationPercent: 50,
minPods: 1,
maxPods: 2,
firstScale: 2,
targetRef: e2eautoscaling.CustomCRDTargetRef(),
st := &HPAScaleTest{
initPods: 1,
initCPUTotal: 150,
perPodCPURequest: 200,
targetValue: 50,
minPods: 1,
maxPods: 2,
firstScale: 2,
targetRef: e2eautoscaling.CustomCRDTargetRef(),
}
scaleTest.run("crd-light", e2eautoscaling.KindCRD, f)
st.run("crd-light", e2eautoscaling.KindCRD, f)
})
})
})
var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: Memory)", func() {
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
f.NamespacePodSecurityEnforceLevel = api.LevelBaseline
ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() {
ginkgo.It(titleUp+titleAverageUtilization, func() {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, false, f)
})
ginkgo.It(titleUp+titleAverageValue, func() {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, false, f)
})
})
ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() {
ginkgo.It(titleUp+titleAverageUtilization, func() {
scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, f)
})
ginkgo.It(titleUp+titleAverageValue, func() {
scaleUpContainerResource("test-deployment", e2eautoscaling.KindDeployment, memResource, valueMetricType, f)
})
})
})
// HPAScaleTest struct is used by the scale(...) function.
type HPAScaleTest struct {
initPods int
totalInitialCPUUsage int
perPodCPURequest int64
targetCPUUtilizationPercent int32
minPods int32
maxPods int32
firstScale int
firstScaleStasis time.Duration
cpuBurst int
secondScale int32
targetRef autoscalingv2.CrossVersionObjectReference
initPods int
initCPUTotal int
initMemTotal int
perPodCPURequest int64
perPodMemRequest int64
targetValue int32
minPods int32
maxPods int32
firstScale int
firstScaleStasis time.Duration
cpuBurst int
memBurst int
secondScale int32
targetRef autoscalingv2.CrossVersionObjectReference
resourceType v1.ResourceName
metricTargetType autoscalingv2.MetricTargetType
}
// run is a method which runs an HPA lifecycle, from a starting state, to an expected
@@ -144,145 +189,226 @@ type HPAScaleTest struct {
// The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
// The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) {
func (st *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) {
const timeToWait = 15 * time.Minute
rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
initCPUTotal, initMemTotal := 0, 0
if st.resourceType == cpuResource {
initCPUTotal = st.initCPUTotal
} else if st.resourceType == memResource {
initMemTotal = st.initMemTotal
}
rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perPodCPURequest, st.perPodMemRequest, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
defer rc.CleanUp()
var hpa *autoscalingv2.HorizontalPodAutoscaler
hpa = e2eautoscaling.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
hpa := e2eautoscaling.CreateResourceHorizontalPodAutoscaler(rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods)
defer e2eautoscaling.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
if scaleTest.firstScaleStasis > 0 {
rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis, hpa.Name)
rc.WaitForReplicas(st.firstScale, timeToWait)
if st.firstScaleStasis > 0 {
rc.EnsureDesiredReplicasInRange(st.firstScale, st.firstScale+1, st.firstScaleStasis, hpa.Name)
}
if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
rc.ConsumeCPU(scaleTest.cpuBurst)
rc.WaitForReplicas(int(scaleTest.secondScale), timeToWait)
if st.resourceType == cpuResource && st.cpuBurst > 0 && st.secondScale > 0 {
rc.ConsumeCPU(st.cpuBurst)
rc.WaitForReplicas(int(st.secondScale), timeToWait)
}
if st.resourceType == memResource && st.memBurst > 0 && st.secondScale > 0 {
rc.ConsumeMem(st.memBurst)
rc.WaitForReplicas(int(st.secondScale), timeToWait)
}
}
func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
func scaleUp(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) {
stasis := 0 * time.Minute
if checkStability {
stasis = 10 * time.Minute
}
scaleTest := &HPAScaleTest{
initPods: 1,
totalInitialCPUUsage: 250,
perPodCPURequest: 500,
targetCPUUtilizationPercent: 20,
minPods: 1,
maxPods: 5,
firstScale: 3,
firstScaleStasis: stasis,
cpuBurst: 700,
secondScale: 5,
st := &HPAScaleTest{
initPods: 1,
perPodCPURequest: 500,
perPodMemRequest: 500,
targetValue: getTargetValueByType(150, 30, metricTargetType),
minPods: 1,
maxPods: 5,
firstScale: 3,
firstScaleStasis: stasis,
secondScale: 5,
resourceType: resourceType,
metricTargetType: metricTargetType,
}
scaleTest.run(name, kind, f)
if resourceType == cpuResource {
st.initCPUTotal = 250
st.cpuBurst = 700
}
if resourceType == memResource {
st.initMemTotal = 250
st.memBurst = 700
}
st.run(name, kind, f)
}
func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
func scaleDown(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) {
stasis := 0 * time.Minute
if checkStability {
stasis = 10 * time.Minute
}
scaleTest := &HPAScaleTest{
initPods: 5,
totalInitialCPUUsage: 325,
perPodCPURequest: 500,
targetCPUUtilizationPercent: 30,
minPods: 1,
maxPods: 5,
firstScale: 3,
firstScaleStasis: stasis,
cpuBurst: 10,
secondScale: 1,
st := &HPAScaleTest{
initPods: 5,
perPodCPURequest: 500,
perPodMemRequest: 500,
targetValue: getTargetValueByType(150, 30, metricTargetType),
minPods: 1,
maxPods: 5,
firstScale: 3,
firstScaleStasis: stasis,
cpuBurst: 10,
secondScale: 1,
resourceType: resourceType,
metricTargetType: metricTargetType,
}
scaleTest.run(name, kind, f)
if resourceType == cpuResource {
st.initCPUTotal = 325
st.cpuBurst = 10
}
if resourceType == memResource {
st.initMemTotal = 325
st.memBurst = 10
}
st.run(name, kind, f)
}
type HPAContainerResourceScaleTest struct {
initPods int
totalInitialCPUUsage int
perContainerCPURequest int64
targetCPUUtilizationPercent int32
minPods int32
maxPods int32
noScale bool
noScaleStasis time.Duration
firstScale int
firstScaleStasis time.Duration
cpuBurst int
secondScale int32
sidecarStatus e2eautoscaling.SidecarStatusType
sidecarType e2eautoscaling.SidecarWorkloadType
initPods int
initCPUTotal int
initMemTotal int
perContainerCPURequest int64
perContainerMemRequest int64
targetValue int32
minPods int32
maxPods int32
noScale bool
noScaleStasis time.Duration
firstScale int
firstScaleStasis time.Duration
cpuBurst int
memBurst int
secondScale int32
sidecarStatus e2eautoscaling.SidecarStatusType
sidecarType e2eautoscaling.SidecarWorkloadType
resourceType v1.ResourceName
metricTargetType autoscalingv2.MetricTargetType
}
func (scaleTest *HPAContainerResourceScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) {
func (st *HPAContainerResourceScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) {
const timeToWait = 15 * time.Minute
rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perContainerCPURequest, 200, f.ClientSet, f.ScalesGetter, scaleTest.sidecarStatus, scaleTest.sidecarType)
initCPUTotal, initMemTotal := 0, 0
if st.resourceType == cpuResource {
initCPUTotal = st.initCPUTotal
} else if st.resourceType == memResource {
initMemTotal = st.initMemTotal
}
rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, st.initPods, initCPUTotal, initMemTotal, 0, st.perContainerCPURequest, st.perContainerMemRequest, f.ClientSet, f.ScalesGetter, st.sidecarStatus, st.sidecarType)
defer rc.CleanUp()
hpa := e2eautoscaling.CreateContainerResourceCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
hpa := e2eautoscaling.CreateContainerResourceHorizontalPodAutoscaler(rc, st.resourceType, st.metricTargetType, st.targetValue, st.minPods, st.maxPods)
defer e2eautoscaling.DeleteContainerResourceHPA(rc, hpa.Name)
if scaleTest.noScale {
if scaleTest.noScaleStasis > 0 {
rc.EnsureDesiredReplicasInRange(scaleTest.initPods, scaleTest.initPods, scaleTest.noScaleStasis, hpa.Name)
if st.noScale {
if st.noScaleStasis > 0 {
rc.EnsureDesiredReplicasInRange(st.initPods, st.initPods, st.noScaleStasis, hpa.Name)
}
} else {
rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
if scaleTest.firstScaleStasis > 0 {
rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis, hpa.Name)
rc.WaitForReplicas(st.firstScale, timeToWait)
if st.firstScaleStasis > 0 {
rc.EnsureDesiredReplicasInRange(st.firstScale, st.firstScale+1, st.firstScaleStasis, hpa.Name)
}
if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
rc.ConsumeCPU(scaleTest.cpuBurst)
rc.WaitForReplicas(int(scaleTest.secondScale), timeToWait)
if st.resourceType == cpuResource && st.cpuBurst > 0 && st.secondScale > 0 {
rc.ConsumeCPU(st.cpuBurst)
rc.WaitForReplicas(int(st.secondScale), timeToWait)
}
if st.resourceType == memResource && st.memBurst > 0 && st.secondScale > 0 {
rc.ConsumeMem(st.memBurst)
rc.WaitForReplicas(int(st.secondScale), timeToWait)
}
}
}
func scaleOnIdleSideCar(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
func scaleUpContainerResource(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, f *framework.Framework) {
st := &HPAContainerResourceScaleTest{
initPods: 1,
perContainerCPURequest: 500,
perContainerMemRequest: 500,
targetValue: getTargetValueByType(100, 20, metricTargetType),
minPods: 1,
maxPods: 5,
firstScale: 3,
firstScaleStasis: 0,
secondScale: 5,
resourceType: resourceType,
metricTargetType: metricTargetType,
sidecarStatus: e2eautoscaling.Disable,
sidecarType: e2eautoscaling.Idle,
}
if resourceType == cpuResource {
st.initCPUTotal = 250
st.cpuBurst = 700
}
if resourceType == memResource {
st.initMemTotal = 250
st.memBurst = 700
}
st.run(name, kind, f)
}
func scaleOnIdleSideCar(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) {
// Scale up on a busy application with an idle sidecar container
stasis := 0 * time.Minute
if checkStability {
stasis = 10 * time.Minute
}
scaleTest := &HPAContainerResourceScaleTest{
initPods: 1,
totalInitialCPUUsage: 125,
perContainerCPURequest: 250,
targetCPUUtilizationPercent: 20,
minPods: 1,
maxPods: 5,
firstScale: 3,
firstScaleStasis: stasis,
cpuBurst: 500,
secondScale: 5,
sidecarStatus: e2eautoscaling.Enable,
sidecarType: e2eautoscaling.Idle,
st := &HPAContainerResourceScaleTest{
initPods: 1,
initCPUTotal: 125,
perContainerCPURequest: 250,
targetValue: 20,
minPods: 1,
maxPods: 5,
firstScale: 3,
firstScaleStasis: stasis,
cpuBurst: 500,
secondScale: 5,
resourceType: resourceType,
metricTargetType: metricTargetType,
sidecarStatus: e2eautoscaling.Enable,
sidecarType: e2eautoscaling.Idle,
}
scaleTest.run(name, kind, f)
st.run(name, kind, f)
}
func doNotScaleOnBusySidecar(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
func doNotScaleOnBusySidecar(name string, kind schema.GroupVersionKind, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, checkStability bool, f *framework.Framework) {
// Do not scale up on a busy sidecar with an idle application
stasis := 0 * time.Minute
if checkStability {
stasis = 1 * time.Minute
}
scaleTest := &HPAContainerResourceScaleTest{
initPods: 1,
totalInitialCPUUsage: 250,
perContainerCPURequest: 500,
targetCPUUtilizationPercent: 20,
minPods: 1,
maxPods: 5,
cpuBurst: 700,
sidecarStatus: e2eautoscaling.Enable,
sidecarType: e2eautoscaling.Busy,
noScale: true,
noScaleStasis: stasis,
st := &HPAContainerResourceScaleTest{
initPods: 1,
initCPUTotal: 250,
perContainerCPURequest: 500,
targetValue: 20,
minPods: 1,
maxPods: 5,
cpuBurst: 700,
sidecarStatus: e2eautoscaling.Enable,
sidecarType: e2eautoscaling.Busy,
resourceType: resourceType,
metricTargetType: metricTargetType,
noScale: true,
noScaleStasis: stasis,
}
scaleTest.run(name, kind, f)
st.run(name, kind, f)
}
func getTargetValueByType(averageValueTarget, averageUtilizationTarget int, targetType autoscalingv2.MetricTargetType) int32 {
if targetType == utilizationMetricType {
return int32(averageUtilizationTarget)
}
return int32(averageValueTarget)
}
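The helper above selects which of the two numeric targets applies to the chosen aggregation method: the first argument is used for AverageValue targets, the second for Utilization targets. For example, the scaleUp call getTargetValueByType(150, 30, metricTargetType) resolves as follows (illustrative calls only, not part of the diff):

_ = getTargetValueByType(150, 30, autoscalingv2.UtilizationMetricType)  // 30 (percent of the per-pod request)
_ = getTargetValueByType(150, 30, autoscalingv2.AverageValueMetricType) // 150 (absolute per-pod average value)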


@@ -27,7 +27,7 @@ import (
"github.com/onsi/ginkgo/v2"
)
var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] [Feature:BHPA] Horizontal pod autoscaling (non-default behavior)", func() {
var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior)", func() {
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged


@@ -63,6 +63,7 @@ const (
customMetricName = "QPS"
serviceInitializationTimeout = 2 * time.Minute
serviceInitializationInterval = 15 * time.Second
megabytes = 1024 * 1024
)
var (
@@ -90,8 +91,8 @@ const (
)
/*
ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory (Warning: memory not supported)
typical use case:
ResourceConsumer is a tool for testing. It helps to create a specified usage of CPU or memory.
Typical use case:
rc.ConsumeCPU(600)
// ... check your assumption here
rc.ConsumeCPU(300)
@@ -146,8 +147,8 @@ func getSidecarContainer(name string, cpuLimit, memLimit int64) v1.Container {
}
if memLimit > 0 {
container.Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(memLimit*1024*1024, resource.DecimalSI)
container.Resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(memLimit*1024*1024, resource.DecimalSI)
container.Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(memLimit*megabytes, resource.DecimalSI)
container.Resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(memLimit*megabytes, resource.DecimalSI)
}
return container
@@ -617,7 +618,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
}
func CreateCpuHorizontalPodAutoscalerWithCustomTargetRef(rc *ResourceConsumer, targetRef autoscalingv2.CrossVersionObjectReference, namespace string, cpu, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler {
func CreateHorizontalPodAutoscaler(rc *ResourceConsumer, targetRef autoscalingv2.CrossVersionObjectReference, namespace string, metrics []autoscalingv2.MetricSpec, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler {
hpa := &autoscalingv2.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: targetRef.Name,
@@ -627,35 +628,34 @@ func CreateCpuHorizontalPodAutoscalerWithCustomTargetRef(rc *ResourceConsumer, t
ScaleTargetRef: targetRef,
MinReplicas: &minReplicas,
MaxReplicas: maxReplicas,
Metrics: []autoscalingv2.MetricSpec{
{
Type: autoscalingv2.ResourceMetricSourceType,
Resource: &autoscalingv2.ResourceMetricSource{
Name: v1.ResourceCPU,
Target: autoscalingv2.MetricTarget{
Type: autoscalingv2.UtilizationMetricType,
AverageUtilization: &cpu,
},
},
},
},
Metrics: metrics,
},
}
hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa, metav1.CreateOptions{})
hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(namespace).Create(context.TODO(), hpa, metav1.CreateOptions{})
framework.ExpectNoError(errHPA)
return hpa
}
// CreateCPUHorizontalPodAutoscaler create a horizontalPodAutoscaler with CPU target
// for consuming resources.
func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler {
func CreateResourceHorizontalPodAutoscaler(rc *ResourceConsumer, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler {
targetRef := autoscalingv2.CrossVersionObjectReference{
APIVersion: rc.kind.GroupVersion().String(),
Kind: rc.kind.Kind,
Name: rc.name,
}
metrics := []autoscalingv2.MetricSpec{
{
Type: autoscalingv2.ResourceMetricSourceType,
Resource: &autoscalingv2.ResourceMetricSource{
Name: resourceType,
Target: CreateMetricTargetWithType(resourceType, metricTargetType, metricTargetValue),
},
},
}
return CreateHorizontalPodAutoscaler(rc, targetRef, rc.nsName, metrics, resourceType, metricTargetType, metricTargetValue, minReplicas, maxReplicas)
}
return CreateCpuHorizontalPodAutoscalerWithCustomTargetRef(rc, targetRef, rc.nsName, cpu, minReplicas, maxReplicas)
func CreateCPUResourceHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler {
return CreateResourceHorizontalPodAutoscaler(rc, v1.ResourceCPU, autoscalingv2.UtilizationMetricType, cpu, minReplicas, maxReplicas)
}
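A hedged usage sketch of the new generic helper, mirroring how the new tests call it (the resourceConsumer variable and the 150 target are assumed for illustration):

// Memory-based HPA targeting an average value per pod, scaling between 1 and 5 replicas.
hpa := e2eautoscaling.CreateResourceHorizontalPodAutoscaler(
	resourceConsumer, v1.ResourceMemory, autoscalingv2.AverageValueMetricType,
	150, // for memory AverageValue targets this becomes 150 * 1024 * 1024 bytes per pod
	1, 5,
)
defer e2eautoscaling.DeleteHorizontalPodAutoscaler(resourceConsumer, hpa.Name)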
// DeleteHorizontalPodAutoscaler delete the horizontalPodAutoscaler for consuming resources.
@@ -679,40 +679,23 @@ func runReplicaSet(config testutils.ReplicaSetConfig) error {
return testutils.RunReplicaSet(config)
}
// CreateContainerResourceCPUHorizontalPodAutoscaler create a horizontal pod autoscaler with container resource target
// for consuming resources.
func CreateContainerResourceCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) *autoscalingv2.HorizontalPodAutoscaler {
hpa := &autoscalingv2.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: rc.name,
Namespace: rc.nsName,
},
Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
APIVersion: rc.kind.GroupVersion().String(),
Kind: rc.kind.Kind,
Name: rc.name,
},
MinReplicas: &minReplicas,
MaxReplicas: maxRepl,
Metrics: []autoscalingv2.MetricSpec{
{
Type: "ContainerResource",
ContainerResource: &autoscalingv2.ContainerResourceMetricSource{
Name: "cpu",
Container: rc.name,
Target: autoscalingv2.MetricTarget{
Type: "Utilization",
AverageUtilization: &cpu,
},
},
},
func CreateContainerResourceHorizontalPodAutoscaler(rc *ResourceConsumer, resourceType v1.ResourceName, metricTargetType autoscalingv2.MetricTargetType, metricTargetValue, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler {
targetRef := autoscalingv2.CrossVersionObjectReference{
APIVersion: rc.kind.GroupVersion().String(),
Kind: rc.kind.Kind,
Name: rc.name,
}
metrics := []autoscalingv2.MetricSpec{
{
Type: autoscalingv2.ContainerResourceMetricSourceType,
ContainerResource: &autoscalingv2.ContainerResourceMetricSource{
Name: resourceType,
Container: rc.name,
Target: CreateMetricTargetWithType(resourceType, metricTargetType, metricTargetValue),
},
},
}
hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa, metav1.CreateOptions{})
framework.ExpectNoError(errHPA)
return hpa
return CreateHorizontalPodAutoscaler(rc, targetRef, rc.nsName, metrics, resourceType, metricTargetType, metricTargetValue, minReplicas, maxReplicas)
}
// DeleteContainerResourceHPA delete the horizontalPodAutoscaler for consuming resources.
@@ -720,6 +703,28 @@ func DeleteContainerResourceHPA(rc *ResourceConsumer, autoscalerName string) {
rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{})
}
func CreateMetricTargetWithType(resourceType v1.ResourceName, targetType autoscalingv2.MetricTargetType, targetValue int32) autoscalingv2.MetricTarget {
var metricTarget autoscalingv2.MetricTarget
if targetType == autoscalingv2.UtilizationMetricType {
metricTarget = autoscalingv2.MetricTarget{
Type: targetType,
AverageUtilization: &targetValue,
}
} else if targetType == autoscalingv2.AverageValueMetricType {
var averageValue *resource.Quantity
if resourceType == v1.ResourceCPU {
averageValue = resource.NewMilliQuantity(int64(targetValue), resource.DecimalSI)
} else {
averageValue = resource.NewQuantity(int64(targetValue*megabytes), resource.DecimalSI)
}
metricTarget = autoscalingv2.MetricTarget{
Type: targetType,
AverageValue: averageValue,
}
}
return metricTarget
}
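Note how CreateMetricTargetWithType interprets the same int32 differently per resource and aggregation method. A hedged illustration from a test's point of view (these calls are not part of the diff):

cpuTarget := e2eautoscaling.CreateMetricTargetWithType(v1.ResourceCPU, autoscalingv2.AverageValueMetricType, 150)
// cpuTarget.AverageValue is 150m (millicores)
memTarget := e2eautoscaling.CreateMetricTargetWithType(v1.ResourceMemory, autoscalingv2.AverageValueMetricType, 150)
// memTarget.AverageValue is 150 * 1024 * 1024 bytes (the framework's megabytes constant)
utilTarget := e2eautoscaling.CreateMetricTargetWithType(v1.ResourceCPU, autoscalingv2.UtilizationMetricType, 30)
// *utilTarget.AverageUtilization is 30, i.e. 30% of the pod's CPU request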
func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu int32, minReplicas int32, maxRepl int32, behavior *autoscalingv2.HorizontalPodAutoscalerBehavior) *autoscalingv2.HorizontalPodAutoscaler {
hpa := &autoscalingv2.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{


@@ -53,7 +53,7 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
f.ScalesGetter,
e2eautoscaling.Disable,
e2eautoscaling.Idle)
t.hpa = e2eautoscaling.CreateCPUHorizontalPodAutoscaler(
t.hpa = e2eautoscaling.CreateCPUResourceHorizontalPodAutoscaler(
t.rc,
20, /* targetCPUUtilizationPercent */
1, /* minPods */