test: e2e: HPA ContainerResource

This adds e2e tests for HPA ContainerResource metrics, covering two scenarios: 1. Scale up on a busy application with an idle sidecar container. 2. Do not scale up on a busy sidecar with an idle application.

Signed-off-by: Vivek Singh <svivekkumar@vmware.com>
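For context: a ContainerResource metric tells the HPA to scale on the usage of one named container rather than the pod-wide average, which is what makes the idle-sidecar scenarios below testable at all. A minimal sketch of such a metric spec, using the k8s.io/api/autoscaling/v2beta2 types that were current for this change (the container name "application" is an assumption, not taken from this diff):

package hpaexample

import (
	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	v1 "k8s.io/api/core/v1"
)

// utilization returns a pointer to the target percentage, as the API requires.
func utilization(percent int32) *int32 { return &percent }

// containerCPUMetric scales on the CPU utilization of a single container
// ("application" is a placeholder name) instead of the whole pod.
func containerCPUMetric() autoscalingv2beta2.MetricSpec {
	return autoscalingv2beta2.MetricSpec{
		Type: autoscalingv2beta2.ContainerResourceMetricSourceType,
		ContainerResource: &autoscalingv2beta2.ContainerResourceMetricSource{
			Name:      v1.ResourceCPU,
			Container: "application", // assumed container name
			Target: autoscalingv2beta2.MetricTarget{
				Type:               autoscalingv2beta2.UtilizationMetricType,
				AverageUtilization: utilization(20),
			},
		},
	}
}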
@@ -21,7 +21,7 @@ import (
 	"strings"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
@@ -96,7 +96,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 	nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
 	memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
 	replicas := 1
-	resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter)
+	resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
 	defer resourceConsumer.CleanUp()
 	resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.
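The only change to this call site is the two new trailing arguments. The SidecarStatusType and SidecarWorkloadType names do appear in the struct added further down, but their declarations are not part of this diff; a plausible sketch, assuming a bool-backed status and a string-backed workload kind:

package autoscaling

// Assumed declarations; only the identifier names are taken from this diff.
type SidecarStatusType bool

const (
	Enable  SidecarStatusType = true
	Disable SidecarStatusType = false
)

type SidecarWorkloadType string

const (
	Busy SidecarWorkloadType = "Busy"
	Idle SidecarWorkloadType = "Idle"
)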
@@ -91,6 +91,18 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
 			scaleTest.run("rc-light", e2eautoscaling.KindRC, f)
 		})
 	})
+
+	ginkgo.Describe("[Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case)", func() {
+		// ContainerResource CPU autoscaling on idle sidecar
+		ginkgo.It(titleUp+" on a busy application with an idle sidecar container", func() {
+			scaleOnIdleSideCar("rs", e2eautoscaling.KindReplicaSet, false, f)
+		})
+
+		// ContainerResource CPU autoscaling on busy sidecar
+		ginkgo.It("Should not scale up on a busy sidecar with an idle application", func() {
+			doNotScaleOnBusySidecar("rs", e2eautoscaling.KindReplicaSet, true, f)
+		})
+	})
 })
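Both new specs are tagged [Serial] [Slow], so they stay out of the default parallel suite. One typical way to run only these tests is a ginkgo focus regex such as --ginkgo.focus=ContainerResource passed to the e2e test runner; the exact harness invocation is environment-specific and not part of this diff.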
@@ -114,7 +126,7 @@ type HPAScaleTest struct {
 // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
 func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) {
 	const timeToWait = 15 * time.Minute
-	rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter)
+	rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
 	defer rc.CleanUp()
 	hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
 	defer e2eautoscaling.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
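The legacy pod-level test keeps its old behavior: with the sidecar disabled, CreateCPUHorizontalPodAutoscaler can keep targeting whole-pod CPU. A rough sketch of the kind of object that helper presumably builds (a plain autoscaling/v1 HPA; the field choices here are assumptions, not shown in this diff):

package hpaexample

import (
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podCPUHorizontalPodAutoscaler sketches a pod-level CPU HPA: utilization is
// averaged across all containers in the pod, which is why these legacy tests
// pass Disable and stay single-container.
func podCPUHorizontalPodAutoscaler(name, namespace string, target, min, max int32) *autoscalingv1.HorizontalPodAutoscaler {
	return &autoscalingv1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
				Kind: "Deployment", Name: name, APIVersion: "apps/v1",
			},
			MinReplicas:                    &min,
			MaxReplicas:                    max,
			TargetCPUUtilizationPercentage: &target,
		},
	}
}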
@@ -168,3 +180,88 @@ func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, f
 	}
 	scaleTest.run(name, kind, f)
 }
+
+type HPAContainerResourceScaleTest struct {
+	initPods                    int
+	totalInitialCPUUsage        int
+	perContainerCPURequest      int64
+	targetCPUUtilizationPercent int32
+	minPods                     int32
+	maxPods                     int32
+	noScale                     bool
+	noScaleStasis               time.Duration
+	firstScale                  int
+	firstScaleStasis            time.Duration
+	cpuBurst                    int
+	secondScale                 int32
+	sidecarStatus               e2eautoscaling.SidecarStatusType
+	sidecarType                 e2eautoscaling.SidecarWorkloadType
+}
+
+func (scaleTest *HPAContainerResourceScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) {
+	const timeToWait = 15 * time.Minute
+	rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perContainerCPURequest, 200, f.ClientSet, f.ScalesGetter, scaleTest.sidecarStatus, scaleTest.sidecarType)
+	defer rc.CleanUp()
+	hpa := e2eautoscaling.CreateContainerResourceCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
+	defer e2eautoscaling.DeleteContainerResourceHPA(rc, hpa.Name)
+
+	if scaleTest.noScale {
+		if scaleTest.noScaleStasis > 0 {
+			rc.EnsureDesiredReplicasInRange(scaleTest.initPods, scaleTest.initPods, scaleTest.noScaleStasis, hpa.Name)
+		}
+	} else {
+		rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
+		if scaleTest.firstScaleStasis > 0 {
+			rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis, hpa.Name)
+		}
+		if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
+			rc.ConsumeCPU(scaleTest.cpuBurst)
+			rc.WaitForReplicas(int(scaleTest.secondScale), timeToWait)
+		}
+	}
+}
+
+func scaleOnIdleSideCar(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
+	// Scale up on a busy application with an idle sidecar container
+	stasis := 0 * time.Minute
+	if checkStability {
+		stasis = 10 * time.Minute
+	}
+	scaleTest := &HPAContainerResourceScaleTest{
+		initPods:                    1,
+		totalInitialCPUUsage:        250,
+		perContainerCPURequest:      500,
+		targetCPUUtilizationPercent: 20,
+		minPods:                     1,
+		maxPods:                     5,
+		firstScale:                  3,
+		firstScaleStasis:            stasis,
+		cpuBurst:                    700,
+		secondScale:                 5,
+		sidecarStatus:               e2eautoscaling.Enable,
+		sidecarType:                 e2eautoscaling.Idle,
+	}
+	scaleTest.run(name, kind, f)
+}
+
+func doNotScaleOnBusySidecar(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
+	// Do not scale up on a busy sidecar with an idle application
+	stasis := 0 * time.Minute
+	if checkStability {
+		stasis = 1 * time.Minute
+	}
+	scaleTest := &HPAContainerResourceScaleTest{
+		initPods:                    1,
+		totalInitialCPUUsage:        250,
+		perContainerCPURequest:      500,
+		targetCPUUtilizationPercent: 20,
+		minPods:                     1,
+		maxPods:                     5,
+		cpuBurst:                    700,
+		sidecarStatus:               e2eautoscaling.Enable,
+		sidecarType:                 e2eautoscaling.Busy,
+		noScale:                     true,
+		noScaleStasis:               stasis,
+	}
+	scaleTest.run(name, kind, f)
+}
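The expected replica counts in scaleOnIdleSideCar follow from the standard HPA utilization formula, desiredReplicas = ceil(totalUsage / (perContainerRequest * targetUtilization)). A self-contained check of the numbers used above (assuming ConsumeCPU sets the total consumption rather than adding to it):

package main

import (
	"fmt"
	"math"
)

// expectedReplicas applies the HPA utilization formula:
// desired = ceil(totalUsageMillis / (requestMillis * targetPercent/100)),
// clamped to [minPods, maxPods].
func expectedReplicas(totalUsageMillis, requestMillis, targetPercent, minPods, maxPods int) int {
	perPodTarget := float64(requestMillis) * float64(targetPercent) / 100
	desired := int(math.Ceil(float64(totalUsageMillis) / perPodTarget))
	if desired < minPods {
		return minPods
	}
	if desired > maxPods {
		return maxPods
	}
	return desired
}

func main() {
	// scaleOnIdleSideCar: 250m total usage, 500m per-container request, 20% target.
	fmt.Println(expectedReplicas(250, 500, 20, 1, 5)) // 3 -> firstScale
	// After the 700m burst: ceil(700/100) = 7, capped at maxPods.
	fmt.Println(expectedReplicas(700, 500, 20, 1, 5)) // 5 -> secondScale
}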