Merge pull request #108264 from pbetkier/hpa-behavior-e2e-tests-downscale

Add e2e test for HPA behavior: decreased downscale stabilization
Kubernetes Prow Robot 2022-03-14 14:23:00 -07:00 committed by GitHub
commit 5c98eb5625
2 changed files with 129 additions and 0 deletions
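
The new test exercises the scale-down stabilization window of the autoscaling/v2 behavior API: the window is lowered from its 300-second default to 60 seconds, and the test asserts that the replica count drops only after that window has passed, yet within a bounded deadline. The HPA the test builds corresponds roughly to the following manifest (the names and numbers are taken from the test; treat the YAML as an illustrative sketch, since the test creates the object through the Go client rather than from a manifest):

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: consumer
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: consumer
  minReplicas: 1
  maxReplicas: 5
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 25
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 60   # default is 300 when the field is omitted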


@@ -0,0 +1,87 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package autoscaling

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
	e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"

	"github.com/onsi/ginkgo"
)

var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior)", func() {
	f := framework.NewDefaultFramework("horizontal-pod-autoscaling")

	ginkgo.Describe("with short downscale stabilization window", func() {
		ginkgo.It("should scale down soon after the stabilization period", func() {
			ginkgo.By("setting up resource consumer and HPA")
			podCPURequest := 500
			targetCPUUtilizationPercent := 25
			usageForSingleReplica := 110
			initPods := 1
			initCPUUsageTotal := initPods * usageForSingleReplica
			downScaleStabilization := 1 * time.Minute

			rc := e2eautoscaling.NewDynamicResourceConsumer(
				"consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
				initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
				f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
			)
			defer rc.CleanUp()

			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
				rc, int32(targetCPUUtilizationPercent), 1, 5, int32(downScaleStabilization.Seconds()),
			)
			defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)

			fullWindowOfNewUsage := 30 * time.Second
			windowWithOldUsagePasses := 30 * time.Second
			newPodMetricsDelay := 15 * time.Second
			metricsAvailableDelay := fullWindowOfNewUsage + windowWithOldUsagePasses + newPodMetricsDelay

			hpaReconciliationInterval := 15 * time.Second
			actuationDelay := 10 * time.Second
			maxHPAReactionTime := metricsAvailableDelay + hpaReconciliationInterval + actuationDelay

			maxConsumeCPUDelay := 30 * time.Second
			waitForReplicasPollInterval := 20 * time.Second
			maxResourceConsumerDelay := maxConsumeCPUDelay + waitForReplicasPollInterval

			waitBuffer := 1 * time.Minute
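			// With the constants above the timing budget works out as follows (illustrative arithmetic):
			//   metricsAvailableDelay    = 30s + 30s + 15s = 75s
			//   maxHPAReactionTime       = 75s + 15s + 10s = 100s
			//   maxResourceConsumerDelay = 30s + 20s = 50s
			// so the scale-up wait below allows 100s + 50s + 60s = 210s in total, and the scale-down
			// wait adds the 60s stabilization window on top of that, i.e. 270s.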
			// making sure HPA is ready, doing its job and already has a recommendation recorded
			// for stabilization logic before lowering the consumption
			ginkgo.By("triggering scale up to record a recommendation")
			rc.ConsumeCPU(3 * usageForSingleReplica)
			rc.WaitForReplicas(3, maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)

			ginkgo.By("triggering scale down by lowering consumption")
			rc.ConsumeCPU(2 * usageForSingleReplica)
			waitStart := time.Now()
			rc.WaitForReplicas(2, downScaleStabilization+maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)
			timeWaited := time.Now().Sub(waitStart)

			ginkgo.By("verifying time waited for a scale down")
			framework.Logf("time waited for scale down: %s", timeWaited)
			framework.ExpectEqual(timeWaited > downScaleStabilization, true, "waited %s, wanted more than %s", timeWaited, downScaleStabilization)
			deadline := downScaleStabilization + maxHPAReactionTime + maxResourceConsumerDelay
			framework.ExpectEqual(timeWaited < deadline, true, "waited %s, wanted less than %s", timeWaited, deadline)
		})
	})
})


@@ -667,6 +667,48 @@ func DeleteContainerResourceHPA(rc *ResourceConsumer, autoscalerName string) {
	rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{})
}
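
// CreateCPUHorizontalPodAutoscalerWithBehavior creates an autoscaling/v2 HPA for the given
// ResourceConsumer with a CPU utilization target and a custom scale-down stabilization window.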
func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu, minReplicas, maxRepl, downscaleStabilizationSeconds int32) *autoscalingv2.HorizontalPodAutoscaler {
	hpa := &autoscalingv2.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      rc.name,
			Namespace: rc.nsName,
		},
		Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
				APIVersion: rc.kind.GroupVersion().String(),
				Kind:       rc.kind.Kind,
				Name:       rc.name,
			},
			MinReplicas: &minReplicas,
			MaxReplicas: maxRepl,
			Metrics: []autoscalingv2.MetricSpec{
				{
					Type: autoscalingv2.ResourceMetricSourceType,
					Resource: &autoscalingv2.ResourceMetricSource{
						Name: v1.ResourceCPU,
						Target: autoscalingv2.MetricTarget{
							Type:               autoscalingv2.UtilizationMetricType,
							AverageUtilization: &cpu,
						},
					},
				},
			},
			Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
				ScaleDown: &autoscalingv2.HPAScalingRules{
					StabilizationWindowSeconds: &downscaleStabilizationSeconds,
				},
			},
		},
	}
	hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa, metav1.CreateOptions{})
	framework.ExpectNoError(errHPA)
	return hpa
}
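
// DeleteHPAWithBehavior deletes the HPA with the given name from the ResourceConsumer's namespace.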
func DeleteHPAWithBehavior(rc *ResourceConsumer, autoscalerName string) {
	rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{})
}

// SidecarStatusType type for sidecar status
type SidecarStatusType bool