mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #110664 from jbartosik/rollback-failing-hpa-e2e
Revert "Add e2e test for HPA behavior: scale up delay"
commit a9f83d839e
@@ -17,9 +17,8 @@ limitations under the License.
 package autoscaling
 
 import (
-	"time"
-
 	"k8s.io/pod-security-admission/api"
+	"time"
 
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -48,7 +48,7 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
 			defer rc.CleanUp()
 
 			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
-				rc, int32(targetCPUUtilizationPercent), 1, 5, e2eautoscaling.HPABehaviorWithDownscaleStabilizationSeconds(int32(downScaleStabilization.Seconds())),
+				rc, int32(targetCPUUtilizationPercent), 1, 5, int32(downScaleStabilization.Seconds()),
 			)
 			defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
 
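
Side by side, the call this hunk rewrites goes from a helper-built behavior struct back to a plain seconds argument. A sketch for comparison only; the two calls target the pre- and post-revert signatures respectively, so they cannot coexist in one build:

// Pre-revert: the behavior is assembled by a helper, so callers can pass any behavior.
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
	rc, int32(targetCPUUtilizationPercent), 1, 5,
	e2eautoscaling.HPABehaviorWithDownscaleStabilizationSeconds(int32(downScaleStabilization.Seconds())),
)

// Post-revert: only the downscale stabilization window is configurable, as an int32.
hpa = e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
	rc, int32(targetCPUUtilizationPercent), 1, 5,
	int32(downScaleStabilization.Seconds()),
)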
@@ -86,61 +86,4 @@ var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (n
 			framework.ExpectEqual(timeWaited < deadline, true, "waited %s, wanted less than %s", timeWaited, deadline)
 		})
 	})
-
-	ginkgo.Describe("with long upscale stabilization window", func() {
-		ginkgo.It("should scale up only after the stabilization period", func() {
-			ginkgo.By("setting up resource consumer and HPA")
-			podCPURequest := 500
-			targetCPUUtilizationPercent := 25
-			usageForSingleReplica := 110
-			initPods := 4
-			initCPUUsageTotal := initPods * usageForSingleReplica
-			upScaleStabilization := 3 * time.Minute
-
-			rc := e2eautoscaling.NewDynamicResourceConsumer(
-				"consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
-				initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
-				f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
-			)
-			defer rc.CleanUp()
-
-			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(
-				rc, int32(targetCPUUtilizationPercent), 1, 5, e2eautoscaling.HPABehaviorWithUpscaleStabilizationSeconds(int32(upScaleStabilization.Seconds())),
-			)
-			defer e2eautoscaling.DeleteHPAWithBehavior(rc, hpa.Name)
-
-			fullWindowOfNewUsage := 30 * time.Second
-			windowWithOldUsagePasses := 30 * time.Second
-			newPodMetricsDelay := 15 * time.Second
-			metricsAvailableDelay := fullWindowOfNewUsage + windowWithOldUsagePasses + newPodMetricsDelay
-
-			hpaReconciliationInterval := 15 * time.Second
-			actuationDelay := 10 * time.Second
-			maxHPAReactionTime := metricsAvailableDelay + hpaReconciliationInterval + actuationDelay
-
-			maxConsumeCPUDelay := 30 * time.Second
-			waitForReplicasPollInterval := 20 * time.Second
-			maxResourceConsumerDelay := maxConsumeCPUDelay + waitForReplicasPollInterval
-
-			waitBuffer := 1 * time.Minute
-
-			// making sure HPA is ready, doing its job and already has a recommendation recorded
-			// for stabilization logic before increasing the consumption
-			ginkgo.By("triggering scale down to record a recommendation")
-			rc.ConsumeCPU(2 * usageForSingleReplica)
-			rc.WaitForReplicas(2, maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)
-
-			ginkgo.By("triggering scale up by increasing consumption")
-			rc.ConsumeCPU(5 * usageForSingleReplica)
-			waitStart := time.Now()
-			rc.WaitForReplicas(5, upScaleStabilization+maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)
-			timeWaited := time.Now().Sub(waitStart)
-
-			ginkgo.By("verifying time waited for a scale up")
-			framework.Logf("time waited for scale up: %s", timeWaited)
-			framework.ExpectEqual(timeWaited > upScaleStabilization, true, "waited %s, wanted more than %s", timeWaited, upScaleStabilization)
-			deadline := upScaleStabilization + maxHPAReactionTime + maxResourceConsumerDelay
-			framework.ExpectEqual(timeWaited < deadline, true, "waited %s, wanted less than %s", timeWaited, deadline)
-		})
-	})
 })
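
For reference, the deadline arithmetic in the removed test is what bounds how long a scale-up was allowed to take. This standalone sketch reproduces the same sums, with the constants copied from the deleted lines above:

package main

import (
	"fmt"
	"time"
)

// Reproduces the wait-bound arithmetic from the removed upscale test so the
// asserted window is explicit.
func main() {
	fullWindowOfNewUsage := 30 * time.Second
	windowWithOldUsagePasses := 30 * time.Second
	newPodMetricsDelay := 15 * time.Second
	metricsAvailableDelay := fullWindowOfNewUsage + windowWithOldUsagePasses + newPodMetricsDelay

	hpaReconciliationInterval := 15 * time.Second
	actuationDelay := 10 * time.Second
	maxHPAReactionTime := metricsAvailableDelay + hpaReconciliationInterval + actuationDelay

	maxConsumeCPUDelay := 30 * time.Second
	waitForReplicasPollInterval := 20 * time.Second
	maxResourceConsumerDelay := maxConsumeCPUDelay + waitForReplicasPollInterval

	upScaleStabilization := 3 * time.Minute
	waitBuffer := 1 * time.Minute

	// The test asserted upScaleStabilization < timeWaited < deadline, while
	// giving WaitForReplicas up to deadline+waitBuffer to observe 5 replicas.
	deadline := upScaleStabilization + maxHPAReactionTime + maxResourceConsumerDelay
	fmt.Println(maxHPAReactionTime, maxResourceConsumerDelay, deadline, deadline+waitBuffer)
	// Output: 1m40s 50s 5m30s 6m30s
}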
@@ -684,7 +684,7 @@ func DeleteContainerResourceHPA(rc *ResourceConsumer, autoscalerName string) {
 	rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{})
 }
 
-func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu int32, minReplicas int32, maxRepl int32, behavior *autoscalingv2.HorizontalPodAutoscalerBehavior) *autoscalingv2.HorizontalPodAutoscaler {
+func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu, minReplicas, maxRepl, downscaleStabilizationSeconds int32) *autoscalingv2.HorizontalPodAutoscaler {
 	hpa := &autoscalingv2.HorizontalPodAutoscaler{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: rc.name,
@@ -710,7 +710,11 @@ func CreateCPUHorizontalPodAutoscalerWithBehavior(rc *ResourceConsumer, cpu int3
 					},
 				},
 			},
-			Behavior: behavior,
+			Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{
+				ScaleDown: &autoscalingv2.HPAScalingRules{
+					StabilizationWindowSeconds: &downscaleStabilizationSeconds,
+				},
+			},
 		},
 	}
 	hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa, metav1.CreateOptions{})
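
After this hunk, every HPA created through the helper gets a ScaleDown-only behavior. A hypothetical call against the reverted signature (the 25% target and 60-second window are chosen purely for illustration):

hpa := CreateCPUHorizontalPodAutoscalerWithBehavior(rc, 25, 1, 5, 60)
defer DeleteHPAWithBehavior(rc, hpa.Name)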
@@ -722,22 +726,6 @@ func DeleteHPAWithBehavior(rc *ResourceConsumer, autoscalerName string) {
 	rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{})
 }
 
-func HPABehaviorWithDownscaleStabilizationSeconds(downscaleStabilizationSeconds int32) *autoscalingv2.HorizontalPodAutoscalerBehavior {
-	return &autoscalingv2.HorizontalPodAutoscalerBehavior{
-		ScaleDown: &autoscalingv2.HPAScalingRules{
-			StabilizationWindowSeconds: &downscaleStabilizationSeconds,
-		},
-	}
-}
-
-func HPABehaviorWithUpscaleStabilizationSeconds(upscaleStabilizationSeconds int32) *autoscalingv2.HorizontalPodAutoscalerBehavior {
-	return &autoscalingv2.HorizontalPodAutoscalerBehavior{
-		ScaleUp: &autoscalingv2.HPAScalingRules{
-			StabilizationWindowSeconds: &upscaleStabilizationSeconds,
-		},
-	}
-}
-
 //SidecarStatusType type for sidecar status
 type SidecarStatusType bool
 
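
The two deleted helpers differed only in which side of the behavior they populated. A minimal standalone sketch of the same construction against k8s.io/api/autoscaling/v2; the combined behaviorWithStabilization helper and its name are illustrative, not framework functions:

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
)

// behaviorWithStabilization folds the two removed helpers into one: it sets a
// stabilization window on whichever side a non-nil pointer is supplied for.
func behaviorWithStabilization(up, down *int32) *autoscalingv2.HorizontalPodAutoscalerBehavior {
	b := &autoscalingv2.HorizontalPodAutoscalerBehavior{}
	if up != nil {
		b.ScaleUp = &autoscalingv2.HPAScalingRules{StabilizationWindowSeconds: up}
	}
	if down != nil {
		b.ScaleDown = &autoscalingv2.HPAScalingRules{StabilizationWindowSeconds: down}
	}
	return b
}

func main() {
	window := int32(180) // three minutes, as in the removed upscale test
	fmt.Printf("%+v\n", behaviorWithStabilization(&window, nil).ScaleUp)
}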