Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 10:51:29 +00:00
Deflake HPA tests
- Scale down based on custom metric was flaking. Increase target value of the metric.
- Scale down based on CPU was flaking during stabilization. Increase tolerance of stabilization (caused by resource consumer using more CPU than requested).
This commit is contained in:
parent 76234a31b0
commit cb6a8a2564
@@ -109,7 +109,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
 		initialReplicas := 2
 		// metric should cause scale down
 		metricValue := externalMetricValue
-		metricAverageTarget := 2 * metricValue
+		metricAverageTarget := (3 * metricValue) / 2
 		metricTargets := map[string]externalMetricTarget{
 			"target_average": {
 				value: metricAverageTarget,
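For orientation (not part of the commit): the scale-down expectation above follows from the HPA's documented ratio rule, which for a target_average target reduces to roughly ceil(totalMetricValue / targetAverageValue). The sketch below uses hypothetical numbers rather than the test's externalMetricValue, and it ignores the controller's readiness handling and tolerance band.

package main

import (
	"fmt"
	"math"
)

// desiredReplicas sketches the HPA recommendation for an average-value
// metric target: ceil(totalMetricValue / targetAverageValue). This is an
// approximation only; readiness handling and the controller's tolerance
// band are ignored.
func desiredReplicas(totalMetricValue, targetAverageValue float64) int {
	return int(math.Ceil(totalMetricValue / targetAverageValue))
}

func main() {
	// Hypothetical numbers: a metric totalling 100 units against a
	// target_average of 150 recommends 1 replica, i.e. a scale down
	// from the test's initial 2 replicas.
	fmt.Println(desiredReplicas(100, 150)) // 1
}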
@@ -102,7 +102,7 @@ type HPAScaleTest struct {
 	targetCPUUtilizationPercent int32
 	minPods                     int32
 	maxPods                     int32
-	firstScale                  int32
+	firstScale                  int
 	firstScaleStasis            time.Duration
 	cpuBurst                    int
 	secondScale                 int32
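To make the type change concrete, here is a hypothetical test-case literal using only the fields visible in this hunk; the real struct has additional fields the diff does not show, and the values are purely illustrative. With firstScale as a plain int, run() can pass it straight to the framework's int-based helpers without the int() casts removed in the next hunk.

// Fragment, assumed to live in the same test package as HPAScaleTest.
scaleTest := HPAScaleTest{
	targetCPUUtilizationPercent: 20,
	minPods:                     1,
	maxPods:                     5,
	firstScale:                  3,                // plain int now, no int() casts needed
	firstScaleStasis:            10 * time.Minute, // checked with the firstScale..firstScale+1 tolerance
	cpuBurst:                    700,
	secondScale:                 5,
}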
@@ -120,9 +120,9 @@ func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc
 	defer rc.CleanUp()
 	hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
 	defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
-	rc.WaitForReplicas(int(scaleTest.firstScale), timeToWait)
+	rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
 	if scaleTest.firstScaleStasis > 0 {
-		rc.EnsureDesiredReplicas(int(scaleTest.firstScale), scaleTest.firstScaleStasis)
+		rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis)
 	}
 	if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
 		rc.ConsumeCPU(scaleTest.cpuBurst)
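The commit message notes that the CPU-based scale down was flaking during stabilization because the resource consumer can use slightly more CPU than it was asked to. A small sketch of the documented CPU-utilization formula shows how a modest overshoot rounds the recommendation up by one replica, which is exactly what the firstScale..firstScale+1 window now tolerates. The numbers below are hypothetical, not taken from the test.

package main

import (
	"fmt"
	"math"
)

// desiredForCPU sketches the HPA recommendation for a CPU utilization target:
// ceil(currentReplicas * currentUtilization / targetUtilization), where
// utilization is usage relative to the pod's CPU request. Readiness handling
// and the controller's tolerance band are ignored.
func desiredForCPU(currentReplicas int, currentUtilizationPct, targetUtilizationPct float64) int {
	return int(math.Ceil(float64(currentReplicas) * currentUtilizationPct / targetUtilizationPct))
}

func main() {
	// Hypothetical numbers: 3 pods against a 20% utilization target.
	fmt.Println(desiredForCPU(3, 20.0, 20.0)) // 3: consumer burns exactly what was requested of it
	fmt.Println(desiredForCPU(3, 21.0, 20.0)) // 4: a slight overshoot rounds up to one extra replica
}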
@@ -370,12 +370,18 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.D
 }
 
 func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration) {
+	rc.EnsureDesiredReplicasInRange(desiredReplicas, desiredReplicas, duration)
+}
+
+func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, maxDesiredReplicas int, duration time.Duration) {
 	interval := 10 * time.Second
 	err := wait.PollImmediate(interval, duration, func() (bool, error) {
 		replicas := rc.GetReplicas()
-		framework.Logf("expecting there to be %d replicas (are: %d)", desiredReplicas, replicas)
-		if replicas != desiredReplicas {
-			return false, fmt.Errorf("number of replicas changed unexpectedly")
+		framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
+		if replicas < minDesiredReplicas {
+			return false, fmt.Errorf("number of replicas below target")
+		} else if replicas > maxDesiredReplicas {
+			return false, fmt.Errorf("number of replicas above target")
+		} else {
+			return false, nil // Expected number of replicas found. Continue polling until timeout.
 		}
-		return false, nil // Expected number of replicas found. Continue polling until timeout.
 	})
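A note on the new helper's semantics: EnsureDesiredReplicas now simply forwards to EnsureDesiredReplicasInRange with an equal min and max, and the condition returning (false, nil) keeps wait.PollImmediate polling, so the success path is running out the clock without the replica count ever leaving [min, max]. Presumably the caller treats the resulting timeout as success, though that part of the file is not shown in this diff. The sketch below mimics the pattern with only the standard library; names, values, and durations are hypothetical.

package main

import (
	"fmt"
	"time"
)

// ensureInRange mimics EnsureDesiredReplicasInRange using only the standard
// library: poll until the timeout, fail as soon as the value leaves
// [min, max], and treat surviving the whole window as success.
func ensureInRange(get func() int, min, max int, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if n := get(); n < min || n > max {
			return fmt.Errorf("replica count %d left expected range [%d, %d]", n, min, max)
		}
		if time.Now().After(deadline) {
			return nil // stayed in range for the whole stabilization window
		}
		time.Sleep(interval)
	}
}

func main() {
	// Hypothetical getter standing in for rc.GetReplicas().
	replicas := func() int { return 3 }
	// Tolerate 3 or 4 replicas, polling briefly for this demo.
	fmt.Println(ensureInRange(replicas, 3, 4, 10*time.Millisecond, 50*time.Millisecond)) // <nil>
}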