Merge pull request #123473 from dims/fix-for-persistent-horizontal-pod-autoscaling-failures

Fix for persistent Horizontal pod autoscaling failures
Kubernetes Prow Robot 2024-02-24 05:51:16 -08:00 committed by GitHub
commit 4c15f5f7b0


@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 	"math"
+	"strings"
 	"time"
 
 	gcm "google.golang.org/api/monitoring/v3"
@@ -614,7 +615,7 @@ func ensureDesiredReplicasInRange(ctx context.Context, deploymentName, namespace
err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
deployment, err := cs.AppsV1().Deployments(namespace).Get(ctx, deploymentName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get replication controller %s: %v", deployment, err)
return true, err
}
replicas := int(deployment.Status.ReadyReplicas)
framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
@@ -627,7 +628,7 @@ func ensureDesiredReplicasInRange(ctx context.Context, deploymentName, namespace
 		}
 	})
 	// The call above always returns an error, but if it is timeout, it's OK (condition satisfied all the time).
-	if wait.Interrupted(err) {
+	if wait.Interrupted(err) || strings.Contains(err.Error(), "would exceed context deadline") {
		framework.Logf("Number of replicas was stable over %v", timeout)
		return
	}
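
For context, a minimal standalone sketch (not part of this diff) of the pattern the fix relies on: when the replica count stays in the desired range, wait.PollUntilContextTimeout is expected to run out of time, so both a plain timeout (wait.Interrupted) and an error containing "would exceed context deadline" are treated as success. The hard-coded replica count and the plain main program below are stand-ins for deployment.Status.ReadyReplicas and the e2e test framework.

// sketch: a poll that is expected to hit its timeout; running out of time means
// the replica count never left the desired range, so it is the success case.
package main

import (
	"context"
	"fmt"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx := context.Background()
	interval := 200 * time.Millisecond
	timeout := time.Second

	// The condition never reports done: as long as the (faked) replica count
	// is in range, polling continues until the timeout.
	err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (bool, error) {
		replicas := 3 // stand-in for int(deployment.Status.ReadyReplicas)
		fmt.Printf("replicas: %d, still in range\n", replicas)
		return false, nil
	})

	// Depending on how the deadline is hit, the error can be a plain timeout
	// (wait.Interrupted) or mention "would exceed context deadline"; the PR
	// treats both as "replicas were stable for the whole window".
	if wait.Interrupted(err) || (err != nil && strings.Contains(err.Error(), "would exceed context deadline")) {
		fmt.Printf("number of replicas was stable over %v\n", timeout)
		return
	}
	fmt.Printf("unexpected error: %v\n", err)
}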