mirror of https://github.com/k3s-io/kubernetes.git
Add more logging to e2e HPA tests
To help debug any future flakes: log the status of the HPA while watching for recommendation stability (the place where most flakes seem to happen).
parent f44a8a4cac
commit 100e61a7d6
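Before the diff itself, a minimal self-contained sketch of the pattern the commit introduces: poll the replica count and, on every tick, fetch the HPA and log its status so a flaky run records the autoscaler's view. ensureReplicasInRange and the getReplicas callback are invented names, and the snippet assumes the same context-free client-go Get signature the diff uses.

package hpasketch

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// ensureReplicasInRange keeps checking that getReplicas() stays within
// [min, max] for the whole duration, logging the HPA status on each tick.
func ensureReplicasInRange(cs kubernetes.Interface, ns, hpaName string,
	min, max int, getReplicas func() int, duration time.Duration) error {
	err := wait.PollImmediate(10*time.Second, duration, func() (bool, error) {
		replicas := getReplicas()
		fmt.Printf("expecting [%d, %d] replicas, have: %d\n", min, max, replicas)
		// The commit's addition: dump the HPA status alongside the count.
		hpa, err := cs.AutoscalingV1().HorizontalPodAutoscalers(ns).Get(hpaName, metav1.GetOptions{})
		if err != nil {
			fmt.Printf("Error getting HPA: %s\n", err)
		} else {
			fmt.Printf("HPA status: %+v\n", hpa.Status)
		}
		if replicas < min {
			return false, fmt.Errorf("number of replicas below target")
		}
		if replicas > max {
			return false, fmt.Errorf("number of replicas above target")
		}
		return false, nil // never "done": stability is proven by timing out
	})
	if err == wait.ErrWaitTimeout {
		return nil // stayed in range for the full duration
	}
	return err
}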
@@ -120,9 +120,10 @@ func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc
 	defer rc.CleanUp()
 	hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
 	defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
+
 	rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
 	if scaleTest.firstScaleStasis > 0 {
-		rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis)
+		rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis, hpa.Name)
 	}
 	if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
 		rc.ConsumeCPU(scaleTest.cpuBurst)
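On the caller side, the only change is threading the HPA's name into the stasis check. A hedged illustration of the resulting call shape (the target utilization, replica bounds, and durations below are invented for the example, not taken from the test):

// Illustration only; 20, 1, 5 and the durations are made-up values.
hpa := common.CreateCPUHorizontalPodAutoscaler(rc, 20, 1, 5)
defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
rc.WaitForReplicas(3, 15*time.Minute)
// hpa.Name is the new trailing argument: it lets the poll loop fetch and log the HPA.
rc.EnsureDesiredReplicasInRange(3, 4, 10*time.Minute, hpa.Name)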
@@ -359,6 +359,10 @@ func (rc *ResourceConsumer) GetReplicas() int {
 	return 0
 }
 
+func (rc *ResourceConsumer) GetHpa(name string) (*autoscalingv1.HorizontalPodAutoscaler, error) {
+	return rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Get(name, metav1.GetOptions{})
+}
+
 func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) {
 	interval := 20 * time.Second
 	err := wait.PollImmediate(interval, duration, func() (bool, error) {
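GetHpa is a thin wrapper over the typed autoscaling/v1 client, using the context-free Get signature of that client-go vintage. A hypothetical follow-on use (not part of this commit) would be asserting on individual status fields rather than dumping the whole struct:

// Hypothetical: "consumer" is a placeholder HPA name.
hpa, err := rc.GetHpa("consumer")
if err == nil {
	framework.Logf("current/desired replicas: %d/%d", hpa.Status.CurrentReplicas, hpa.Status.DesiredReplicas)
}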
@@ -369,15 +373,21 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.D
 	framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
 }
 
-func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration) {
-	rc.EnsureDesiredReplicasInRange(desiredReplicas, desiredReplicas, duration)
+func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration, hpaName string) {
+	rc.EnsureDesiredReplicasInRange(desiredReplicas, desiredReplicas, duration, hpaName)
 }
 
-func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, maxDesiredReplicas int, duration time.Duration) {
+func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, maxDesiredReplicas int, duration time.Duration, hpaName string) {
 	interval := 10 * time.Second
 	err := wait.PollImmediate(interval, duration, func() (bool, error) {
 		replicas := rc.GetReplicas()
 		framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
+		as, err := rc.GetHpa(hpaName)
+		if err != nil {
+			framework.Logf("Error getting HPA: %s", err)
+		} else {
+			framework.Logf("HPA status: %+v", as.Status)
+		}
 		if replicas < minDesiredReplicas {
 			return false, fmt.Errorf("number of replicas below target")
 		} else if replicas > maxDesiredReplicas {
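The hunk is truncated above, but note that the condition function never returns true: it returns an error when the count leaves the range and false otherwise, so the poll runs for the full duration. Assuming the helper follows the usual e2e pattern of treating a timeout as proof of stability, the unshown tail would look roughly like:

// Assumption, not shown in the diff: timing out without an error means
// the replica count stayed in range for the whole duration.
if err == wait.ErrWaitTimeout {
	framework.Logf("Number of replicas was stable over %v", duration)
	return
}
framework.ExpectNoErrorWithOffset(1, err)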