From 100e61a7d67bcc8e286cd0d1c665293ab31c2794 Mon Sep 17 00:00:00 2001
From: Joachim Bartosik
Date: Mon, 5 Nov 2018 11:32:41 +0100
Subject: [PATCH] Add more logging to e2e HPA tests

To help debug flakes if any more show up. Log the status of the HPA when
watching for recommendation stability (the place where most flakes seem
to happen).
---
 .../autoscaling/horizontal_pod_autoscaling.go |  3 ++-
 test/e2e/common/autoscaling_utils.go          | 16 +++++++++++++---
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/test/e2e/autoscaling/horizontal_pod_autoscaling.go b/test/e2e/autoscaling/horizontal_pod_autoscaling.go
index 0a250d750e6..bdba3f73923 100644
--- a/test/e2e/autoscaling/horizontal_pod_autoscaling.go
+++ b/test/e2e/autoscaling/horizontal_pod_autoscaling.go
@@ -120,9 +120,10 @@ func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc
 	defer rc.CleanUp()
 	hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
 	defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
+
 	rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
 	if scaleTest.firstScaleStasis > 0 {
-		rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis)
+		rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis, hpa.Name)
 	}
 	if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
 		rc.ConsumeCPU(scaleTest.cpuBurst)
diff --git a/test/e2e/common/autoscaling_utils.go b/test/e2e/common/autoscaling_utils.go
index 36f5cd8d0da..fe8b5ab97fc 100644
--- a/test/e2e/common/autoscaling_utils.go
+++ b/test/e2e/common/autoscaling_utils.go
@@ -359,6 +359,10 @@ func (rc *ResourceConsumer) GetReplicas() int {
 	return 0
 }
 
+func (rc *ResourceConsumer) GetHpa(name string) (*autoscalingv1.HorizontalPodAutoscaler, error) {
+	return rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Get(name, metav1.GetOptions{})
+}
+
 func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) {
 	interval := 20 * time.Second
 	err := wait.PollImmediate(interval, duration, func() (bool, error) {
@@ -369,15 +373,21 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.D
 	framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
 }
 
-func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration) {
-	rc.EnsureDesiredReplicasInRange(desiredReplicas, desiredReplicas, duration)
+func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration, hpaName string) {
+	rc.EnsureDesiredReplicasInRange(desiredReplicas, desiredReplicas, duration, hpaName)
 }
 
-func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, maxDesiredReplicas int, duration time.Duration) {
+func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, maxDesiredReplicas int, duration time.Duration, hpaName string) {
 	interval := 10 * time.Second
 	err := wait.PollImmediate(interval, duration, func() (bool, error) {
 		replicas := rc.GetReplicas()
 		framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
+		as, err := rc.GetHpa(hpaName)
+		if err != nil {
+			framework.Logf("Error getting HPA: %s", err)
+		} else {
+			framework.Logf("HPA status: %+v", as.Status)
+		}
 		if replicas < minDesiredReplicas {
 			return false, fmt.Errorf("number of replicas below target")
 		} else if replicas > maxDesiredReplicas {
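
Note (not part of the patch): below is a minimal, standalone Go sketch of the pattern
the patch adds to EnsureDesiredReplicasInRange: on every poll tick, fetch the HPA, log
its status, and check the current replica count against a [min, max] range. The
kubeconfig path, the "default" namespace, the HPA name "consumer", and the
min/max/interval/duration values are all assumptions chosen for illustration. The
client-go Get(name, metav1.GetOptions{}) call uses the same pre-1.18 signature as the
patched helper; newer client-go versions take a context.Context as the first argument.

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: a reachable cluster configured in the default kubeconfig location.
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		log.Fatal(err)
	}
	clientSet, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	const (
		namespace          = "default"  // assumption: namespace of the HPA under test
		hpaName            = "consumer" // assumption: name of the HPA under test
		minDesiredReplicas = 3
		maxDesiredReplicas = 4
		interval           = 10 * time.Second
		duration           = 5 * time.Minute
	)

	// Poll for the whole duration, logging the HPA status on every tick so a flake
	// leaves a trail in the logs; fail fast only if the replica count leaves the range.
	err = wait.PollImmediate(interval, duration, func() (bool, error) {
		// NOTE: pre-1.18 client-go signature (no context), matching the patched helper.
		hpa, getErr := clientSet.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(hpaName, metav1.GetOptions{})
		if getErr != nil {
			log.Printf("Error getting HPA: %s", getErr)
			return false, nil // treat lookup errors as transient and keep polling
		}
		log.Printf("HPA status: %+v", hpa.Status)

		replicas := int(hpa.Status.CurrentReplicas)
		log.Printf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
		if replicas < minDesiredReplicas {
			return false, fmt.Errorf("number of replicas below target")
		} else if replicas > maxDesiredReplicas {
			return false, fmt.Errorf("number of replicas above target")
		}
		return false, nil // in range: keep polling until the duration elapses
	})

	// Running out the full duration without leaving the range is the success case
	// here, so a timeout from PollImmediate is expected rather than fatal.
	if err == wait.ErrWaitTimeout {
		log.Printf("replicas stayed in [%d, %d] for %v", minDesiredReplicas, maxDesiredReplicas, duration)
	} else if err != nil {
		log.Fatalf("replicas left the desired range: %s", err)
	}
}

Returning false, nil while the replica count is in range means the poll runs for the
full duration and "times out" on success, which is why the timeout error is treated as
the passing case in this sketch.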