Merge pull request #70649 from jbartosik/e2e-logging

Add more logging to e2e HPA tests
This commit is contained in:
k8s-ci-robot 2018-11-06 06:06:35 -08:00 committed by GitHub
commit a13599be7e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 15 additions and 4 deletions

View File

@ -120,9 +120,10 @@ func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc
defer rc.CleanUp() defer rc.CleanUp()
hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods) hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name) defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
rc.WaitForReplicas(scaleTest.firstScale, timeToWait) rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
if scaleTest.firstScaleStasis > 0 { if scaleTest.firstScaleStasis > 0 {
rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis) rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis, hpa.Name)
} }
if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 { if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
rc.ConsumeCPU(scaleTest.cpuBurst) rc.ConsumeCPU(scaleTest.cpuBurst)

View File

@ -359,6 +359,10 @@ func (rc *ResourceConsumer) GetReplicas() int {
return 0 return 0
} }
// GetHpa fetches the autoscaling/v1 HorizontalPodAutoscaler named name from
// the consumer's namespace (rc.nsName) via the client set. It is used below to
// log the HPA's status on each poll while waiting for replica counts to settle.
func (rc *ResourceConsumer) GetHpa(name string) (*autoscalingv1.HorizontalPodAutoscaler, error) {
return rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Get(name, metav1.GetOptions{})
}
func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) { func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) {
interval := 20 * time.Second interval := 20 * time.Second
err := wait.PollImmediate(interval, duration, func() (bool, error) { err := wait.PollImmediate(interval, duration, func() (bool, error) {
@ -369,15 +373,21 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.D
framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas) framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
} }
func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration) { func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration, hpaName string) {
rc.EnsureDesiredReplicasInRange(desiredReplicas, desiredReplicas, duration) rc.EnsureDesiredReplicasInRange(desiredReplicas, desiredReplicas, duration, hpaName)
} }
func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, maxDesiredReplicas int, duration time.Duration) { func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, maxDesiredReplicas int, duration time.Duration, hpaName string) {
interval := 10 * time.Second interval := 10 * time.Second
err := wait.PollImmediate(interval, duration, func() (bool, error) { err := wait.PollImmediate(interval, duration, func() (bool, error) {
replicas := rc.GetReplicas() replicas := rc.GetReplicas()
framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas) framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
as, err := rc.GetHpa(hpaName)
if err != nil {
framework.Logf("Error getting HPA: %s", err)
} else {
framework.Logf("HPA status: %+v", as.Status)
}
if replicas < minDesiredReplicas { if replicas < minDesiredReplicas {
return false, fmt.Errorf("number of replicas below target") return false, fmt.Errorf("number of replicas below target")
} else if replicas > maxDesiredReplicas { } else if replicas > maxDesiredReplicas {