Merge pull request #67712 from shyamjvs/parallelize-density-test-deletion

Automatic merge from submit-queue (batch tested with PRs 63757, 67698, 67712, 67494, 67700). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Parallelize RC deletion in density test

Hoping this helps with https://github.com/kubernetes/test-infra/issues/8348. If the RC (and hence pod) deletions indeed happen in parallel, this may cut roughly 5 minutes from the density test in kubemark-500.
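
For reference, the change applies the standard Go fan-out pattern: launch one goroutine per collection and block on a `sync.WaitGroup` until every deletion has returned. Below is a minimal standalone sketch of that pattern; `deleteRC` and the RC names are placeholders for illustration, not the e2e framework's real API.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// deleteRC stands in for the real deletion call (framework.DeleteResourceAndWaitForGC
// in the test); here it only simulates some work.
func deleteRC(name string) error {
	time.Sleep(100 * time.Millisecond)
	fmt.Println("deleted", name)
	return nil
}

func main() {
	rcs := []string{"density-rc-0", "density-rc-1", "density-rc-2"}

	var wg sync.WaitGroup
	wg.Add(len(rcs))
	for _, name := range rcs {
		name := name // capture the loop variable for the goroutine (needed before Go 1.22)
		go func() {
			// Signal completion in a defer so wg.Wait() never blocks forever,
			// even if the deletion fails.
			defer wg.Done()
			if err := deleteRC(name); err != nil {
				fmt.Printf("deleting %s failed: %v\n", name, err)
			}
		}()
	}
	wg.Wait() // all deletions run concurrently; total time is roughly that of the slowest one
}
```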

/cc @wojtek-t 

```release-note
NONE
```
Author: Kubernetes Submit Queue
Date:   2018-08-22 14:17:21 -07:00 (committed by GitHub)


@@ -357,14 +357,23 @@ func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPha
By("Deleting created Collections")
numberOfClients := len(dtc.ClientSets)
// We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
wg := sync.WaitGroup{}
wg.Add(len(dtc.Configs))
for i := range dtc.Configs {
name := dtc.Configs[i].GetName()
namespace := dtc.Configs[i].GetNamespace()
kind := dtc.Configs[i].GetKind()
By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
err := framework.DeleteResourceAndWaitForGC(dtc.ClientSets[i%numberOfClients], kind, namespace, name)
framework.ExpectNoError(err)
client := dtc.ClientSets[i%numberOfClients]
go func() {
defer GinkgoRecover()
// Call wg.Done() in defer to avoid blocking whole test
// in case of error from RunRC.
defer wg.Done()
err := framework.DeleteResourceAndWaitForGC(client, kind, namespace, name)
framework.ExpectNoError(err)
}()
}
wg.Wait()
podCleanupPhase.End()
dtc.deleteSecrets(testPhaseDurations.StartPhase(910, "secrets deletion"))
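
Two details of the goroutine body are worth noting. `defer GinkgoRecover()` is needed because `framework.ExpectNoError` fails the spec by panicking, and a panic in a goroutine that Ginkgo did not start would otherwise crash the test process instead of being reported as a failure. And `wg.Done()` is deferred so that `wg.Wait()` cannot block indefinitely if the deletion call errors out.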