Parallelize RC deletion in density test

This commit is contained in:
Shyam Jeedigunta 2018-08-22 16:30:01 +02:00
parent 0e31372b2f
commit c07ecad613

View File

@ -357,14 +357,23 @@ func cleanupDensityTest(dtc DensityTestConfig, testPhaseDurations *timer.TestPha
By("Deleting created Collections")
numberOfClients := len(dtc.ClientSets)
// We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
wg := sync.WaitGroup{}
wg.Add(len(dtc.Configs))
for i := range dtc.Configs {
name := dtc.Configs[i].GetName()
namespace := dtc.Configs[i].GetNamespace()
kind := dtc.Configs[i].GetKind()
By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
err := framework.DeleteResourceAndWaitForGC(dtc.ClientSets[i%numberOfClients], kind, namespace, name)
framework.ExpectNoError(err)
client := dtc.ClientSets[i%numberOfClients]
go func() {
defer GinkgoRecover()
// Call wg.Done() in defer to avoid blocking the whole test
// in case of error from DeleteResourceAndWaitForGC.
defer wg.Done()
err := framework.DeleteResourceAndWaitForGC(client, kind, namespace, name)
framework.ExpectNoError(err)
}()
}
wg.Wait()
podCleanupPhase.End()
dtc.deleteSecrets(testPhaseDurations.StartPhase(910, "secrets deletion"))