From 5130db416d9353c65e1aa03d016b594ef401f105 Mon Sep 17 00:00:00 2001
From: Chris Henzie
Date: Fri, 19 Feb 2021 12:51:05 -0800
Subject: [PATCH] Clean up e2e stress test resources concurrently

---
 .../testsuites/snapshottable_stress.go        | 27 +++++-----
 test/e2e/storage/testsuites/volume_stress.go  | 50 ++++++++++++++-----
 2 files changed, 49 insertions(+), 28 deletions(-)

diff --git a/test/e2e/storage/testsuites/snapshottable_stress.go b/test/e2e/storage/testsuites/snapshottable_stress.go
index db81666d677..392e51e9c57 100644
--- a/test/e2e/storage/testsuites/snapshottable_stress.go
+++ b/test/e2e/storage/testsuites/snapshottable_stress.go
@@ -194,10 +194,9 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
 			wg   sync.WaitGroup
 		)
 
-		for i, snapshot := range stressTest.snapshots {
-			wg.Add(1)
-
-			go func(i int, snapshot *storageframework.SnapshotResource) {
+		wg.Add(len(stressTest.snapshots))
+		for _, snapshot := range stressTest.snapshots {
+			go func(snapshot *storageframework.SnapshotResource) {
 				defer ginkgo.GinkgoRecover()
 				defer wg.Done()
 
@@ -206,14 +205,13 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
 				mu.Lock()
 				defer mu.Unlock()
 				errs = append(errs, err)
-			}(i, snapshot)
+			}(snapshot)
 		}
 		wg.Wait()
 
-		for i, pod := range stressTest.pods {
-			wg.Add(1)
-
-			go func(i int, pod *v1.Pod) {
+		wg.Add(len(stressTest.pods))
+		for _, pod := range stressTest.pods {
+			go func(pod *v1.Pod) {
 				defer ginkgo.GinkgoRecover()
 				defer wg.Done()
 
@@ -222,14 +220,13 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
 				mu.Lock()
 				defer mu.Unlock()
 				errs = append(errs, err)
-			}(i, pod)
+			}(pod)
 		}
 		wg.Wait()
 
-		for i, volume := range stressTest.volumes {
-			wg.Add(1)
-
-			go func(i int, volume *storageframework.VolumeResource) {
+		wg.Add(len(stressTest.volumes))
+		for _, volume := range stressTest.volumes {
+			go func(volume *storageframework.VolumeResource) {
 				defer ginkgo.GinkgoRecover()
 				defer wg.Done()
 
@@ -238,7 +235,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
 				mu.Lock()
 				defer mu.Unlock()
 				errs = append(errs, err)
-			}(i, volume)
+			}(volume)
 		}
 		wg.Wait()
 
diff --git a/test/e2e/storage/testsuites/volume_stress.go b/test/e2e/storage/testsuites/volume_stress.go
index 801e8186968..5741f50fe9a 100644
--- a/test/e2e/storage/testsuites/volume_stress.go
+++ b/test/e2e/storage/testsuites/volume_stress.go
@@ -46,8 +46,8 @@ type volumeStressTest struct {
 
 	migrationCheck *migrationOpCheck
 
-	resources []*storageframework.VolumeResource
-	pods      []*v1.Pod
+	volumes []*storageframework.VolumeResource
+	pods    []*v1.Pod
 	// stop and wait for any async routines
 	wg     sync.WaitGroup
 	ctx    context.Context
@@ -121,7 +121,7 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
 		// Now do the more expensive test initialization.
 		l.config, l.driverCleanup = driver.PrepareTest(f)
 		l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName)
-		l.resources = []*storageframework.VolumeResource{}
+		l.volumes = []*storageframework.VolumeResource{}
 		l.pods = []*v1.Pod{}
 		l.testOptions = *dInfo.StressTestOptions
 		l.ctx, l.cancel = context.WithCancel(context.Background())
@@ -131,7 +131,7 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
 		for i := 0; i < l.testOptions.NumPods; i++ {
 			framework.Logf("Creating resources for pod %v/%v", i, l.testOptions.NumPods-1)
 			r := storageframework.CreateVolumeResource(driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange)
-			l.resources = append(l.resources, r)
+			l.volumes = append(l.volumes, r)
 			podConfig := e2epod.Config{
 				NS:           f.Namespace.Name,
 				PVCs:         []*v1.PersistentVolumeClaim{r.Pvc},
@@ -145,21 +145,45 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
 	}
 
 	cleanup := func() {
-		var errs []error
-
 		framework.Logf("Stopping and waiting for all test routines to finish")
 		l.cancel()
 		l.wg.Wait()
 
-		for _, pod := range l.pods {
-			framework.Logf("Deleting pod %v", pod.Name)
-			err := e2epod.DeletePodWithWait(cs, pod)
-			errs = append(errs, err)
-		}
+		var (
+			errs []error
+			mu   sync.Mutex
+			wg   sync.WaitGroup
+		)
 
-		for _, resource := range l.resources {
-			errs = append(errs, resource.CleanupResource())
+		wg.Add(len(l.pods))
+		for _, pod := range l.pods {
+			go func(pod *v1.Pod) {
+				defer ginkgo.GinkgoRecover()
+				defer wg.Done()
+
+				framework.Logf("Deleting pod %v", pod.Name)
+				err := e2epod.DeletePodWithWait(cs, pod)
+				mu.Lock()
+				defer mu.Unlock()
+				errs = append(errs, err)
+			}(pod)
 		}
+		wg.Wait()
+
+		wg.Add(len(l.volumes))
+		for _, volume := range l.volumes {
+			go func(volume *storageframework.VolumeResource) {
+				defer ginkgo.GinkgoRecover()
+				defer wg.Done()
+
+				framework.Logf("Deleting volume %s", volume.Pvc.GetName())
+				err := volume.CleanupResource()
+				mu.Lock()
+				defer mu.Unlock()
+				errs = append(errs, err)
+			}(volume)
+		}
+		wg.Wait()
 
 		errs = append(errs, storageutils.TryFunc(l.driverCleanup))
 		framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
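
Note (illustration only, not part of the patch): both cleanup paths above follow the same fan-out pattern: size the WaitGroup to the number of resources up front, delete each resource in its own goroutine, append the result to a shared error slice under a mutex, then wait and aggregate. The standalone sketch below shows that pattern in isolation under stated assumptions: cleanupItem and cleanupConcurrently are hypothetical stand-ins for the real cleanup calls (e2epod.DeletePodWithWait, VolumeResource.CleanupResource, snapshot.CleanupResource), ginkgo.GinkgoRecover is omitted, and the standard library's errors.Join (Go 1.20+) stands in for k8s.io/apimachinery's errors.NewAggregate.

// concurrent_cleanup_sketch.go: illustrative only; the names below are hypothetical.
package main

import (
	"errors"
	"fmt"
	"sync"
)

// cleanupItem is a placeholder for a per-resource cleanup call that may fail,
// e.g. deleting a pod or a volume.
func cleanupItem(name string) error {
	if name == "bad" {
		return fmt.Errorf("failed to clean up %q", name)
	}
	return nil
}

// cleanupConcurrently deletes every item in its own goroutine and collects the
// results under a mutex: size the WaitGroup up front, fan out, wait, aggregate.
func cleanupConcurrently(items []string) error {
	var (
		errs []error
		mu   sync.Mutex
		wg   sync.WaitGroup
	)

	wg.Add(len(items))
	for _, item := range items {
		go func(item string) {
			defer wg.Done()

			err := cleanupItem(item)
			mu.Lock()
			defer mu.Unlock()
			errs = append(errs, err)
		}(item)
	}
	wg.Wait()

	// errors.Join drops nil entries, similar in spirit to errors.NewAggregate.
	return errors.Join(errs...)
}

func main() {
	if err := cleanupConcurrently([]string{"pod-0", "bad", "pod-2"}); err != nil {
		fmt.Println("cleanup errors:", err)
	}
}

As in the patch, the loop variable is passed as a goroutine argument so each goroutine operates on its own item rather than the shared iteration variable (required before Go 1.22), and errors are only appended while holding the mutex.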