From 5c09ca57ffb6a7e797068d28141182443dae2260 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Mon, 12 Dec 2022 09:16:17 +0100
Subject: [PATCH] e2e storage: remove context.WithCancel

The context provided by Ginkgo will get cancelled automatically.
---
 .../testsuites/snapshottable_stress.go        | 20 ++++++++++----------
 test/e2e/storage/testsuites/volume_stress.go  | 24 +++++++++++-------------
 2 files changed, 21 insertions(+), 23 deletions(-)

diff --git a/test/e2e/storage/testsuites/snapshottable_stress.go b/test/e2e/storage/testsuites/snapshottable_stress.go
index 9e99ca0b242..531d415d5ad 100644
--- a/test/e2e/storage/testsuites/snapshottable_stress.go
+++ b/test/e2e/storage/testsuites/snapshottable_stress.go
@@ -54,9 +54,7 @@ type snapshottableStressTest struct {
 	snapshotsMutex sync.Mutex
 
 	// Stop and wait for any async routines.
-	ctx    context.Context
-	wg     sync.WaitGroup
-	cancel context.CancelFunc
+	wg sync.WaitGroup
 }
 
 // InitCustomSnapshottableStressTestSuite returns snapshottableStressTestSuite that implements TestSuite interface
@@ -129,7 +127,6 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
 		snapshottableDriver, _ = driver.(storageframework.SnapshottableTestDriver)
 		cs = f.ClientSet
 		config := driver.PrepareTest(f)
-		ctx, cancel := context.WithCancel(context.Background())
 
 		stressTest = &snapshottableStressTest{
 			config:      config,
@@ -137,8 +134,6 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
 			snapshots:   []*storageframework.SnapshotResource{},
 			pods:        []*v1.Pod{},
 			testOptions: *driverInfo.VolumeSnapshotStressTestOptions,
-			ctx:         ctx,
-			cancel:      cancel,
 		}
 	}
 
@@ -169,7 +164,6 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
 				defer wg.Done()
 
 				if _, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
-					stressTest.cancel()
 					framework.Failf("Failed to create pod-%d [%+v]. Error: %v", i, pod, err)
 				}
 			}(i, pod)
@@ -178,7 +172,6 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
 
 		for i, pod := range stressTest.pods {
 			if err := e2epod.WaitForPodRunningInNamespace(cs, pod); err != nil {
-				stressTest.cancel()
 				framework.Failf("Failed to wait for pod-%d [%+v] turn into running status. Error: %v", i, pod, err)
 			}
 		}
@@ -186,7 +179,6 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
 
 	cleanup := func() {
 		framework.Logf("Stopping and waiting for all test routines to finish")
-		stressTest.cancel()
 		stressTest.wg.Wait()
 
 		var (
@@ -265,7 +257,15 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
 				volume := stressTest.volumes[podIndex]
 
 				select {
-				case <-stressTest.ctx.Done():
+				case <-ctx.Done():
+					// This looks like a bug in the
+					// original test
+					// (https://github.com/kubernetes/kubernetes/blob/21049c2a1234ae3eea57357ed4329ed567a2dab3/test/e2e/storage/testsuites/snapshottable_stress.go#L269):
+					// This early return will never
+					// get reached even if some
+					// other goroutine fails
+					// because the context doesn't
+					// get cancelled.
 					return
 				default:
 					framework.Logf("Pod-%d [%s], Iteration %d/%d", podIndex, pod.Name, snapshotIndex, stressTest.testOptions.NumSnapshots-1)
diff --git a/test/e2e/storage/testsuites/volume_stress.go b/test/e2e/storage/testsuites/volume_stress.go
index 17fd6f0ec1b..a9e42fb65b0 100644
--- a/test/e2e/storage/testsuites/volume_stress.go
+++ b/test/e2e/storage/testsuites/volume_stress.go
@@ -48,9 +48,7 @@ type volumeStressTest struct {
 	volumes []*storageframework.VolumeResource
 	pods    []*v1.Pod
 	// stop and wait for any async routines
-	wg     sync.WaitGroup
-	ctx    context.Context
-	cancel context.CancelFunc
+	wg sync.WaitGroup
 
 	testOptions storageframework.StressTestOptions
 }
@@ -124,7 +122,6 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
 		l.volumes = []*storageframework.VolumeResource{}
 		l.pods = []*v1.Pod{}
 		l.testOptions = *dInfo.StressTestOptions
-		l.ctx, l.cancel = context.WithCancel(context.Background())
 	}
 
 	createPodsAndVolumes := func() {
@@ -146,7 +143,6 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
 
 	cleanup := func() {
 		framework.Logf("Stopping and waiting for all test routines to finish")
-		l.cancel()
 		l.wg.Wait()
 
 		var (
@@ -189,13 +185,10 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
 		l.migrationCheck.validateMigrationVolumeOpCounts()
 	}
 
-	ginkgo.BeforeEach(func() {
+	ginkgo.It("multiple pods should access different volumes repeatedly [Slow] [Serial]", func(ctx context.Context) {
 		init()
 		ginkgo.DeferCleanup(cleanup)
 		createPodsAndVolumes()
-	})
-
-	ginkgo.It("multiple pods should access different volumes repeatedly [Slow] [Serial]", func(ctx context.Context) {
 		// Restart pod repeatedly
 		for i := 0; i < l.testOptions.NumPods; i++ {
 			podIndex := i
@@ -205,20 +198,26 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
 			defer l.wg.Done()
 			for j := 0; j < l.testOptions.NumRestarts; j++ {
 				select {
-				case <-l.ctx.Done():
+				case <-ctx.Done():
+					// This looks like a bug in the
+					// original test
+					// (https://github.com/kubernetes/kubernetes/blob/21049c2a1234ae3eea57357ed4329ed567a2dab3/test/e2e/storage/testsuites/volume_stress.go#L212):
+					// This early return will never
+					// get reached even if some
+					// other goroutine fails
+					// because the context doesn't
+					// get cancelled.
 					return
 				default:
 					pod := l.pods[podIndex]
 					framework.Logf("Pod-%v [%v], Iteration %v/%v", podIndex, pod.Name, j, l.testOptions.NumRestarts-1)
 					_, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
 					if err != nil {
-						l.cancel()
 						framework.Failf("Failed to create pod-%v [%+v]. Error: %v", podIndex, pod, err)
 					}
 
 					err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
 					if err != nil {
-						l.cancel()
 						framework.Failf("Failed to wait for pod-%v [%+v] turn into running status. Error: %v", podIndex, pod, err)
 					}
 
@@ -226,7 +225,6 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
 					err = e2epod.DeletePodWithWait(f.ClientSet, pod)
 					if err != nil {
-						l.cancel()
 						framework.Failf("Failed to delete pod-%v [%+v]. Error: %v", podIndex, pod, err)
 					}
 				}
 			}
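
Note (illustration, not part of the patch): the change relies on a Ginkgo v2 feature where a spec body declared as func(ctx context.Context) receives a spec-scoped context which, as the commit message says, gets cancelled automatically once the spec exits, including when it exits because of a failure, timeout, or interrupt. The following minimal, self-contained sketch shows that lifecycle outside the Kubernetes e2e framework; the suite name, worker count, iteration count, and sleep interval are made up for the example:

package stress_test

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestStress(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "stress suite sketch")
}

var _ = ginkgo.Describe("async workers", func() {
	// Taking a context.Context parameter asks Ginkgo v2 for a
	// spec-scoped context; Ginkgo cancels it on its own when the spec
	// ends, so no context.WithCancel/CancelFunc bookkeeping is needed
	// and failure paths don't have to remember to call cancel().
	ginkgo.It("stops its workers when the spec ends", func(ctx context.Context) {
		var wg sync.WaitGroup

		// Runs after the spec body, like cleanup() in the stress
		// suites; by then ctx is cancelled, so the wait cannot hang
		// on workers that are still selecting on ctx.Done().
		ginkgo.DeferCleanup(wg.Wait)

		for i := 0; i < 3; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for j := 0; j < 100; j++ {
					select {
					case <-ctx.Done():
						// Early exit once the spec ends for any
						// reason, including a failure elsewhere
						// that terminates the spec.
						return
					case <-time.After(time.Millisecond):
						// Stand-in for one unit of stress-test work.
					}
				}
			}()
		}
	})
})

As the comments added by the patch point out, the old manually managed context was not reliably cancelled on every failure path, leaving the ctx.Done() arm effectively dead; tying cancellation to the lifetime of the spec itself is what lets the deleted cancel() calls go away.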