e2e storage: fix "Stress with local volumes [Serial] should be able to process many pods and reuse local volumes"

The background goroutine was started with the context from ginkgo.BeforeEach.
Ginkgo cancels that context as soon as BeforeEach returns, which then led to
"context canceled" errors in the goroutine's API calls. While at it, the entire
goroutine start/stop handling gets moved into the BeforeEach and simplified.
Patrick Ohly 2022-12-19 11:23:53 +01:00
parent 2ca74f2885
commit 4d7e2894b2
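
For reference, the lifecycle pattern this commit switches to looks roughly like the following minimal sketch (not the test's actual code; recycleUnboundPVs is a hypothetical stand-in for the PV watch loop shown in the diff below):

package example

import (
	"context"
	"sync"

	"github.com/onsi/ginkgo/v2"
)

var _ = ginkgo.Describe("background goroutine lifecycle", func() {
	ginkgo.BeforeEach(func(ctx context.Context) {
		// Ginkgo cancels the ctx it passes in once BeforeEach returns, so a
		// goroutine that outlives BeforeEach must derive its own context.
		backgroundCtx, cancel := context.WithCancel(context.Background())
		var wg sync.WaitGroup
		wg.Add(1)
		// DeferCleanup replaces the old AfterEach + close(stopCh) teardown:
		// it runs during the cleanup phase after the spec finishes.
		ginkgo.DeferCleanup(func() {
			cancel()  // ask the goroutine to stop ...
			wg.Wait() // ... and wait until it actually has
		})
		go func() {
			defer ginkgo.GinkgoRecover() // report panics/failures to Ginkgo
			defer wg.Done()
			recycleUnboundPVs(backgroundCtx)
		}()
	})
})

// recycleUnboundPVs is a hypothetical placeholder for the watch loop in the
// diff below; the only contract is that it returns once ctx is canceled.
func recycleUnboundPVs(ctx context.Context) {
	<-ctx.Done()
}

Compared to the old close(stopCh) signal, canceling a context both ends the select loop and aborts in-flight API calls, which is why the diff also threads backgroundCtx through the Watch/Get/Delete/CreatePV calls.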

@@ -445,8 +445,6 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 		var (
 			allLocalVolumes = make(map[string][]*localTestVolume)
 			volType         = TmpfsLocalVolumeType
-			stopCh          = make(chan struct{})
-			wg              sync.WaitGroup
 		)
 		const (
@@ -471,11 +469,18 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 				}
 			}
 			ginkgo.By("Start a goroutine to recycle unbound PVs")
+			backgroundCtx, cancel := context.WithCancel(context.Background())
+			var wg sync.WaitGroup
 			wg.Add(1)
+			ginkgo.DeferCleanup(func() {
+				ginkgo.By("Stop and wait for recycle goroutine to finish")
+				cancel()
+				wg.Wait()
+			})
 			go func() {
 				defer ginkgo.GinkgoRecover()
 				defer wg.Done()
-				w, err := config.client.CoreV1().PersistentVolumes().Watch(ctx, metav1.ListOptions{})
+				w, err := config.client.CoreV1().PersistentVolumes().Watch(backgroundCtx, metav1.ListOptions{})
 				framework.ExpectNoError(err)
 				if w == nil {
 					return
@@ -494,7 +499,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 						if pv.Status.Phase == v1.VolumeBound || pv.Status.Phase == v1.VolumeAvailable {
 							continue
 						}
-						pv, err = config.client.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
+						pv, err = config.client.CoreV1().PersistentVolumes().Get(backgroundCtx, pv.Name, metav1.GetOptions{})
 						if apierrors.IsNotFound(err) {
 							continue
 						}
@@ -505,14 +510,14 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 								if localVolume.pv.Name != pv.Name {
 									continue
 								}
-								err = config.client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, metav1.DeleteOptions{})
+								err = config.client.CoreV1().PersistentVolumes().Delete(backgroundCtx, pv.Name, metav1.DeleteOptions{})
 								framework.ExpectNoError(err)
 								pvConfig := makeLocalPVConfig(config, localVolume)
-								localVolume.pv, err = e2epv.CreatePV(ctx, config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig))
+								localVolume.pv, err = e2epv.CreatePV(backgroundCtx, config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig))
 								framework.ExpectNoError(err)
 							}
 						}
-				case <-stopCh:
+				case <-backgroundCtx.Done():
 					return
 				}
 			}
@@ -520,9 +525,6 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 		})
 		ginkgo.AfterEach(func(ctx context.Context) {
-			ginkgo.By("Stop and wait for recycle goroutine to finish")
-			close(stopCh)
-			wg.Wait()
 			ginkgo.By("Clean all PVs")
 			for nodeName, localVolumes := range allLocalVolumes {
 				ginkgo.By(fmt.Sprintf("Cleaning up %d local volumes on node %q", len(localVolumes), nodeName))