mirror of https://github.com/k3s-io/kubernetes.git
Use goroutines to speed up volume cleanups
This removes WaitTimeoutForPodNoLongerRunningOrNotFoundInNamespace, introduced in f2b9479f8e, and changes the test to use goroutines to speed up the cleanups.
parent 77b027936a
commit 0253397fbd
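The change below replaces a sequential wait-and-cleanup loop with one goroutine per test config. As a minimal, generic sketch of that pattern (placeholder cleanup functions, not the test's real helpers), assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"sync"
)

func main() {
	// Placeholder cleanups standing in for the per-config PV/PVC teardown.
	cleanups := []func() error{
		func() error { fmt.Println("cleanup A"); return nil },
		func() error { fmt.Println("cleanup B"); return nil },
	}

	var wg sync.WaitGroup
	wg.Add(len(cleanups))
	for i := range cleanups {
		go func(cleanup func() error) {
			defer wg.Done()
			if err := cleanup(); err != nil {
				fmt.Println("cleanup failed:", err)
			}
		}(cleanups[i])
	}
	wg.Wait() // every cleanup has returned once this unblocks
}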
@@ -504,25 +504,6 @@ func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, name
 	})
 }
 
-// WaitTimeoutForPodNoLongerRunningOrNotFoundInNamespace waits default amount of time (defaultPodDeletionTimeout)
-// for the specified pod to stop running or disappear. Returns an error if timeout occurs first.
-func WaitTimeoutForPodNoLongerRunningOrNotFoundInNamespace(c clientset.Interface, podName, namespace string) error {
-	return wait.PollImmediate(poll, defaultPodDeletionTimeout, func() (bool, error) {
-		pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
-		if apierrors.IsNotFound(err) {
-			return true, nil
-		}
-		if err != nil {
-			return handleWaitingAPIError(err, true, "getting pod %s", podIdentifier(namespace, podName))
-		}
-		switch pod.Status.Phase {
-		case v1.PodFailed, v1.PodSucceeded:
-			return true, nil
-		}
-		return false, nil
-	})
-}
-
 // WaitForPodNameRunningInNamespace waits default amount of time (PodStartTimeout) for the specified pod to become running.
 // Returns an error if timeout occurs first, or pod goes in to failed state.
 func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error {
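The helper removed above is a plain poll loop. A self-contained sketch of the same pattern, assuming client-go and placeholder interval/timeout values in place of the framework's poll and defaultPodDeletionTimeout (and simply retrying on API errors instead of calling handleWaitingAPIError):

package podwait

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForPodGoneOrTerminal polls until the pod is deleted or reaches a
// terminal phase, or the timeout expires.
func waitForPodGoneOrTerminal(c clientset.Interface, podName, namespace string) error {
	return wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // the pod object is gone
		}
		if err != nil {
			return false, nil // transient API error: keep polling
		}
		switch pod.Status.Phase {
		case v1.PodFailed, v1.PodSucceeded:
			return true, nil // the pod finished; it is no longer running
		}
		return false, nil
	})
}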
@@ -19,10 +19,13 @@ package storage
 import (
 	"context"
 	"fmt"
+	"sync"
 
 	"github.com/onsi/ginkgo/v2"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@@ -92,12 +95,21 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
 		for _, config := range configs {
 			e2epod.DeletePodOrFail(c, ns, config.pod.Name)
 		}
-		for _, config := range configs {
-			e2epod.WaitTimeoutForPodNoLongerRunningOrNotFoundInNamespace(c, config.pod.Name, ns)
-			e2epv.PVPVCCleanup(c, ns, config.pv, config.pvc)
-			err = e2epv.DeletePVSource(config.pvSource)
-			framework.ExpectNoError(err)
+		var wg sync.WaitGroup
+		wg.Add(len(configs))
+		for i := range configs {
+			go func(config *staticPVTestConfig) {
+				defer ginkgo.GinkgoRecover()
+				defer wg.Done()
+				err := e2epod.WaitForPodToDisappear(c, ns, config.pod.Name, labels.Everything(), framework.Poll, f.Timeouts.PodDelete)
+				framework.ExpectNoError(err, "while waiting for pod to disappear")
+				errs := e2epv.PVPVCCleanup(c, ns, config.pv, config.pvc)
+				framework.ExpectNoError(utilerrors.NewAggregate(errs), "while cleaning up PVs and PVCs")
+				err = e2epv.DeletePVSource(config.pvSource)
+				framework.ExpectNoError(err, "while deleting PVSource")
+			}(configs[i])
 		}
+		wg.Wait()
 	}()
 
 	for i, config := range configs {
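Two details in the new goroutine body are easy to miss. framework.ExpectNoError fails a spec by panicking (via ginkgo.Fail), so each goroutine defers ginkgo.GinkgoRecover() to turn that panic into a reported failure rather than a crashed test process; and e2epv.PVPVCCleanup returns a slice of errors, which utilerrors.NewAggregate collapses into a single error (nil when every entry is nil). A small sketch of that aggregation, with placeholder errors:

package main

import (
	"errors"
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	// Placeholder errors standing in for whatever PVPVCCleanup might return.
	errs := []error{nil, errors.New("failed to delete PVC"), errors.New("failed to delete PV")}
	// NewAggregate drops nil entries and returns nil if nothing is left.
	if agg := utilerrors.NewAggregate(errs); agg != nil {
		fmt.Println(agg) // prints both messages as one aggregated error
	}
}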