Mirror of https://github.com/k3s-io/kubernetes.git
e2e: replace WithTimeout with NodeTimeout
The intent of the timeout handling (covering the entire "It" and not just a few calls) becomes more obvious and simpler when ginkgo.NodeTimeout is used as a decorator.
parent a51999e951
commit a2722ffa4a
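For orientation, here is a minimal, self-contained sketch of the before/after pattern this diff applies. The helper doSomething and the one-minute duration are placeholders for illustration, not names taken from the diff:

package example_test

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
)

// doSomething stands in for the real test steps (hypothetical helper).
func doSomething(ctx context.Context) {}

var _ = ginkgo.Describe("timeout handling", func() {
	// Old style: the timeout only bounds the calls that use the derived
	// context, and the intent is buried inside the test body.
	ginkgo.It("old style", func(ctx context.Context) {
		ctx, cancel := context.WithTimeout(ctx, time.Minute)
		defer cancel()
		doSomething(ctx)
	})

	// New style: ginkgo.NodeTimeout bounds the whole node body; Ginkgo
	// cancels the ctx it passes in once the timeout expires.
	ginkgo.It("new style", ginkgo.NodeTimeout(time.Minute), func(ctx context.Context) {
		doSomething(ctx)
	})
})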
@@ -236,7 +236,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		the limitRange by collection with a labelSelector it MUST delete only one
 		limitRange.
 	*/
-	framework.ConformanceIt("should list, patch and delete a LimitRange by collection", func(ctx context.Context) {
+	framework.ConformanceIt("should list, patch and delete a LimitRange by collection", ginkgo.NodeTimeout(wait.ForeverTestTimeout), func(ctx context.Context) {
 
 		ns := f.Namespace.Name
 		lrClient := f.ClientSet.CoreV1().LimitRanges(ns)
@@ -275,9 +275,6 @@ var _ = SIGDescribe("LimitRange", func() {
 		limitRange2 := &v1.LimitRange{}
 		*limitRange2 = *limitRange
 
-		ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
-		defer cancelCtx()
-
 		ginkgo.By(fmt.Sprintf("Creating LimitRange %q in namespace %q", lrName, f.Namespace.Name))
 		limitRange, err := lrClient.Create(ctx, limitRange, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "Failed to create limitRange %q", lrName)
@@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 
 	for _, t := range tests {
 		test := t
-		ginkgo.It(test.name, func(ctx context.Context) {
+		ginkgo.It(test.name, ginkgo.NodeTimeout(csiPodRunningTimeout), func(ctx context.Context) {
 			var err error
 			params := testParameters{
 				lateBinding: test.lateBinding,
@@ -129,9 +129,6 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 			m.init(ctx, params)
 			ginkgo.DeferCleanup(m.cleanup)
 
-			ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
-			defer cancel()
-
 			// In contrast to the raw watch, RetryWatcher is expected to deliver all events even
 			// when the underlying raw watch gets closed prematurely
 			// (https://github.com/kubernetes/kubernetes/pull/93777#discussion_r467932080).
@@ -323,7 +320,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 	}
 	for _, t := range tests {
 		test := t
-		ginkgo.It(t.name, func(ctx context.Context) {
+		ginkgo.It(t.name, ginkgo.SpecTimeout(f.Timeouts.PodStart), func(ctx context.Context) {
 			scName := "mock-csi-storage-capacity-" + f.UniqueName
 			m.init(ctx, testParameters{
 				registerDriver: true,
@@ -346,7 +343,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 					NodeTopology: &metav1.LabelSelector{},
 					Capacity:     &capacityQuantity,
 				}
-				createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{})
+				createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(ctx, capacity, metav1.CreateOptions{})
 				framework.ExpectNoError(err, "create CSIStorageCapacity %+v", *capacity)
 				ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Delete), createdCapacity.Name, metav1.DeleteOptions{})
 			}
@@ -359,16 +356,14 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 			sc, _, pod := m.createPod(ctx, pvcReference) // late binding as specified above
 			framework.ExpectEqual(sc.Name, scName, "pre-selected storage class name not used")
 
-			waitCtx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
-			defer cancel()
 			condition := anyOf(
-				podRunning(waitCtx, f.ClientSet, pod.Name, pod.Namespace),
+				podRunning(ctx, f.ClientSet, pod.Name, pod.Namespace),
 				// We only just created the CSIStorageCapacity objects, therefore
 				// we have to ignore all older events, plus the syncDelay as our
 				// safety margin.
-				podHasStorage(waitCtx, f.ClientSet, pod.Name, pod.Namespace, time.Now().Add(syncDelay)),
+				podHasStorage(ctx, f.ClientSet, pod.Name, pod.Namespace, time.Now().Add(syncDelay)),
 			)
-			err = wait.PollImmediateUntil(poll, condition, waitCtx.Done())
+			err = wait.PollImmediateUntil(poll, condition, ctx.Done())
 			if test.expectFailure {
 				switch {
 				case errors.Is(err, context.DeadlineExceeded),
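The CSI hunks use two different decorators. As a rough note on the distinction (assumed Ginkgo v2 semantics, not stated in this commit): NodeTimeout bounds only the node it decorates, whereas SpecTimeout budgets the entire spec, including the setup and cleanup nodes that run for it. A small sketch, reusing the placeholder doSomething and the imports from the example above:

var _ = ginkgo.Describe("decorator scope", func() {
	// NodeTimeout: only this It body has to finish within 30 seconds.
	ginkgo.It("bounds one node", ginkgo.NodeTimeout(30*time.Second), func(ctx context.Context) {
		doSomething(ctx)
	})

	// SpecTimeout: the whole spec (the body plus the setup and cleanup
	// nodes that run for it) shares the 30-second budget.
	ginkgo.It("bounds the whole spec", ginkgo.SpecTimeout(30*time.Second), func(ctx context.Context) {
		doSomething(ctx)
	})
})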