e2e: replace WithTimeout with NodeTimeout

The intent of the timeout handling (covering the entire "It" rather than just a few individual calls)
becomes more obvious, and the code simpler, when ginkgo.NodeTimeout is used as a decorator.
Patrick Ohly 2022-12-19 12:46:35 +01:00
parent a51999e951
commit a2722ffa4a
2 changed files with 7 additions and 15 deletions
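
For illustration only, a minimal sketch of the pattern this commit replaces; the suite name, spec names, and the 30-second timeout are invented and not taken from the diff below. Before, the timeout was created inside the "It" body with context.WithTimeout and only bounded whatever used that derived context; after, the ginkgo.NodeTimeout decorator states the intent in the spec declaration and Ginkgo cancels the ctx handed to the body when the timeout expires.

package example_test

import (
    "context"
    "testing"
    "time"

    "github.com/onsi/ginkgo/v2"
    "github.com/onsi/gomega"
)

func TestTimeoutSketch(t *testing.T) {
    gomega.RegisterFailHandler(ginkgo.Fail)
    ginkgo.RunSpecs(t, "timeout handling sketch")
}

var _ = ginkgo.Describe("timeout handling", func() {
    // Old style: the timeout is buried inside the body and only limits
    // the calls that happen to use the derived context.
    ginkgo.It("does something (old style)", func(ctx context.Context) {
        ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
        defer cancel()
        _ = ctx // ... API calls using ctx ...
    })

    // New style: the decorator makes the timeout visible up front and
    // Ginkgo cancels ctx for the whole node once it expires.
    ginkgo.It("does something (new style)", ginkgo.NodeTimeout(30*time.Second), func(ctx context.Context) {
        _ = ctx // ... API calls using ctx ...
    })
})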


@@ -236,7 +236,7 @@ var _ = SIGDescribe("LimitRange", func() {
        the limitRange by collection with a labelSelector it MUST delete only one
        limitRange.
     */
-    framework.ConformanceIt("should list, patch and delete a LimitRange by collection", func(ctx context.Context) {
+    framework.ConformanceIt("should list, patch and delete a LimitRange by collection", ginkgo.NodeTimeout(wait.ForeverTestTimeout), func(ctx context.Context) {
        ns := f.Namespace.Name
        lrClient := f.ClientSet.CoreV1().LimitRanges(ns)
@@ -275,9 +275,6 @@ var _ = SIGDescribe("LimitRange", func() {
        limitRange2 := &v1.LimitRange{}
        *limitRange2 = *limitRange

-       ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
-       defer cancelCtx()
-
        ginkgo.By(fmt.Sprintf("Creating LimitRange %q in namespace %q", lrName, f.Namespace.Name))
        limitRange, err := lrClient.Create(ctx, limitRange, metav1.CreateOptions{})
        framework.ExpectNoError(err, "Failed to create limitRange %q", lrName)


@@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
    for _, t := range tests {
        test := t
-       ginkgo.It(test.name, func(ctx context.Context) {
+       ginkgo.It(test.name, ginkgo.NodeTimeout(csiPodRunningTimeout), func(ctx context.Context) {
            var err error
            params := testParameters{
                lateBinding: test.lateBinding,
@@ -129,9 +129,6 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
            m.init(ctx, params)
            ginkgo.DeferCleanup(m.cleanup)

-           ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
-           defer cancel()
-
            // In contrast to the raw watch, RetryWatcher is expected to deliver all events even
            // when the underlying raw watch gets closed prematurely
            // (https://github.com/kubernetes/kubernetes/pull/93777#discussion_r467932080).
@@ -323,7 +320,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
        }
        for _, t := range tests {
            test := t
-           ginkgo.It(t.name, func(ctx context.Context) {
+           ginkgo.It(t.name, ginkgo.SpecTimeout(f.Timeouts.PodStart), func(ctx context.Context) {
                scName := "mock-csi-storage-capacity-" + f.UniqueName
                m.init(ctx, testParameters{
                    registerDriver: true,
@@ -346,7 +343,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
                    NodeTopology: &metav1.LabelSelector{},
                    Capacity:     &capacityQuantity,
                }
-               createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{})
+               createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(ctx, capacity, metav1.CreateOptions{})
                framework.ExpectNoError(err, "create CSIStorageCapacity %+v", *capacity)
                ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Delete), createdCapacity.Name, metav1.DeleteOptions{})
            }
@@ -359,16 +356,14 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
            sc, _, pod := m.createPod(ctx, pvcReference) // late binding as specified above
            framework.ExpectEqual(sc.Name, scName, "pre-selected storage class name not used")

-           waitCtx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
-           defer cancel()
            condition := anyOf(
-               podRunning(waitCtx, f.ClientSet, pod.Name, pod.Namespace),
+               podRunning(ctx, f.ClientSet, pod.Name, pod.Namespace),
                // We only just created the CSIStorageCapacity objects, therefore
                // we have to ignore all older events, plus the syncDelay as our
                // safety margin.
-               podHasStorage(waitCtx, f.ClientSet, pod.Name, pod.Namespace, time.Now().Add(syncDelay)),
+               podHasStorage(ctx, f.ClientSet, pod.Name, pod.Namespace, time.Now().Add(syncDelay)),
            )
-           err = wait.PollImmediateUntil(poll, condition, waitCtx.Done())
+           err = wait.PollImmediateUntil(poll, condition, ctx.Done())
            if test.expectFailure {
                switch {
                case errors.Is(err, context.DeadlineExceeded),
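
A note on the two decorators used above, continuing the sketch from the commit description (same imports; names and durations are invented): ginkgo.NodeTimeout bounds only the node it decorates, here the "It" body, while ginkgo.SpecTimeout bounds the entire spec, including setup and cleanup nodes that run for it. In both cases Ginkgo cancels the ctx passed to the body when the deadline is reached, which is why the polling above can rely on the ctx provided by Ginkgo instead of a separate waitCtx.

var _ = ginkgo.Describe("decorator scope", func() {
    ginkgo.BeforeEach(func(ctx context.Context) {
        // Under SpecTimeout, this setup node shares the spec's deadline.
        _ = ctx
    })

    // NodeTimeout: only this body must finish within one minute.
    ginkgo.It("bounds a single node", ginkgo.NodeTimeout(time.Minute), func(ctx context.Context) {
        _ = ctx
    })

    // SpecTimeout: setup, body, and cleanup together must finish within two minutes.
    ginkgo.It("bounds the whole spec", ginkgo.SpecTimeout(2*time.Minute), func(ctx context.Context) {
        _ = ctx
    })
})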