From a51999e9511e048b2fa21b6cf1f33936bf223bab Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 19 Dec 2022 12:44:44 +0100 Subject: [PATCH 1/3] e2e framework: support Ginkgo decorators for framework.ConformanceIt Additional parameters like ginkgo.SpecTimeout may also be relevant for conformance tests. --- test/e2e/framework/framework.go | 5 ----- test/e2e/framework/ginkgowrapper.go | 7 +++++++ 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index c8ba3aa0879..3cee43cb226 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -529,11 +529,6 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster { return nil } -// ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier. -func ConformanceIt(text string, body interface{}) bool { - return ginkgo.It(text+" [Conformance]", ginkgo.Offset(1), body) -} - // PodStateVerification represents a verification of pod state. // Any time you have a set of pods that you want to operate against or query, // this struct can be used to declaratively identify those pods. diff --git a/test/e2e/framework/ginkgowrapper.go b/test/e2e/framework/ginkgowrapper.go index ebeea8acce9..e35fc4ae982 100644 --- a/test/e2e/framework/ginkgowrapper.go +++ b/test/e2e/framework/ginkgowrapper.go @@ -20,6 +20,7 @@ import ( "path" "reflect" + "github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2/types" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -63,3 +64,9 @@ func AnnotatedLocationWithOffset(annotation string, offset int) types.CodeLocati codeLocation = types.NewCustomCodeLocation(annotation + " | " + codeLocation.String()) return codeLocation } + +// ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier. 
+func ConformanceIt(text string, args ...interface{}) bool { + args = append(args, ginkgo.Offset(1)) + return ginkgo.It(text+" [Conformance]", args...) +} From a2722ffa4a238ebe3c653e915d944ba18dafa1cd Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 19 Dec 2022 12:46:35 +0100 Subject: [PATCH 2/3] e2e: replace WithTimeout with NodeTimeout The intent of timeout handling (for the entire "It" and not just a few calls) becomes more obvious and simpler when using ginkgo.NodeTimeout as decorator. --- test/e2e/scheduling/limit_range.go | 5 +---- .../storage/csi_mock/csi_storage_capacity.go | 17 ++++++----------- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go index 0bb77388cca..9231fd43dcb 100644 --- a/test/e2e/scheduling/limit_range.go +++ b/test/e2e/scheduling/limit_range.go @@ -236,7 +236,7 @@ var _ = SIGDescribe("LimitRange", func() { the limitRange by collection with a labelSelector it MUST delete only one limitRange. 
*/ - framework.ConformanceIt("should list, patch and delete a LimitRange by collection", func(ctx context.Context) { + framework.ConformanceIt("should list, patch and delete a LimitRange by collection", ginkgo.NodeTimeout(wait.ForeverTestTimeout), func(ctx context.Context) { ns := f.Namespace.Name lrClient := f.ClientSet.CoreV1().LimitRanges(ns) @@ -275,9 +275,6 @@ var _ = SIGDescribe("LimitRange", func() { limitRange2 := &v1.LimitRange{} *limitRange2 = *limitRange - ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout) - defer cancelCtx() - ginkgo.By(fmt.Sprintf("Creating LimitRange %q in namespace %q", lrName, f.Namespace.Name)) limitRange, err := lrClient.Create(ctx, limitRange, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create limitRange %q", lrName) diff --git a/test/e2e/storage/csi_mock/csi_storage_capacity.go b/test/e2e/storage/csi_mock/csi_storage_capacity.go index b3ed662708e..f21039279cc 100644 --- a/test/e2e/storage/csi_mock/csi_storage_capacity.go +++ b/test/e2e/storage/csi_mock/csi_storage_capacity.go @@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() { for _, t := range tests { test := t - ginkgo.It(test.name, func(ctx context.Context) { + ginkgo.It(test.name, ginkgo.NodeTimeout(csiPodRunningTimeout), func(ctx context.Context) { var err error params := testParameters{ lateBinding: test.lateBinding, @@ -129,9 +129,6 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() { m.init(ctx, params) ginkgo.DeferCleanup(m.cleanup) - ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout) - defer cancel() - // In contrast to the raw watch, RetryWatcher is expected to deliver all events even // when the underlying raw watch gets closed prematurely // (https://github.com/kubernetes/kubernetes/pull/93777#discussion_r467932080). 
@@ -323,7 +320,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() { } for _, t := range tests { test := t - ginkgo.It(t.name, func(ctx context.Context) { + ginkgo.It(t.name, ginkgo.SpecTimeout(f.Timeouts.PodStart), func(ctx context.Context) { scName := "mock-csi-storage-capacity-" + f.UniqueName m.init(ctx, testParameters{ registerDriver: true, @@ -346,7 +343,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() { NodeTopology: &metav1.LabelSelector{}, Capacity: &capacityQuantity, } - createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{}) + createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(ctx, capacity, metav1.CreateOptions{}) framework.ExpectNoError(err, "create CSIStorageCapacity %+v", *capacity) ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Delete), createdCapacity.Name, metav1.DeleteOptions{}) } @@ -359,16 +356,14 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() { sc, _, pod := m.createPod(ctx, pvcReference) // late binding as specified above framework.ExpectEqual(sc.Name, scName, "pre-selected storage class name not used") - waitCtx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart) - defer cancel() condition := anyOf( - podRunning(waitCtx, f.ClientSet, pod.Name, pod.Namespace), + podRunning(ctx, f.ClientSet, pod.Name, pod.Namespace), // We only just created the CSIStorageCapacity objects, therefore // we have to ignore all older events, plus the syncDelay as our // safety margin. 
- podHasStorage(waitCtx, f.ClientSet, pod.Name, pod.Namespace, time.Now().Add(syncDelay)), + podHasStorage(ctx, f.ClientSet, pod.Name, pod.Namespace, time.Now().Add(syncDelay)), ) - err = wait.PollImmediateUntil(poll, condition, waitCtx.Done()) + err = wait.PollImmediateUntil(poll, condition, ctx.Done()) if test.expectFailure { switch { case errors.Is(err, context.DeadlineExceeded), From 6c957c06347f805eeb8e4ac7358d2387939575c9 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 19 Dec 2022 12:48:51 +0100 Subject: [PATCH 3/3] e2e storage: restore timeout behavior in "CSI NodeStage error cases" In v1.26.0, these tests only used the timeout context while waiting for a CSI call. This restores that behavior, just in case that it is relevant. No test flakes are known because of this. --- test/e2e/storage/csi_mock/csi_node_stage_error_cases.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/e2e/storage/csi_mock/csi_node_stage_error_cases.go b/test/e2e/storage/csi_mock/csi_node_stage_error_cases.go index bb775b6eb72..d66dca33d4c 100644 --- a/test/e2e/storage/csi_mock/csi_node_stage_error_cases.go +++ b/test/e2e/storage/csi_mock/csi_node_stage_error_cases.go @@ -155,14 +155,14 @@ var _ = utils.SIGDescribe("CSI Mock volume node stage", func() { ginkgo.By("Waiting for expected CSI calls") // Watch for all calls up to deletePod = true - ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout) + timeoutCtx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout) defer cancel() for { - if ctx.Err() != nil { + if timeoutCtx.Err() != nil { framework.Failf("timed out waiting for the CSI call that indicates that the pod can be deleted: %v", test.expectedCalls) } time.Sleep(1 * time.Second) - _, index, err := compareCSICalls(ctx, trackedCalls, test.expectedCalls, m.driver.GetCalls) + _, index, err := compareCSICalls(timeoutCtx, trackedCalls, test.expectedCalls, m.driver.GetCalls) framework.ExpectNoError(err, "while waiting for initial CSI 
calls") if index == 0 { // No CSI call received yet