Merge pull request #114580 from pohly/e2e-ginkgo-timeout-fixes

e2e ginkgo timeout fixes, III
Kubernetes Prow Robot authored 2023-01-30 13:48:48 -08:00, committed by GitHub
commit d863d04adc
5 changed files with 17 additions and 23 deletions

View File

@@ -539,11 +539,6 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
 	return nil
 }
 
-// ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
-func ConformanceIt(text string, body interface{}) bool {
-	return ginkgo.It(text+" [Conformance]", ginkgo.Offset(1), body)
-}
-
 // PodStateVerification represents a verification of pod state.
 // Any time you have a set of pods that you want to operate against or query,
 // this struct can be used to declaratively identify those pods.

View File

@@ -20,6 +20,7 @@ import (
 	"path"
 	"reflect"
 
+	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/ginkgo/v2/types"
 
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -63,3 +64,9 @@ func AnnotatedLocationWithOffset(annotation string, offset int) types.CodeLocation {
 	codeLocation = types.NewCustomCodeLocation(annotation + " | " + codeLocation.String())
 	return codeLocation
 }
+
+// ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
+func ConformanceIt(text string, args ...interface{}) bool {
+	args = append(args, ginkgo.Offset(1))
+	return ginkgo.It(text+" [Conformance]", args...)
+}
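
The variadic signature lets conformance specs pass Ginkgo v2 decorators straight through to ginkgo.It. A minimal sketch of a caller under these assumptions (the spec text and timeout value are hypothetical):

package example

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("conformance example", func() {
	// Extra arguments such as ginkgo.NodeTimeout are appended after the
	// text and forwarded to ginkgo.It together with ginkgo.Offset(1).
	framework.ConformanceIt("finishes within the node timeout",
		ginkgo.NodeTimeout(5*time.Minute), // hypothetical value
		func(ctx context.Context) {
			// Test steps use ctx; Ginkgo cancels it when the timeout expires.
		})
})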

View File

@@ -236,7 +236,7 @@ var _ = SIGDescribe("LimitRange", func() {
 	   the limitRange by collection with a labelSelector it MUST delete only one
 	   limitRange.
 	*/
-	framework.ConformanceIt("should list, patch and delete a LimitRange by collection", func(ctx context.Context) {
+	framework.ConformanceIt("should list, patch and delete a LimitRange by collection", ginkgo.NodeTimeout(wait.ForeverTestTimeout), func(ctx context.Context) {
 		ns := f.Namespace.Name
 		lrClient := f.ClientSet.CoreV1().LimitRanges(ns)
@@ -275,9 +275,6 @@ var _ = SIGDescribe("LimitRange", func() {
 		limitRange2 := &v1.LimitRange{}
 		*limitRange2 = *limitRange
 
-		ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
-		defer cancelCtx()
-
 		ginkgo.By(fmt.Sprintf("Creating LimitRange %q in namespace %q", lrName, f.Namespace.Name))
 		limitRange, err := lrClient.Create(ctx, limitRange, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "Failed to create limitRange %q", lrName)
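
This pair of hunks shows the recurring pattern in this PR: instead of deriving a deadline inside the test body, the deadline is declared as a decorator and Ginkgo cancels the spec's ctx itself. A side-by-side sketch (doSomething is a hypothetical helper):

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"

	"k8s.io/apimachinery/pkg/util/wait"
)

func doSomething(ctx context.Context) {} // hypothetical helper

var _ = ginkgo.Describe("timeout styles", func() {
	// Before: the body manages its own deadline and must remember to cancel.
	ginkgo.It("manual deadline", func(ctx context.Context) {
		ctx, cancel := context.WithTimeout(ctx, wait.ForeverTestTimeout)
		defer cancel()
		doSomething(ctx)
	})

	// After: Ginkgo enforces the deadline and cancels ctx, and the
	// timeout is visible in the spec declaration rather than buried
	// in the body.
	ginkgo.It("declared deadline", ginkgo.NodeTimeout(wait.ForeverTestTimeout), func(ctx context.Context) {
		doSomething(ctx)
	})
})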

View File

@@ -155,14 +155,14 @@ var _ = utils.SIGDescribe("CSI Mock volume node stage", func() {
 			ginkgo.By("Waiting for expected CSI calls")
 			// Watch for all calls up to deletePod = true
-			ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
+			timeoutCtx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
 			defer cancel()
 			for {
-				if ctx.Err() != nil {
+				if timeoutCtx.Err() != nil {
 					framework.Failf("timed out waiting for the CSI call that indicates that the pod can be deleted: %v", test.expectedCalls)
 				}
 				time.Sleep(1 * time.Second)
-				_, index, err := compareCSICalls(ctx, trackedCalls, test.expectedCalls, m.driver.GetCalls)
+				_, index, err := compareCSICalls(timeoutCtx, trackedCalls, test.expectedCalls, m.driver.GetCalls)
 				framework.ExpectNoError(err, "while waiting for initial CSI calls")
 				if index == 0 {
 					// No CSI call received yet
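
Note on the rename: reassigning ctx would shadow the spec's context for everything that follows, while only the bounded wait should see the shorter deadline. A sketch of the distinction (helper names hypothetical):

package example

import (
	"context"
	"time"
)

func waitForCalls(ctx context.Context) {} // hypothetical helper
func cleanup(ctx context.Context)     {} // hypothetical helper

func example(ctx context.Context) {
	// The derived context gets its own name, so the shorter deadline
	// applies only to the wait below.
	timeoutCtx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	waitForCalls(timeoutCtx)

	// Later steps still use the spec-scoped ctx, whose lifetime
	// (e.g. a NodeTimeout) is managed by Ginkgo.
	cleanup(ctx)
}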

View File

@@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 	for _, t := range tests {
 		test := t
-		ginkgo.It(test.name, func(ctx context.Context) {
+		ginkgo.It(test.name, ginkgo.NodeTimeout(csiPodRunningTimeout), func(ctx context.Context) {
 			var err error
 			params := testParameters{
 				lateBinding: test.lateBinding,
@@ -129,9 +129,6 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 			m.init(ctx, params)
 			ginkgo.DeferCleanup(m.cleanup)
 
-			ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
-			defer cancel()
-
 			// In contrast to the raw watch, RetryWatcher is expected to deliver all events even
 			// when the underlying raw watch gets closed prematurely
 			// (https://github.com/kubernetes/kubernetes/pull/93777#discussion_r467932080).
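
For context on the comment above, a sketch of the RetryWatcher pattern it refers to (the namespace, client wiring, and resourceVersion handling here are assumptions, not the test's actual code): RetryWatcher re-establishes the watch from the last delivered resourceVersion, so events are not lost when the raw connection drops.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

func watchPods(ctx context.Context, c kubernetes.Interface, ns, rv string) error {
	lw := &cache.ListWatch{
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return c.CoreV1().Pods(ns).Watch(ctx, options)
		},
	}
	// Resumes from rv after any dropped connection, replaying nothing
	// older and losing nothing newer.
	w, err := watchtools.NewRetryWatcher(rv, lw)
	if err != nil {
		return err
	}
	defer w.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case ev, ok := <-w.ResultChan():
			if !ok {
				return nil
			}
			_ = ev // handle the event; none are lost across reconnects
		}
	}
}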
@@ -323,7 +320,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 	}
 	for _, t := range tests {
 		test := t
-		ginkgo.It(t.name, func(ctx context.Context) {
+		ginkgo.It(t.name, ginkgo.SpecTimeout(f.Timeouts.PodStart), func(ctx context.Context) {
 			scName := "mock-csi-storage-capacity-" + f.UniqueName
 			m.init(ctx, testParameters{
 				registerDriver: true,
@@ -346,7 +343,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 					NodeTopology: &metav1.LabelSelector{},
 					Capacity:     &capacityQuantity,
 				}
-				createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{})
+				createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(ctx, capacity, metav1.CreateOptions{})
 				framework.ExpectNoError(err, "create CSIStorageCapacity %+v", *capacity)
 				ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Delete), createdCapacity.Name, metav1.DeleteOptions{})
 			}
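
The Create change above illustrates the other recurring fix: API calls inside a spec should take the spec's ctx rather than context.Background(), otherwise ginkgo.SpecTimeout cannot interrupt them. A minimal sketch of the rule (the helper itself is hypothetical):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createPod threads the caller's ctx into the API call, so cancelling the
// spec (SpecTimeout, NodeTimeout, interrupt) aborts the request too.
func createPod(ctx context.Context, c kubernetes.Interface, ns string, pod *v1.Pod) (*v1.Pod, error) {
	return c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
}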
@@ -359,16 +356,14 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 			sc, _, pod := m.createPod(ctx, pvcReference) // late binding as specified above
 			framework.ExpectEqual(sc.Name, scName, "pre-selected storage class name not used")
 
-			waitCtx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
-			defer cancel()
 			condition := anyOf(
-				podRunning(waitCtx, f.ClientSet, pod.Name, pod.Namespace),
+				podRunning(ctx, f.ClientSet, pod.Name, pod.Namespace),
 				// We only just created the CSIStorageCapacity objects, therefore
 				// we have to ignore all older events, plus the syncDelay as our
 				// safety margin.
-				podHasStorage(waitCtx, f.ClientSet, pod.Name, pod.Namespace, time.Now().Add(syncDelay)),
+				podHasStorage(ctx, f.ClientSet, pod.Name, pod.Namespace, time.Now().Add(syncDelay)),
 			)
-			err = wait.PollImmediateUntil(poll, condition, waitCtx.Done())
+			err = wait.PollImmediateUntil(poll, condition, ctx.Done())
 			if test.expectFailure {
 				switch {
 				case errors.Is(err, context.DeadlineExceeded),