Merge pull request #114580 from pohly/e2e-ginkgo-timeout-fixes
e2e ginkgo timeout fixes, III
Commit d863d04adc

@@ -539,11 +539,6 @@ func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
     return nil
 }
 
-// ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
-func ConformanceIt(text string, body interface{}) bool {
-    return ginkgo.It(text+" [Conformance]", ginkgo.Offset(1), body)
-}
-
 // PodStateVerification represents a verification of pod state.
 // Any time you have a set of pods that you want to operate against or query,
 // this struct can be used to declaratively identify those pods.

@@ -20,6 +20,7 @@ import (
     "path"
     "reflect"
 
+    "github.com/onsi/ginkgo/v2"
     "github.com/onsi/ginkgo/v2/types"
 
     apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -63,3 +64,9 @@ func AnnotatedLocationWithOffset(annotation string, offset int) types.CodeLocation {
     codeLocation = types.NewCustomCodeLocation(annotation + " | " + codeLocation.String())
     return codeLocation
 }
+
+// ConformanceIt is wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
+func ConformanceIt(text string, args ...interface{}) bool {
+    args = append(args, ginkgo.Offset(1))
+    return ginkgo.It(text+" [Conformance]", args...)
+}

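The variadic rewrite above is what lets conformance specs accept Ginkgo v2 decorators. A minimal sketch of a caller, assuming the standard e2e framework import; the spec text and the five-minute timeout are illustrative only:

```go
import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("example", func() {
	// Any ginkgo.It decorator can now be forwarded ahead of the body,
	// e.g. a per-node timeout.
	framework.ConformanceIt("should finish in time", ginkgo.NodeTimeout(5*time.Minute), func(ctx context.Context) {
		// ctx already carries the five-minute deadline and can be
		// handed to client-go calls.
		_ = ctx
	})
})
```
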
@ -236,7 +236,7 @@ var _ = SIGDescribe("LimitRange", func() {
|
|||||||
the limitRange by collection with a labelSelector it MUST delete only one
|
the limitRange by collection with a labelSelector it MUST delete only one
|
||||||
limitRange.
|
limitRange.
|
||||||
*/
|
*/
|
||||||
framework.ConformanceIt("should list, patch and delete a LimitRange by collection", func(ctx context.Context) {
|
framework.ConformanceIt("should list, patch and delete a LimitRange by collection", ginkgo.NodeTimeout(wait.ForeverTestTimeout), func(ctx context.Context) {
|
||||||
|
|
||||||
ns := f.Namespace.Name
|
ns := f.Namespace.Name
|
||||||
lrClient := f.ClientSet.CoreV1().LimitRanges(ns)
|
lrClient := f.ClientSet.CoreV1().LimitRanges(ns)
|
||||||
@@ -275,9 +275,6 @@ var _ = SIGDescribe("LimitRange", func() {
         limitRange2 := &v1.LimitRange{}
         *limitRange2 = *limitRange
 
-        ctx, cancelCtx := context.WithTimeout(ctx, wait.ForeverTestTimeout)
-        defer cancelCtx()
-
         ginkgo.By(fmt.Sprintf("Creating LimitRange %q in namespace %q", lrName, f.Namespace.Name))
         limitRange, err := lrClient.Create(ctx, limitRange, metav1.CreateOptions{})
         framework.ExpectNoError(err, "Failed to create limitRange %q", lrName)

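The two LimitRange hunks are halves of one change: ginkgo.NodeTimeout(wait.ForeverTestTimeout) on the node hands the body a ctx that already expires after that duration, making the hand-rolled context.WithTimeout plus defer cancel() redundant. A before/after sketch of the pattern, with a placeholder spec name and assuming the usual context, ginkgo, and apimachinery wait imports:

```go
// Before: the body layers its own deadline on top of ctx.
ginkgo.It("example", func(ctx context.Context) {
	ctx, cancel := context.WithTimeout(ctx, wait.ForeverTestTimeout)
	defer cancel()
	_ = ctx // used for all API calls in the body
})

// After: Ginkgo enforces the same deadline and cancels ctx itself.
ginkgo.It("example", ginkgo.NodeTimeout(wait.ForeverTestTimeout), func(ctx context.Context) {
	_ = ctx // same deadline, no manual bookkeeping
})
```
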
@@ -155,14 +155,14 @@ var _ = utils.SIGDescribe("CSI Mock volume node stage", func() {
 
             ginkgo.By("Waiting for expected CSI calls")
             // Watch for all calls up to deletePod = true
-            ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
+            timeoutCtx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
             defer cancel()
             for {
-                if ctx.Err() != nil {
+                if timeoutCtx.Err() != nil {
                     framework.Failf("timed out waiting for the CSI call that indicates that the pod can be deleted: %v", test.expectedCalls)
                 }
                 time.Sleep(1 * time.Second)
-                _, index, err := compareCSICalls(ctx, trackedCalls, test.expectedCalls, m.driver.GetCalls)
+                _, index, err := compareCSICalls(timeoutCtx, trackedCalls, test.expectedCalls, m.driver.GetCalls)
                 framework.ExpectNoError(err, "while waiting for initial CSI calls")
                 if index == 0 {
                     // No CSI call received yet

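Here the derived context keeps its own name instead of shadowing ctx. With shadowing, everything after the assignment silently inherits the shorter csiPodRunningTimeout deadline, including calls that should run on the spec-level budget; a distinct name keeps both contexts addressable. A compact illustration with placeholder durations, assuming context and time imports:

```go
func watchCalls(ctx context.Context) {
	// Derive a shorter deadline without hiding the incoming ctx.
	timeoutCtx, cancel := context.WithTimeout(ctx, 2*time.Minute)
	defer cancel()

	for {
		if timeoutCtx.Err() != nil {
			return // only this loop gives up after two minutes
		}
		// ctx stays in scope for calls that should honor the full
		// spec-level deadline instead.
		time.Sleep(time.Second)
	}
}
```
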
@@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 
     for _, t := range tests {
         test := t
-        ginkgo.It(test.name, func(ctx context.Context) {
+        ginkgo.It(test.name, ginkgo.NodeTimeout(csiPodRunningTimeout), func(ctx context.Context) {
             var err error
             params := testParameters{
                 lateBinding: test.lateBinding,

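ginkgo.NodeTimeout is a Ginkgo v2 decorator: when the decorated node's body accepts a context, Ginkgo cancels that context once the timeout elapses and fails the spec, so context-aware calls inside return promptly instead of hanging. A sketch of a body reacting to that cancellation; client, ns, and podName are stand-ins:

```go
ginkgo.It("waits for the pod", ginkgo.NodeTimeout(csiPodRunningTimeout), func(ctx context.Context) {
	// Once the node timeout fires, Get returns with a context error
	// rather than blocking past the deadline.
	pod, err := client.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
	framework.ExpectNoError(err, "getting pod %q", podName)
	_ = pod
})
```
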
@@ -129,9 +129,6 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
             m.init(ctx, params)
             ginkgo.DeferCleanup(m.cleanup)
 
-            ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
-            defer cancel()
-
             // In contrast to the raw watch, RetryWatcher is expected to deliver all events even
             // when the underlying raw watch gets closed prematurely
             // (https://github.com/kubernetes/kubernetes/pull/93777#discussion_r467932080).

@@ -323,7 +320,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
     }
     for _, t := range tests {
         test := t
-        ginkgo.It(t.name, func(ctx context.Context) {
+        ginkgo.It(t.name, ginkgo.SpecTimeout(f.Timeouts.PodStart), func(ctx context.Context) {
             scName := "mock-csi-storage-capacity-" + f.UniqueName
             m.init(ctx, testParameters{
                 registerDriver: true,

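The PR uses two related decorators, and this hunk switches to the stricter one: NodeTimeout bounds a single node (the It body), while SpecTimeout budgets the whole spec, so context-aware setup and cleanup nodes share the same clock. Sketched side by side with a placeholder duration d:

```go
// NodeTimeout: only this body must finish within d; setup and
// cleanup nodes get their own budgets.
ginkgo.It("node-scoped", ginkgo.NodeTimeout(d), func(ctx context.Context) {
	_ = ctx
})

// SpecTimeout: the body and every other context-aware node of this
// spec share the single budget d.
ginkgo.It("spec-scoped", ginkgo.SpecTimeout(d), func(ctx context.Context) {
	_ = ctx
})
```
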
@@ -346,7 +343,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
                 NodeTopology: &metav1.LabelSelector{},
                 Capacity:     &capacityQuantity,
             }
-            createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{})
+            createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(ctx, capacity, metav1.CreateOptions{})
             framework.ExpectNoError(err, "create CSIStorageCapacity %+v", *capacity)
             ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Delete), createdCapacity.Name, metav1.DeleteOptions{})
         }

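Swapping context.Background() for the spec's ctx wires the decorator's deadline into client-go: when the spec times out, the in-flight Create is abandoned rather than left to block past it. The rule this hunk applies, sketched with client and capacity as stand-ins:

```go
// Detached from the spec's lifetime; can outlive the timeout.
_, _ = client.StorageV1().CSIStorageCapacities(ns).Create(context.Background(), capacity, metav1.CreateOptions{})

// Tied to the spec: cancellation and deadline propagate into the call.
created, err := client.StorageV1().CSIStorageCapacities(ns).Create(ctx, capacity, metav1.CreateOptions{})
framework.ExpectNoError(err, "create CSIStorageCapacity")
_ = created
```
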
@@ -359,16 +356,14 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
             sc, _, pod := m.createPod(ctx, pvcReference) // late binding as specified above
             framework.ExpectEqual(sc.Name, scName, "pre-selected storage class name not used")
 
-            waitCtx, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart)
-            defer cancel()
             condition := anyOf(
-                podRunning(waitCtx, f.ClientSet, pod.Name, pod.Namespace),
+                podRunning(ctx, f.ClientSet, pod.Name, pod.Namespace),
                 // We only just created the CSIStorageCapacity objects, therefore
                 // we have to ignore all older events, plus the syncDelay as our
                 // safety margin.
-                podHasStorage(waitCtx, f.ClientSet, pod.Name, pod.Namespace, time.Now().Add(syncDelay)),
+                podHasStorage(ctx, f.ClientSet, pod.Name, pod.Namespace, time.Now().Add(syncDelay)),
             )
-            err = wait.PollImmediateUntil(poll, condition, waitCtx.Done())
+            err = wait.PollImmediateUntil(poll, condition, ctx.Done())
             if test.expectFailure {
                 switch {
                 case errors.Is(err, context.DeadlineExceeded),

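With SpecTimeout supplying the deadline, the poll can stop on ctx.Done() directly; the removed waitCtx only duplicated the same f.Timeouts.PodStart budget. A reduced sketch of how the expectFailure branch can then tell the timeout apart, assuming poll is an interval and done() a placeholder check; exact error surfacing depends on the condition functions:

```go
condition := func() (bool, error) {
	// The condition closes over ctx, so an expired deadline can also
	// surface as an error from the condition itself.
	if err := ctx.Err(); err != nil {
		return false, err
	}
	return done(), nil
}
err := wait.PollImmediateUntil(poll, condition, ctx.Done())
switch {
case err == nil:
	// condition succeeded within the spec budget
case errors.Is(err, context.DeadlineExceeded), errors.Is(err, wait.ErrWaitTimeout):
	// the spec budget ran out first
}
```
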