Merge pull request #114783 from pohly/e2e-framework-timeouts

e2e framework: consolidate timeouts and intervals
Kubernetes Prow Robot 2023-01-12 03:29:08 -08:00 committed by GitHub
commit 0d6dc14051
24 changed files with 186 additions and 99 deletions

View File

@@ -126,7 +126,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
ginkgo.By("waiting 2 minutes for the watch in the podGC to catch up, remove any pods scheduled on " +
"the now non-existent node and the RC to recreate it")
time.Sleep(framework.NewTimeoutContextWithDefaults().PodStartShort)
time.Sleep(f.Timeouts.PodStartShort)
ginkgo.By("verifying whether the pods from the removed node are recreated")
err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount)

View File

@@ -222,10 +222,12 @@ func setupSuite(ctx context.Context) {
}
}
timeouts := framework.NewTimeoutContext()
// In large clusters we may get to this point but still have a bunch
// of nodes without Routes created. Since this would make a node
// unschedulable, we need to wait until all of them are schedulable.
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, timeouts.NodeSchedulable))
// If NumNodes is not specified then auto-detect how many are schedulable and not tainted
if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes {
@@ -238,18 +240,18 @@ func setupSuite(ctx context.Context) {
// cluster infrastructure pods that are being pulled or started can block
// test pods from running, and tests that ensure all pods are running and
// ready will fail).
podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
//
// TODO: In large clusters, we often observe non-starting pods due to
// #41007. To avoid those pods blocking the whole test run (and just
// wasting it), we allow for some not-ready pods (with the number equal
// to the number of allowed not-ready nodes).
if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemPodsStartup, map[string]string{}); err != nil {
e2edebug.DumpAllNamespaceInfo(ctx, c, metav1.NamespaceSystem)
e2ekubectl.LogFailedContainers(ctx, c, metav1.NamespaceSystem, framework.Logf)
framework.Failf("Error waiting for all pods to be running and ready: %v", err)
}
if err := waitForDaemonSets(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
if err := waitForDaemonSets(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemDaemonsetStartup); err != nil {
framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
}

View File

@@ -27,6 +27,7 @@ import (
"math/rand"
"os"
"path"
"reflect"
"strings"
"time"
@@ -144,10 +145,19 @@ type Options struct {
GroupVersion *schema.GroupVersion
}
// NewFrameworkWithCustomTimeouts makes a framework with with custom timeouts.
// NewFrameworkWithCustomTimeouts makes a framework with custom timeouts.
// For timeout values that are zero the normal default value continues to
// be used.
func NewFrameworkWithCustomTimeouts(baseName string, timeouts *TimeoutContext) *Framework {
f := NewDefaultFramework(baseName)
f.Timeouts = timeouts
in := reflect.ValueOf(timeouts).Elem()
out := reflect.ValueOf(f.Timeouts).Elem()
for i := 0; i < in.NumField(); i++ {
value := in.Field(i)
if !value.IsZero() {
out.Field(i).Set(value)
}
}
return f
}
@@ -169,7 +179,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
BaseName: baseName,
Options: options,
ClientSet: client,
Timeouts: NewTimeoutContextWithDefaults(),
Timeouts: NewTimeoutContext(),
}
// The order is important here: if the extension calls ginkgo.BeforeEach
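A hedged usage sketch of the new merge semantics (hypothetical caller, not part of this diff; assumes imports "time" and "k8s.io/kubernetes/test/e2e/framework"): only non-zero fields override the defaults, because the reflection loop above skips zero values.

// Override only PodStart; PodDelete stays at its 5-minute default
// because it is zero in the custom TimeoutContext.
f := framework.NewFrameworkWithCustomTimeouts("slow-storage", &framework.TimeoutContext{
	PodStart: 15 * time.Minute,
})
_ = f.Timeouts.PodDelete // still the default, since PodDelete was zero above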

View File

@@ -0,0 +1,52 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework_test
import (
"reflect"
"testing"
"time"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/test/e2e/framework"
)
func TestNewFrameworkWithCustomTimeouts(t *testing.T) {
defaultF := framework.NewDefaultFramework("test")
customTimeouts := &framework.TimeoutContext{
PodStart: 5 * time.Second,
PodDelete: time.Second,
}
customF := framework.NewFrameworkWithCustomTimeouts("test", customTimeouts)
defaultF.Timeouts.PodStart = customTimeouts.PodStart
defaultF.Timeouts.PodDelete = customTimeouts.PodDelete
assert.Equal(t, customF.Timeouts, defaultF.Timeouts)
}
func TestNewFramework(t *testing.T) {
f := framework.NewDefaultFramework("test")
timeouts := reflect.ValueOf(f.Timeouts).Elem()
for i := 0; i < timeouts.NumField(); i++ {
value := timeouts.Field(i)
if value.IsZero() {
t.Errorf("%s in Framework.Timeouts was not set.", reflect.TypeOf(*f.Timeouts).Field(i).Name)
}
}
}

View File

@@ -53,9 +53,6 @@ const (
// podStartTimeout is how long to wait for the pod to be started.
podStartTimeout = 5 * time.Minute
// poll is how often to poll pods, nodes and claims.
poll = 2 * time.Second
// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
// transient failures from failing tests.
singleCallTimeout = 5 * time.Minute
@@ -195,7 +192,7 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri
notReady := int32(0)
var lastAPIError error
if wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
if wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) {
// We get the new list of pods, replication controllers, and
// replica sets in every iteration because more pods come
// online during startup and we want to ensure they are also
@@ -287,7 +284,7 @@ func WaitForPodCondition(ctx context.Context, c clientset.Interface, ns, podName
lastPod *v1.Pod
start = time.Now()
)
err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) {
pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
lastPodError = err
if err != nil {
@@ -333,7 +330,7 @@ func WaitForAllPodsCondition(ctx context.Context, c clientset.Interface, ns stri
framework.Logf("Waiting up to %v for at least %d pods in namespace %s to be %s", timeout, minPods, ns, conditionDesc)
var pods *v1.PodList
matched := 0
err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (done bool, err error) {
err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (done bool, err error) {
pods, err = c.CoreV1().Pods(ns).List(ctx, opts)
if err != nil {
return handleWaitingAPIError(err, true, "listing pods")
@@ -366,7 +363,7 @@ func WaitForAllPodsCondition(ctx context.Context, c clientset.Interface, ns stri
// WaitForPodsRunning waits for a given `timeout` to evaluate if a certain number of pods in the given `ns` are running.
func WaitForPodsRunning(c clientset.Interface, ns string, num int, timeout time.Duration) error {
matched := 0
err := wait.PollImmediate(poll, timeout, func() (done bool, err error) {
err := wait.PollImmediate(framework.PollInterval(), timeout, func() (done bool, err error) {
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return handleWaitingAPIError(err, true, "listing pods")
@@ -389,7 +386,7 @@ func WaitForPodsRunning(c clientset.Interface, ns string, num int, timeout time.
// WaitForPodsSchedulingGated waits for a given `timeout` to evaluate if a certain number of pods in the given `ns` stay in scheduling gated state.
func WaitForPodsSchedulingGated(c clientset.Interface, ns string, num int, timeout time.Duration) error {
matched := 0
err := wait.PollImmediate(poll, timeout, func() (done bool, err error) {
err := wait.PollImmediate(framework.PollInterval(), timeout, func() (done bool, err error) {
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return handleWaitingAPIError(err, true, "listing pods")
@@ -415,7 +412,7 @@ func WaitForPodsSchedulingGated(c clientset.Interface, ns string, num int, timeo
// match the given `schedulingGates` stay in scheduling gated state.
func WaitForPodsWithSchedulingGates(c clientset.Interface, ns string, num int, timeout time.Duration, schedulingGates []v1.PodSchedulingGate) error {
matched := 0
err := wait.PollImmediate(poll, timeout, func() (done bool, err error) {
err := wait.PollImmediate(framework.PollInterval(), timeout, func() (done bool, err error) {
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return handleWaitingAPIError(err, true, "listing pods")
@@ -608,7 +605,7 @@ func WaitForPodSuccessInNamespaceSlow(ctx context.Context, c clientset.Interface
// than "not found" then that error is returned and the wait stops.
func WaitForPodNotFoundInNamespace(ctx context.Context, c clientset.Interface, podName, ns string, timeout time.Duration) error {
var lastPod *v1.Pod
err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) {
pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return true, nil // done
@@ -670,7 +667,7 @@ func WaitForPodToDisappear(ctx context.Context, c clientset.Interface, ns, podNa
func PodsResponding(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
ginkgo.By("trying to dial each unique pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
err := wait.PollImmediateWithContext(ctx, poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
return maybeTimeoutError(err, "waiting for pods to be responsive")
}
@@ -679,7 +676,7 @@ func PodsResponding(ctx context.Context, c clientset.Interface, ns, name string,
// It returns the matching Pods or a timeout error.
func WaitForNumberOfPods(ctx context.Context, c clientset.Interface, ns string, num int, timeout time.Duration) (pods *v1.PodList, err error) {
actualNum := 0
err = wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
err = wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) {
pods, err = c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return handleWaitingAPIError(err, false, "listing pods")
@@ -722,7 +719,7 @@ func WaitForPodsWithLabelRunningReady(ctx context.Context, c clientset.Interface
func WaitForNRestartablePods(ctx context.Context, ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) {
var pods []*v1.Pod
var errLast error
found := wait.PollWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
found := wait.PollWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) {
allPods := ps.List()
pods = FilterNonRestartablePods(allPods)
if len(pods) != expect {
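For call sites outside this package the same pattern applies: take the interval from framework.PollInterval() instead of a local poll constant. A minimal sketch, assuming the imports used above (context, wait, v1, metav1) plus the framework package; c, ns, podName and timeout are placeholders:

// Poll with the shared interval until the pod reports Running.
err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout,
	func(ctx context.Context) (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
		if err != nil {
			return false, nil // sketch: treat errors as transient and keep polling
		}
		return pod.Status.Phase == v1.PodRunning, nil
	})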

View File

@@ -32,6 +32,7 @@ import (
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
"github.com/onsi/gomega"
gomegaformat "github.com/onsi/gomega/format"
restclient "k8s.io/client-go/rest"
@@ -102,15 +103,20 @@ type TestContextType struct {
// Tooling is the tooling in use (e.g. kops, gke). Provider is the cloud provider and might not uniquely identify the tooling.
Tooling string
CloudConfig CloudConfig
KubectlPath string
OutputDir string
ReportDir string
ReportPrefix string
Prefix string
MinStartupPods int
// Timeout for waiting for system pods to be running
SystemPodsStartupTimeout time.Duration
// timeouts contains user-configurable timeouts for various operations.
// Individual Framework instances also have such timeouts, which may
// differ from these here. To avoid confusion, this field is not
// exported. Its values can be accessed through
// NewTimeoutContext.
timeouts TimeoutContext
CloudConfig CloudConfig
KubectlPath string
OutputDir string
ReportDir string
ReportPrefix string
Prefix string
MinStartupPods int
EtcdUpgradeStorage string
EtcdUpgradeVersion string
GCEUpgradeScript string
@@ -143,10 +149,6 @@ type TestContextType struct {
IncludeClusterAutoscalerMetrics bool
// Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list.
OutputPrintType string
// NodeSchedulableTimeout is the timeout for waiting for all nodes to be schedulable.
NodeSchedulableTimeout time.Duration
// SystemDaemonsetStartupTimeout is the timeout for waiting for all system daemonsets to be ready.
SystemDaemonsetStartupTimeout time.Duration
// CreateTestingNS is responsible for creating namespace used for executing e2e tests.
// It accepts namespace base name, which will be prepended with e2e prefix, kube client
// and labels to be applied to a namespace.
@@ -272,7 +274,9 @@ type CloudConfig struct {
}
// TestContext should be used by all tests to access common context data.
var TestContext TestContextType
var TestContext = TestContextType{
timeouts: defaultTimeouts,
}
// StringArrayValue is used with flag.Var for a comma-separated list of strings placed into a string array.
type stringArrayValue struct {
@@ -414,9 +418,9 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
flags.StringVar(&cloudConfig.ClusterTag, "cluster-tag", "", "Tag used to identify resources. Only required if provider is aws.")
flags.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure or vsphere.")
flags.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used. If set to -1, no pods are checked and tests run straight away.")
flags.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
flags.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
flags.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
flags.DurationVar(&TestContext.timeouts.SystemPodsStartup, "system-pods-startup-timeout", TestContext.timeouts.SystemPodsStartup, "Timeout for waiting for all system pods to be running before starting tests.")
flags.DurationVar(&TestContext.timeouts.NodeSchedulable, "node-schedulable-timeout", TestContext.timeouts.NodeSchedulable, "Timeout for waiting for all nodes to be schedulable.")
flags.DurationVar(&TestContext.timeouts.SystemDaemonsetStartup, "system-daemonsets-startup-timeout", TestContext.timeouts.SystemDaemonsetStartup, "Timeout for waiting for all system daemonsets to be ready.")
flags.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
flags.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
flags.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
@@ -469,6 +473,15 @@ func AfterReadingAllFlags(t *TestContextType) {
os.Exit(0)
}
// Reconfigure gomega defaults. The poll interval should be suitable
// for most tests. The timeouts are more subjective and tests may want
// to override them, but these defaults are still better for E2E than the
// ones from Gomega (1s timeout, 10ms interval).
gomega.SetDefaultEventuallyPollingInterval(t.timeouts.Poll)
gomega.SetDefaultConsistentlyPollingInterval(t.timeouts.Poll)
gomega.SetDefaultEventuallyTimeout(t.timeouts.PodStart)
gomega.SetDefaultConsistentlyDuration(t.timeouts.PodStartShort)
// Only set a default host if one won't be supplied via kubeconfig
if len(t.Host) == 0 && len(t.KubeConfig) == 0 {
// Check if we can use the in-cluster config
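With these defaults in place, assertions can often drop explicit intervals. A hedged sketch of a test-side check (hypothetical, not part of this diff; c, ctx, ns and podName are placeholders): Eventually now polls every timeouts.Poll and gives up after timeouts.PodStart unless overridden.

// No WithTimeout/WithPolling needed for the common case; a non-nil
// error from the polled function simply makes that attempt fail.
gomega.Eventually(func() (v1.PodPhase, error) {
	pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return pod.Status.Phase, nil
}).Should(gomega.Equal(v1.PodRunning))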

View File

@@ -18,33 +18,41 @@ package framework
import "time"
const (
// Default timeouts to be used in TimeoutContext
podStartTimeout = 5 * time.Minute
podStartShortTimeout = 2 * time.Minute
podStartSlowTimeout = 15 * time.Minute
podDeleteTimeout = 5 * time.Minute
claimProvisionTimeout = 5 * time.Minute
claimProvisionShortTimeout = 1 * time.Minute
dataSourceProvisionTimeout = 5 * time.Minute
claimBoundTimeout = 3 * time.Minute
pvReclaimTimeout = 3 * time.Minute
pvBoundTimeout = 3 * time.Minute
pvCreateTimeout = 3 * time.Minute
pvDeleteTimeout = 5 * time.Minute
pvDeleteSlowTimeout = 20 * time.Minute
snapshotCreateTimeout = 5 * time.Minute
snapshotDeleteTimeout = 5 * time.Minute
snapshotControllerMetricsTimeout = 5 * time.Minute
)
var defaultTimeouts = TimeoutContext{
Poll: 2 * time.Second, // from the former e2e/framework/pod poll interval
PodStart: 5 * time.Minute,
PodStartShort: 2 * time.Minute,
PodStartSlow: 15 * time.Minute,
PodDelete: 5 * time.Minute,
ClaimProvision: 5 * time.Minute,
ClaimProvisionShort: 1 * time.Minute,
DataSourceProvision: 5 * time.Minute,
ClaimBound: 3 * time.Minute,
PVReclaim: 3 * time.Minute,
PVBound: 3 * time.Minute,
PVCreate: 3 * time.Minute,
PVDelete: 5 * time.Minute,
PVDeleteSlow: 20 * time.Minute,
SnapshotCreate: 5 * time.Minute,
SnapshotDelete: 5 * time.Minute,
SnapshotControllerMetrics: 5 * time.Minute,
SystemPodsStartup: 10 * time.Minute,
NodeSchedulable: 30 * time.Minute,
SystemDaemonsetStartup: 5 * time.Minute,
}
// TimeoutContext contains timeout settings for several actions.
type TimeoutContext struct {
// Poll is how long to wait between API calls when waiting for some condition.
Poll time.Duration
// PodStart is how long to wait for the pod to be started.
// This value is the default for gomega.Eventually.
PodStart time.Duration
// PodStartShort is the same as `PodStart`, but shorter.
// Use it on a case-by-case basis, mostly when you are sure pod start will not be delayed.
// This value is the default for gomega.Consistently.
PodStartShort time.Duration
// PodStartSlow is the same as `PodStart`, but longer.
@@ -89,26 +97,31 @@ type TimeoutContext struct {
// SnapshotControllerMetrics is how long to wait for snapshot controller metrics.
SnapshotControllerMetrics time.Duration
// SystemPodsStartup is how long to wait for system pods to be running.
SystemPodsStartup time.Duration
// NodeSchedulable is how long to wait for all nodes to be schedulable.
NodeSchedulable time.Duration
// SystemDaemonsetStartup is how long to wait for all system daemonsets to be ready.
SystemDaemonsetStartup time.Duration
}
// NewTimeoutContextWithDefaults returns a TimeoutContext with default values.
func NewTimeoutContextWithDefaults() *TimeoutContext {
return &TimeoutContext{
PodStart: podStartTimeout,
PodStartShort: podStartShortTimeout,
PodStartSlow: podStartSlowTimeout,
PodDelete: podDeleteTimeout,
ClaimProvision: claimProvisionTimeout,
ClaimProvisionShort: claimProvisionShortTimeout,
DataSourceProvision: dataSourceProvisionTimeout,
ClaimBound: claimBoundTimeout,
PVReclaim: pvReclaimTimeout,
PVBound: pvBoundTimeout,
PVCreate: pvCreateTimeout,
PVDelete: pvDeleteTimeout,
PVDeleteSlow: pvDeleteSlowTimeout,
SnapshotCreate: snapshotCreateTimeout,
SnapshotDelete: snapshotDeleteTimeout,
SnapshotControllerMetrics: snapshotControllerMetricsTimeout,
}
// NewTimeoutContext returns a TimeoutContext with all values set either to
// hard-coded defaults or a value that was configured when running the E2E
// suite. Should be called after command line parsing.
func NewTimeoutContext() *TimeoutContext {
// Make a copy, otherwise the caller would have the ability to modify
// the original values.
copy := TestContext.timeouts
return &copy
}
// PollInterval defines how long to wait between API server queries while
// waiting for some condition.
//
// This value is the default for gomega.Eventually and gomega.Consistently.
func PollInterval() time.Duration {
return TestContext.timeouts.Poll
}
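Because NewTimeoutContext hands out a copy, per-test tweaks stay local. A minimal sketch (hypothetical caller; assumes imports "time" and the framework package):

timeouts := framework.NewTimeoutContext()
timeouts.PodStart = 30 * time.Minute // modifies only this copy
fresh := framework.NewTimeoutContext()
_ = fresh.PodStart // unaffected: still the flag-configured or default value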

View File

@@ -47,7 +47,7 @@ var _ = common.SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
ginkgo.BeforeEach(func(ctx context.Context) {
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, f.Timeouts.NodeSchedulable))
e2enode.WaitForTotalHealthy(ctx, f.ClientSet, time.Minute)
err := framework.CheckTestingNSDeletedExcept(ctx, f.ClientSet, f.Namespace.Name)

View File

@@ -1967,7 +1967,7 @@ func (v *azureFileVolume) DeleteVolume(ctx context.Context) {
}
func (a *azureDiskDriver) GetTimeouts() *framework.TimeoutContext {
timeouts := framework.NewTimeoutContextWithDefaults()
timeouts := framework.NewTimeoutContext()
timeouts.PodStart = time.Minute * 15
timeouts.PodDelete = time.Minute * 15
timeouts.PVDelete = time.Minute * 20

View File

@@ -311,7 +311,7 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(ctx context.Context,
}
func (d *driverDefinition) GetTimeouts() *framework.TimeoutContext {
timeouts := framework.NewTimeoutContextWithDefaults()
timeouts := framework.NewTimeoutContext()
if d.Timeouts == nil {
return timeouts
}

View File

@@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
e2eskipper.SkipUnlessSSHKeyPresent()
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err)

View File

@@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
e2eskipper.SkipUnlessSSHKeyPresent()
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
var err error
node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)

View File

@@ -141,7 +141,7 @@ func GetDriverTimeouts(driver TestDriver) *framework.TimeoutContext {
if d, ok := driver.(CustomTimeoutsTestDriver); ok {
return d.GetTimeouts()
}
return framework.NewTimeoutContextWithDefaults()
return framework.NewTimeoutContext()
}
// Capability represents a feature that a volume plugin supports
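A driver that needs non-default timeouts implements the CustomTimeoutsTestDriver hook, mirroring the azureDiskDriver change above. A hedged sketch of a hypothetical driver type (slowCloudDriver is illustrative, not part of this diff):

// Start from NewTimeoutContext so flag-configured values are preserved,
// then override only what this driver needs.
func (d *slowCloudDriver) GetTimeouts() *framework.TimeoutContext {
	timeouts := framework.NewTimeoutContext()
	timeouts.PodStart = 20 * time.Minute
	timeouts.PVDelete = 40 * time.Minute
	return timeouts
}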

View File

@@ -62,7 +62,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
e2eskipper.SkipUnlessProviderIs("aws", "gce")
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err)

View File

@@ -54,7 +54,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
ginkgo.BeforeEach(func(ctx context.Context) {
client = f.ClientSet
nameSpace = f.Namespace.Name
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
// Enforce binding only within test space via selector labels
volLabel = labels.Set{e2epv.VolumeSelectorKey: nameSpace}

View File

@@ -75,7 +75,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
ginkgo.BeforeEach(func(ctx context.Context) {
client = f.ClientSet
nameSpace = f.Namespace.Name
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
ginkgo.By("Creating a PVC")
prefix := "pvc-protection"

View File

@@ -110,7 +110,7 @@ func testVolumeProvisioning(ctx context.Context, c clientset.Interface, t *frame
Name: "HDD Regional PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Timeouts: framework.NewTimeoutContextWithDefaults(),
Timeouts: framework.NewTimeoutContext(),
Parameters: map[string]string{
"type": "pd-standard",
"zones": strings.Join(cloudZones, ","),
@@ -133,7 +133,7 @@ func testVolumeProvisioning(ctx context.Context, c clientset.Interface, t *frame
Name: "HDD Regional PD with auto zone selection on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Timeouts: framework.NewTimeoutContextWithDefaults(),
Timeouts: framework.NewTimeoutContext(),
Parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
@@ -173,7 +173,7 @@ func testZonalFailover(ctx context.Context, c clientset.Interface, ns string) {
testSpec := testsuites.StorageClassTest{
Name: "Regional PD Failover on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Timeouts: framework.NewTimeoutContextWithDefaults(),
Timeouts: framework.NewTimeoutContext(),
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
@@ -331,7 +331,7 @@ func testRegionalDelayedBinding(ctx context.Context, c clientset.Interface, ns s
Client: c,
Name: "Regional PD storage class with waitForFirstConsumer test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Timeouts: framework.NewTimeoutContextWithDefaults(),
Timeouts: framework.NewTimeoutContext(),
Parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
@@ -369,7 +369,7 @@ func testRegionalAllowedTopologies(ctx context.Context, c clientset.Interface, n
test := testsuites.StorageClassTest{
Name: "Regional PD storage class with allowedTopologies test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Timeouts: framework.NewTimeoutContextWithDefaults(),
Timeouts: framework.NewTimeoutContext(),
Parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
@@ -397,7 +397,7 @@ func testRegionalAllowedTopologies(ctx context.Context, c clientset.Interface, n
func testRegionalAllowedTopologiesWithDelayedBinding(ctx context.Context, c clientset.Interface, ns string, pvcCount int) {
test := testsuites.StorageClassTest{
Client: c,
Timeouts: framework.NewTimeoutContextWithDefaults(),
Timeouts: framework.NewTimeoutContext(),
Name: "Regional PD storage class with allowedTopologies and waitForFirstConsumer test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{

View File

@@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
ginkgo.BeforeEach(func(ctx context.Context) {
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
})
ginkgo.Describe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() {

View File

@@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele
ns = f.Namespace.Name
Bootstrap(f)
nodeInfo = GetReadySchedulableRandomNodeInfo(ctx)
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
ssdlabels = make(map[string]string)
ssdlabels["volume-type"] = "ssd"
vvollabels = make(map[string]string)

View File

@@ -120,7 +120,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
nodes, err := e2enode.GetReadySchedulableNodes(ctx, client)
framework.ExpectNoError(err)

View File

@@ -46,7 +46,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
framework.ExpectNoError(err)
workingDir = GetAndExpectStringEnvVar("VSPHERE_WORKING_DIR")
})

View File

@@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
framework.ExpectNoError(err)
if len(nodeList.Items) < 2 {

View File

@@ -59,7 +59,7 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() {
Bootstrap(f)
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(ctx, c, ns)
ginkgo.DeferCleanup(func() {
if len(node1KeyValueLabel) > 0 {

View File

@@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
nodes, err := e2enode.GetReadySchedulableNodes(ctx, client)
framework.ExpectNoError(err)