From db394db398d01a850bce9fc86068978c2a0fee02 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Tue, 3 Jan 2023 17:28:28 +0100
Subject: [PATCH] e2e: move several timeouts from TestContext into TimeoutContext

This consolidates timeout handling. In the future, configuration of all
timeouts via a configuration file might get added. For now, the same three
legacy command line flags for the timeouts that get moved continue to be
supported.
---
 test/e2e/cloud/gcp/resize_nodes.go            |  2 +-
 test/e2e/e2e.go                               | 10 +++--
 test/e2e/framework/framework.go               |  2 +-
 test/e2e/framework/test_context.go            | 37 ++++++++++---------
 test/e2e/framework/timeouts.go                | 24 +++++++++---
 test/e2e/network/dns_scale_records.go         |  2 +-
 test/e2e/storage/drivers/in_tree.go           |  2 +-
 test/e2e/storage/external/external.go         |  2 +-
 .../flexvolume_mounted_volume_resize.go       |  2 +-
 test/e2e/storage/flexvolume_online_resize.go  |  2 +-
 test/e2e/storage/framework/testdriver.go      |  2 +-
 test/e2e/storage/mounted_volume_resize.go     |  2 +-
 test/e2e/storage/pv_protection.go             |  2 +-
 test/e2e/storage/pvc_protection.go            |  2 +-
 test/e2e/storage/regional_pd.go               | 12 +++---
 test/e2e/storage/vsphere/pv_reclaimpolicy.go  |  2 +-
 .../e2e/storage/vsphere/pvc_label_selector.go |  2 +-
 .../vsphere/vsphere_volume_master_restart.go  |  2 +-
 .../vsphere/vsphere_volume_node_delete.go     |  2 +-
 .../vsphere/vsphere_volume_node_poweroff.go   |  2 +-
 .../vsphere/vsphere_volume_placement.go       |  2 +-
 .../vsphere/vsphere_volume_vpxd_restart.go    |  2 +-
 22 files changed, 69 insertions(+), 50 deletions(-)

diff --git a/test/e2e/cloud/gcp/resize_nodes.go b/test/e2e/cloud/gcp/resize_nodes.go
index 139635b7206..ddb7ac96120 100644
--- a/test/e2e/cloud/gcp/resize_nodes.go
+++ b/test/e2e/cloud/gcp/resize_nodes.go
@@ -126,7 +126,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 
 		ginkgo.By("waiting 2 minutes for the watch in the podGC to catch up, remove any pods scheduled on " +
 			"the now non-existent node and the RC to recreate it")
-		time.Sleep(framework.NewTimeoutContextWithDefaults().PodStartShort)
+		time.Sleep(framework.NewTimeoutContext().PodStartShort)
 
 		ginkgo.By("verifying whether the pods from the removed node are recreated")
 		err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount)
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index 808284553a1..0d674e83b74 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -222,10 +222,12 @@ func setupSuite(ctx context.Context) {
 		}
 	}
 
+	timeouts := framework.NewTimeoutContext()
+
 	// In large clusters we may get to this point but still have a bunch
 	// of nodes without Routes created. Since this would make a node
 	// unschedulable, we need to wait until all of them are schedulable.
-	framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+	framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, timeouts.NodeSchedulable))
 
 	// If NumNodes is not specified then auto-detect how many are scheduleable and not tainted
 	if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes {
@@ -238,18 +240,18 @@ func setupSuite(ctx context.Context) {
 	// cluster infrastructure pods that are being pulled or started can block
 	// test pods from running, and tests that ensure all pods are running and
 	// ready will fail).
-	podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
+	//
 	// TODO: In large clusters, we often observe a non-starting pods due to
 	// #41007. To avoid those pods preventing the whole test runs (and just
 	// wasting the whole run), we allow for some not-ready pods (with the
 	// number equal to the number of allowed not-ready nodes).
-	if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
+	if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemPodsStartup, map[string]string{}); err != nil {
 		e2edebug.DumpAllNamespaceInfo(ctx, c, metav1.NamespaceSystem)
 		e2ekubectl.LogFailedContainers(ctx, c, metav1.NamespaceSystem, framework.Logf)
 		framework.Failf("Error waiting for all pods to be running and ready: %v", err)
 	}
 
-	if err := waitForDaemonSets(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
+	if err := waitForDaemonSets(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemDaemonsetStartup); err != nil {
 		framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
 	}
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index 79050477fd5..35f37e1534c 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -179,7 +179,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
 		BaseName:  baseName,
 		Options:   options,
 		ClientSet: client,
-		Timeouts:  NewTimeoutContextWithDefaults(),
+		Timeouts:  NewTimeoutContext(),
 	}
 
 	// The order is important here: if the extension calls ginkgo.BeforeEach
diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go
index 4e53791daae..970ec152fd4 100644
--- a/test/e2e/framework/test_context.go
+++ b/test/e2e/framework/test_context.go
@@ -102,15 +102,20 @@ type TestContextType struct {
 	// Tooling is the tooling in use (e.g. kops, gke). Provider is the cloud provider and might not uniquely identify the tooling.
 	Tooling string
 
-	CloudConfig    CloudConfig
-	KubectlPath    string
-	OutputDir      string
-	ReportDir      string
-	ReportPrefix   string
-	Prefix         string
-	MinStartupPods int
-	// Timeout for waiting for system pods to be running
-	SystemPodsStartupTimeout time.Duration
+	// timeouts contains user-configurable timeouts for various operations.
+	// Individual Framework instances also have such timeouts, which may be
+	// different from these here. To avoid confusion, this field is not
+	// exported. Its values can be accessed through
+	// NewTimeoutContext.
+	timeouts TimeoutContext
+
+	CloudConfig    CloudConfig
+	KubectlPath    string
+	OutputDir      string
+	ReportDir      string
+	ReportPrefix   string
+	Prefix         string
+	MinStartupPods int
 	EtcdUpgradeStorage string
 	EtcdUpgradeVersion string
 	GCEUpgradeScript   string
@@ -143,10 +148,6 @@ type TestContextType struct {
 	IncludeClusterAutoscalerMetrics bool
 	// Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list.
 	OutputPrintType string
-	// NodeSchedulableTimeout is the timeout for waiting for all nodes to be schedulable.
-	NodeSchedulableTimeout time.Duration
-	// SystemDaemonsetStartupTimeout is the timeout for waiting for all system daemonsets to be ready.
-	SystemDaemonsetStartupTimeout time.Duration
 	// CreateTestingNS is responsible for creating namespace used for executing e2e tests.
 	// It accepts namespace base name, which will be prepended with e2e prefix, kube client
 	// and labels to be applied to a namespace.
@@ -272,7 +273,9 @@ type CloudConfig struct {
 }
 
 // TestContext should be used by all tests to access common context data.
-var TestContext TestContextType
+var TestContext = TestContextType{
+	timeouts: defaultTimeouts,
+}
 
 // StringArrayValue is used with flag.Var for a comma-separated list of strings placed into a string array.
 type stringArrayValue struct {
@@ -414,9 +417,9 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
 	flags.StringVar(&cloudConfig.ClusterTag, "cluster-tag", "", "Tag used to identify resources. Only required if provider is aws.")
 	flags.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure or vsphere.")
 	flags.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used. If set to -1, no pods are checked and tests run straight away.")
-	flags.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
-	flags.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
-	flags.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
+	flags.DurationVar(&TestContext.timeouts.SystemPodsStartup, "system-pods-startup-timeout", TestContext.timeouts.SystemPodsStartup, "Timeout for waiting for all system pods to be running before starting tests.")
+	flags.DurationVar(&TestContext.timeouts.NodeSchedulable, "node-schedulable-timeout", TestContext.timeouts.NodeSchedulable, "Timeout for waiting for all nodes to be schedulable.")
+	flags.DurationVar(&TestContext.timeouts.SystemDaemonsetStartup, "system-daemonsets-startup-timeout", TestContext.timeouts.SystemDaemonsetStartup, "Timeout for waiting for all system daemonsets to be ready.")
 	flags.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
 	flags.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
 	flags.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
diff --git a/test/e2e/framework/timeouts.go b/test/e2e/framework/timeouts.go
index f7edd418dc5..8ab7f2bb958 100644
--- a/test/e2e/framework/timeouts.go
+++ b/test/e2e/framework/timeouts.go
@@ -35,6 +35,9 @@ var defaultTimeouts = TimeoutContext{
 	SnapshotCreate:            5 * time.Minute,
 	SnapshotDelete:            5 * time.Minute,
 	SnapshotControllerMetrics: 5 * time.Minute,
+	SystemPodsStartup:         10 * time.Minute,
+	NodeSchedulable:           30 * time.Minute,
+	SystemDaemonsetStartup:    5 * time.Minute,
 }
 
 // TimeoutContext contains timeout settings for several actions.
@@ -88,12 +91,23 @@ type TimeoutContext struct {
 
 	// SnapshotControllerMetrics is how long to wait for snapshot controller metrics.
 	SnapshotControllerMetrics time.Duration
+
+	// SystemPodsStartup is how long to wait for system pods to be running.
+	SystemPodsStartup time.Duration
+
+	// NodeSchedulable is how long to wait for all nodes to be schedulable.
+	NodeSchedulable time.Duration
+
+	// SystemDaemonsetStartup is how long to wait for all system daemonsets to be ready.
+	SystemDaemonsetStartup time.Duration
 }
 
-// NewTimeoutContextWithDefaults returns a TimeoutContext with default values.
-func NewTimeoutContextWithDefaults() *TimeoutContext {
-	// Make a copy, otherwise the caller would have the ability to
-	// modify the defaults
-	copy := defaultTimeouts
+// NewTimeoutContext returns a TimeoutContext with all values set either to
+// hard-coded defaults or a value that was configured when running the E2E
+// suite. Should be called after command line parsing.
+func NewTimeoutContext() *TimeoutContext {
+	// Make a copy, otherwise the caller would have the ability to modify
+	// the original values.
+	copy := TestContext.timeouts
 	return &copy
 }
diff --git a/test/e2e/network/dns_scale_records.go b/test/e2e/network/dns_scale_records.go
index 33b46da7868..3aa1189d1b8 100644
--- a/test/e2e/network/dns_scale_records.go
+++ b/test/e2e/network/dns_scale_records.go
@@ -47,7 +47,7 @@ var _ = common.SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 
 	ginkgo.BeforeEach(func(ctx context.Context) {
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, f.Timeouts.NodeSchedulable))
 		e2enode.WaitForTotalHealthy(ctx, f.ClientSet, time.Minute)
 
 		err := framework.CheckTestingNSDeletedExcept(ctx, f.ClientSet, f.Namespace.Name)
diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go
index a44279fcffd..04c4e08285e 100644
--- a/test/e2e/storage/drivers/in_tree.go
+++ b/test/e2e/storage/drivers/in_tree.go
@@ -1967,7 +1967,7 @@ func (v *azureFileVolume) DeleteVolume(ctx context.Context) {
 }
 
 func (a *azureDiskDriver) GetTimeouts() *framework.TimeoutContext {
-	timeouts := framework.NewTimeoutContextWithDefaults()
+	timeouts := framework.NewTimeoutContext()
 	timeouts.PodStart = time.Minute * 15
 	timeouts.PodDelete = time.Minute * 15
 	timeouts.PVDelete = time.Minute * 20
diff --git a/test/e2e/storage/external/external.go b/test/e2e/storage/external/external.go
index e6408a97f0d..7150a3d73b8 100644
--- a/test/e2e/storage/external/external.go
+++ b/test/e2e/storage/external/external.go
@@ -311,7 +311,7 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(ctx context.Context,
 }
 
 func (d *driverDefinition) GetTimeouts() *framework.TimeoutContext {
-	timeouts := framework.NewTimeoutContextWithDefaults()
+	timeouts := framework.NewTimeoutContext()
 	if d.Timeouts == nil {
 		return timeouts
 	}
diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go
index 8aa87eb6534..92c17ee021a 100644
--- a/test/e2e/storage/flexvolume_mounted_volume_resize.go
+++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go
@@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
 		e2eskipper.SkipUnlessSSHKeyPresent()
 		c = f.ClientSet
 		ns = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
 		node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 		framework.ExpectNoError(err)
diff --git a/test/e2e/storage/flexvolume_online_resize.go b/test/e2e/storage/flexvolume_online_resize.go
index 18d86eb7426..b1cb7140e97 100644
--- a/test/e2e/storage/flexvolume_online_resize.go
+++ b/test/e2e/storage/flexvolume_online_resize.go
@@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
 		e2eskipper.SkipUnlessSSHKeyPresent()
 		c = f.ClientSet
 		ns = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
 
 		var err error
 		node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
diff --git a/test/e2e/storage/framework/testdriver.go b/test/e2e/storage/framework/testdriver.go
index ea6b80847a3..6614e8d0280 100644
--- a/test/e2e/storage/framework/testdriver.go
+++ b/test/e2e/storage/framework/testdriver.go
@@ -141,7 +141,7 @@ func GetDriverTimeouts(driver TestDriver) *framework.TimeoutContext {
 	if d, ok := driver.(CustomTimeoutsTestDriver); ok {
 		return d.GetTimeouts()
 	}
-	return framework.NewTimeoutContextWithDefaults()
+	return framework.NewTimeoutContext()
 }
 
 // Capability represents a feature that a volume plugin supports
diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go
index dcce82a77b8..27dbf322c76 100644
--- a/test/e2e/storage/mounted_volume_resize.go
+++ b/test/e2e/storage/mounted_volume_resize.go
@@ -62,7 +62,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
 		e2eskipper.SkipUnlessProviderIs("aws", "gce")
 		c = f.ClientSet
 		ns = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
 		node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 		framework.ExpectNoError(err)
diff --git a/test/e2e/storage/pv_protection.go b/test/e2e/storage/pv_protection.go
index 4e24f2c8a57..af0ed44251b 100644
--- a/test/e2e/storage/pv_protection.go
+++ b/test/e2e/storage/pv_protection.go
@@ -54,7 +54,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
 	ginkgo.BeforeEach(func(ctx context.Context) {
 		client = f.ClientSet
 		nameSpace = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
 
 		// Enforce binding only within test space via selector labels
 		volLabel = labels.Set{e2epv.VolumeSelectorKey: nameSpace}
diff --git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go
index d501ff8a4c5..65bacf8ecc2 100644
--- a/test/e2e/storage/pvc_protection.go
+++ b/test/e2e/storage/pvc_protection.go
@@ -75,7 +75,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 	ginkgo.BeforeEach(func(ctx context.Context) {
 		client = f.ClientSet
 		nameSpace = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
 
 		ginkgo.By("Creating a PVC")
 		prefix := "pvc-protection"
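[Editor's note, not part of the patch: the hunks above split timeout consumers into two groups. Suite-level code, which has no Framework instance, snapshots the configured values via framework.NewTimeoutContext(); per-test code reads its Framework's own copy via f.Timeouts. A minimal sketch of the two patterns follows; the "example" framework name, the waitForCluster helper, and the It-block text are hypothetical, everything else uses the APIs shown in the patch:

	import (
		"context"

		"github.com/onsi/ginkgo/v2"
		clientset "k8s.io/client-go/kubernetes"
		"k8s.io/kubernetes/test/e2e/framework"
		e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	)

	// Suite-level code (no Framework instance): snapshot the configured
	// timeouts. Must run after command line parsing so that the three
	// legacy flags have already been applied to TestContext.timeouts.
	func waitForCluster(ctx context.Context, c clientset.Interface) {
		timeouts := framework.NewTimeoutContext()
		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, timeouts.NodeSchedulable))
	}

	// Per-test code: read the Framework's own copy, which a driver or test
	// may have customized without affecting the globally configured values.
	var _ = ginkgo.Describe("example", func() {
		f := framework.NewDefaultFramework("example")
		ginkgo.It("waits for schedulable nodes", func(ctx context.Context) {
			framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, f.Timeouts.NodeSchedulable))
		})
	})
]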
diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go
index 7e2aad92e37..228a30d2f9b 100644
--- a/test/e2e/storage/regional_pd.go
+++ b/test/e2e/storage/regional_pd.go
@@ -110,7 +110,7 @@ func testVolumeProvisioning(ctx context.Context, c clientset.Interface, t *frame
 			Name:           "HDD Regional PD on GCE/GKE",
 			CloudProviders: []string{"gce", "gke"},
 			Provisioner:    "kubernetes.io/gce-pd",
-			Timeouts:       framework.NewTimeoutContextWithDefaults(),
+			Timeouts:       framework.NewTimeoutContext(),
 			Parameters: map[string]string{
 				"type":  "pd-standard",
 				"zones": strings.Join(cloudZones, ","),
@@ -133,7 +133,7 @@ func testVolumeProvisioning(ctx context.Context, c clientset.Interface, t *frame
 			Name:           "HDD Regional PD with auto zone selection on GCE/GKE",
 			CloudProviders: []string{"gce", "gke"},
 			Provisioner:    "kubernetes.io/gce-pd",
-			Timeouts:       framework.NewTimeoutContextWithDefaults(),
+			Timeouts:       framework.NewTimeoutContext(),
 			Parameters: map[string]string{
 				"type":             "pd-standard",
 				"replication-type": "regional-pd",
@@ -173,7 +173,7 @@ func testZonalFailover(ctx context.Context, c clientset.Interface, ns string) {
 	testSpec := testsuites.StorageClassTest{
 		Name:           "Regional PD Failover on GCE/GKE",
 		CloudProviders: []string{"gce", "gke"},
-		Timeouts:       framework.NewTimeoutContextWithDefaults(),
+		Timeouts:       framework.NewTimeoutContext(),
 		Provisioner:    "kubernetes.io/gce-pd",
 		Parameters: map[string]string{
 			"type": "pd-standard",
@@ -331,7 +331,7 @@ func testRegionalDelayedBinding(ctx context.Context, c clientset.Interface, ns s
 		Client:      c,
 		Name:        "Regional PD storage class with waitForFirstConsumer test on GCE",
 		Provisioner: "kubernetes.io/gce-pd",
-		Timeouts:    framework.NewTimeoutContextWithDefaults(),
+		Timeouts:    framework.NewTimeoutContext(),
 		Parameters: map[string]string{
 			"type":             "pd-standard",
 			"replication-type": "regional-pd",
@@ -369,7 +369,7 @@ func testRegionalAllowedTopologies(ctx context.Context, c clientset.Interface, n
 	test := testsuites.StorageClassTest{
 		Name:        "Regional PD storage class with allowedTopologies test on GCE",
 		Provisioner: "kubernetes.io/gce-pd",
-		Timeouts:    framework.NewTimeoutContextWithDefaults(),
+		Timeouts:    framework.NewTimeoutContext(),
 		Parameters: map[string]string{
 			"type":             "pd-standard",
 			"replication-type": "regional-pd",
@@ -397,7 +397,7 @@
 func testRegionalAllowedTopologiesWithDelayedBinding(ctx context.Context, c clientset.Interface, ns string, pvcCount int) {
 	test := testsuites.StorageClassTest{
 		Client:   c,
-		Timeouts: framework.NewTimeoutContextWithDefaults(),
+		Timeouts: framework.NewTimeoutContext(),
 		Name:        "Regional PD storage class with allowedTopologies and waitForFirstConsumer test on GCE",
 		Provisioner: "kubernetes.io/gce-pd",
 		Parameters: map[string]string{
diff --git a/test/e2e/storage/vsphere/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go
index 9255f7c0cc8..bd1006f7dd1 100644
--- a/test/e2e/storage/vsphere/pv_reclaimpolicy.go
+++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go
@@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
 	ginkgo.BeforeEach(func(ctx context.Context) {
 		c = f.ClientSet
 		ns = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
 	})
 
 	ginkgo.Describe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() {
diff --git a/test/e2e/storage/vsphere/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go
index fc06c9f9dd6..c458e7ecdfe 100644
--- a/test/e2e/storage/vsphere/pvc_label_selector.go
+++ b/test/e2e/storage/vsphere/pvc_label_selector.go
@@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele
 		ns = f.Namespace.Name
 		Bootstrap(f)
 		nodeInfo = GetReadySchedulableRandomNodeInfo(ctx)
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
 		ssdlabels = make(map[string]string)
 		ssdlabels["volume-type"] = "ssd"
 		vvollabels = make(map[string]string)
diff --git a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go
index 5f7a806df5b..b7a2af40b10 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go
@@ -120,7 +120,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
 		Bootstrap(f)
 		client = f.ClientSet
 		namespace = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
 
 		nodes, err := e2enode.GetReadySchedulableNodes(ctx, client)
 		framework.ExpectNoError(err)
diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
index b065681f545..8d6f4567e41 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go
@@ -46,7 +46,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
 		Bootstrap(f)
 		client = f.ClientSet
 		namespace = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
 		framework.ExpectNoError(err)
 		workingDir = GetAndExpectStringEnvVar("VSPHERE_WORKING_DIR")
 	})
diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
index 309f4a62c32..b505f2aa06d 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go
@@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 		Bootstrap(f)
 		client = f.ClientSet
 		namespace = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
 		nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
 		framework.ExpectNoError(err)
 		if len(nodeList.Items) < 2 {
diff --git a/test/e2e/storage/vsphere/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go
index 0fa945ea8ef..f8ee5686d47 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_placement.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go
@@ -59,7 +59,7 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() {
 		Bootstrap(f)
 		c = f.ClientSet
 		ns = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
 		node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(ctx, c, ns)
 		ginkgo.DeferCleanup(func() {
 			if len(node1KeyValueLabel) > 0 {
diff --git a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go
index 2d3da8272f7..878b0890487 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go
@@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
 		Bootstrap(f)
 		client = f.ClientSet
 		namespace = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
 
 		nodes, err := e2enode.GetReadySchedulableNodes(ctx, client)
 		framework.ExpectNoError(err)
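[Editor's note, not part of the patch: NewTimeoutContext now returns a copy of TestContext.timeouts rather than of the hard-coded defaultTimeouts, so values configured via the three legacy flags flow through while callers stay free to mutate their copy. A hedged sketch of that copy semantics, modeled on the azureDiskDriver.GetTimeouts hunk above and assuming the usual framework and time imports; the slowDriver type and its override value are hypothetical:

	// Because NewTimeoutContext returns a copy, a driver can stretch
	// individual timeouts without leaking the change into the global
	// TestContext.timeouts or into other tests.
	func (d *slowDriver) GetTimeouts() *framework.TimeoutContext {
		timeouts := framework.NewTimeoutContext()
		timeouts.PodStart = 30 * time.Minute // hypothetical override
		return timeouts
	}
]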