From cdb0cd03877c522e3ba3bc7ce22851a6b2ea710f Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Tue, 3 Jan 2023 16:31:13 +0100
Subject: [PATCH 1/6] e2e framework: simplify timeout defaults

Filling in the default values directly in the struct eliminates the
need to define constants that aren't used anywhere else.
---
 test/e2e/framework/timeouts.go | 59 +++++++++++++---------------------
 1 file changed, 22 insertions(+), 37 deletions(-)

diff --git a/test/e2e/framework/timeouts.go b/test/e2e/framework/timeouts.go
index ac636ddd76d..f7edd418dc5 100644
--- a/test/e2e/framework/timeouts.go
+++ b/test/e2e/framework/timeouts.go
@@ -18,25 +18,24 @@ package framework

 import "time"

-const (
-	// Default timeouts to be used in TimeoutContext
-	podStartTimeout                  = 5 * time.Minute
-	podStartShortTimeout             = 2 * time.Minute
-	podStartSlowTimeout              = 15 * time.Minute
-	podDeleteTimeout                 = 5 * time.Minute
-	claimProvisionTimeout            = 5 * time.Minute
-	claimProvisionShortTimeout       = 1 * time.Minute
-	dataSourceProvisionTimeout       = 5 * time.Minute
-	claimBoundTimeout                = 3 * time.Minute
-	pvReclaimTimeout                 = 3 * time.Minute
-	pvBoundTimeout                   = 3 * time.Minute
-	pvCreateTimeout                  = 3 * time.Minute
-	pvDeleteTimeout                  = 5 * time.Minute
-	pvDeleteSlowTimeout              = 20 * time.Minute
-	snapshotCreateTimeout            = 5 * time.Minute
-	snapshotDeleteTimeout            = 5 * time.Minute
-	snapshotControllerMetricsTimeout = 5 * time.Minute
-)
+var defaultTimeouts = TimeoutContext{
+	PodStart:                  5 * time.Minute,
+	PodStartShort:             2 * time.Minute,
+	PodStartSlow:              15 * time.Minute,
+	PodDelete:                 5 * time.Minute,
+	ClaimProvision:            5 * time.Minute,
+	ClaimProvisionShort:       1 * time.Minute,
+	DataSourceProvision:       5 * time.Minute,
+	ClaimBound:                3 * time.Minute,
+	PVReclaim:                 3 * time.Minute,
+	PVBound:                   3 * time.Minute,
+	PVCreate:                  3 * time.Minute,
+	PVDelete:                  5 * time.Minute,
+	PVDeleteSlow:              20 * time.Minute,
+	SnapshotCreate:            5 * time.Minute,
+	SnapshotDelete:            5 * time.Minute,
+	SnapshotControllerMetrics: 5 * time.Minute,
+}

 // TimeoutContext contains timeout settings for several actions.
 type TimeoutContext struct {
@@ -93,22 +92,8 @@ type TimeoutContext struct {

 // NewTimeoutContextWithDefaults returns a TimeoutContext with default values.
 func NewTimeoutContextWithDefaults() *TimeoutContext {
-	return &TimeoutContext{
-		PodStart:                  podStartTimeout,
-		PodStartShort:             podStartShortTimeout,
-		PodStartSlow:              podStartSlowTimeout,
-		PodDelete:                 podDeleteTimeout,
-		ClaimProvision:            claimProvisionTimeout,
-		ClaimProvisionShort:       claimProvisionShortTimeout,
-		DataSourceProvision:       dataSourceProvisionTimeout,
-		ClaimBound:                claimBoundTimeout,
-		PVReclaim:                 pvReclaimTimeout,
-		PVBound:                   pvBoundTimeout,
-		PVCreate:                  pvCreateTimeout,
-		PVDelete:                  pvDeleteTimeout,
-		PVDeleteSlow:              pvDeleteSlowTimeout,
-		SnapshotCreate:            snapshotCreateTimeout,
-		SnapshotDelete:            snapshotDeleteTimeout,
-		SnapshotControllerMetrics: snapshotControllerMetricsTimeout,
-	}
+	// Make a copy, otherwise the caller would have the ability to
+	// modify the defaults.
+	copy := defaultTimeouts
+	return &copy
 }

From f0cc053544b7e82127a49eb160c551aa20086746 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Tue, 3 Jan 2023 17:01:33 +0100
Subject: [PATCH 2/6] e2e framework: enable extending TimeoutContext

If we were to add new fields in TimeoutContext, the current users of
NewFrameworkWithCustomTimeouts might run into failures unless they get
modified to also set those new fields. This is error-prone.
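To make the failure mode concrete, here is a sketch of a hypothetical
caller under the old contract (the base name and timeout values are
made up for illustration):

	package example

	import (
		"time"

		"github.com/onsi/ginkgo/v2"
		"k8s.io/kubernetes/test/e2e/framework"
	)

	var _ = ginkgo.Describe("example", func() {
		// Old contract: with the plain `f.Timeouts = timeouts` assignment,
		// the struct was used as-is, so every field the caller did not set -
		// including any field added to TimeoutContext in a future release -
		// ended up as 0 and effectively disabled the corresponding wait.
		f := framework.NewFrameworkWithCustomTimeouts("example", &framework.TimeoutContext{
			PodStart:  10 * time.Minute,
			PodDelete: 10 * time.Minute,
			// ...all other timeouts had to be repeated here by hand.
		})
		_ = f
	})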
A better approach is to let users of NewFrameworkWithCustomTimeouts
override individual fields by setting just those, while the normal
defaults are used for all others.
---
 test/e2e/framework/framework.go               | 14 ++++-
 .../internal/unittests/framework_test.go      | 52 +++++++++++++++++++
 2 files changed, 64 insertions(+), 2 deletions(-)
 create mode 100644 test/e2e/framework/internal/unittests/framework_test.go

diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index c8ba3aa0879..79050477fd5 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -27,6 +27,7 @@ import (
 	"math/rand"
 	"os"
 	"path"
+	"reflect"
 	"strings"
 	"time"

@@ -144,10 +145,19 @@ type Options struct {
 	GroupVersion *schema.GroupVersion
 }

-// NewFrameworkWithCustomTimeouts makes a framework with with custom timeouts.
+// NewFrameworkWithCustomTimeouts makes a framework with custom timeouts.
+// For timeout values that are zero, the normal default value continues to
+// be used.
 func NewFrameworkWithCustomTimeouts(baseName string, timeouts *TimeoutContext) *Framework {
 	f := NewDefaultFramework(baseName)
-	f.Timeouts = timeouts
+	in := reflect.ValueOf(timeouts).Elem()
+	out := reflect.ValueOf(f.Timeouts).Elem()
+	for i := 0; i < in.NumField(); i++ {
+		value := in.Field(i)
+		if !value.IsZero() {
+			out.Field(i).Set(value)
+		}
+	}
 	return f
 }

diff --git a/test/e2e/framework/internal/unittests/framework_test.go b/test/e2e/framework/internal/unittests/framework_test.go
new file mode 100644
index 00000000000..30c8d8d0311
--- /dev/null
+++ b/test/e2e/framework/internal/unittests/framework_test.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework_test
+
+import (
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+
+	"k8s.io/kubernetes/test/e2e/framework"
+)
+
+func TestNewFrameworkWithCustomTimeouts(t *testing.T) {
+	defaultF := framework.NewDefaultFramework("test")
+	customTimeouts := &framework.TimeoutContext{
+		PodStart:  5 * time.Second,
+		PodDelete: time.Second,
+	}
+	customF := framework.NewFrameworkWithCustomTimeouts("test", customTimeouts)
+
+	defaultF.Timeouts.PodStart = customTimeouts.PodStart
+	defaultF.Timeouts.PodDelete = customTimeouts.PodDelete
+	assert.Equal(t, customF.Timeouts, defaultF.Timeouts)
+}
+
+func TestNewFramework(t *testing.T) {
+	f := framework.NewDefaultFramework("test")
+
+	timeouts := reflect.ValueOf(f.Timeouts).Elem()
+	for i := 0; i < timeouts.NumField(); i++ {
+		value := timeouts.Field(i)
+		if value.IsZero() {
+			t.Errorf("%s in Framework.Timeouts was not set.", reflect.TypeOf(*f.Timeouts).Field(i).Name)
+		}
+	}
+}

From db394db398d01a850bce9fc86068978c2a0fee02 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Tue, 3 Jan 2023 17:28:28 +0100
Subject: [PATCH 3/6] e2e: move several timeouts from TestContext into
 TimeoutContext

This consolidates timeout handling. In the future, configuration of
all timeouts via a configuration file might get added.
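To illustrate the consolidation, a short sketch (not part of the diffs
below; it assumes the usual e2e imports, a Framework instance f, a
client c, and a context ctx):

	// Before: the timeout lived directly in TestContext as
	// framework.TestContext.NodeSchedulableTimeout.
	//
	// After, inside a test that has a Framework instance:
	framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, f.Timeouts.NodeSchedulable))

	// After, outside any Framework instance (e.g. in suite setup):
	// NewTimeoutContext returns an independent copy of the configured values.
	timeouts := framework.NewTimeoutContext()
	framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, timeouts.NodeSchedulable))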
For now, the same three legacy command line flags for the timeouts that get moved continue to be supported. --- test/e2e/cloud/gcp/resize_nodes.go | 2 +- test/e2e/e2e.go | 10 +++-- test/e2e/framework/framework.go | 2 +- test/e2e/framework/test_context.go | 37 ++++++++++--------- test/e2e/framework/timeouts.go | 24 +++++++++--- test/e2e/network/dns_scale_records.go | 2 +- test/e2e/storage/drivers/in_tree.go | 2 +- test/e2e/storage/external/external.go | 2 +- .../flexvolume_mounted_volume_resize.go | 2 +- test/e2e/storage/flexvolume_online_resize.go | 2 +- test/e2e/storage/framework/testdriver.go | 2 +- test/e2e/storage/mounted_volume_resize.go | 2 +- test/e2e/storage/pv_protection.go | 2 +- test/e2e/storage/pvc_protection.go | 2 +- test/e2e/storage/regional_pd.go | 12 +++--- test/e2e/storage/vsphere/pv_reclaimpolicy.go | 2 +- .../e2e/storage/vsphere/pvc_label_selector.go | 2 +- .../vsphere/vsphere_volume_master_restart.go | 2 +- .../vsphere/vsphere_volume_node_delete.go | 2 +- .../vsphere/vsphere_volume_node_poweroff.go | 2 +- .../vsphere/vsphere_volume_placement.go | 2 +- .../vsphere/vsphere_volume_vpxd_restart.go | 2 +- 22 files changed, 69 insertions(+), 50 deletions(-) diff --git a/test/e2e/cloud/gcp/resize_nodes.go b/test/e2e/cloud/gcp/resize_nodes.go index 139635b7206..ddb7ac96120 100644 --- a/test/e2e/cloud/gcp/resize_nodes.go +++ b/test/e2e/cloud/gcp/resize_nodes.go @@ -126,7 +126,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { ginkgo.By("waiting 2 minutes for the watch in the podGC to catch up, remove any pods scheduled on " + "the now non-existent node and the RC to recreate it") - time.Sleep(framework.NewTimeoutContextWithDefaults().PodStartShort) + time.Sleep(framework.NewTimeoutContext().PodStartShort) ginkgo.By("verifying whether the pods from the removed node are recreated") err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount) diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 808284553a1..0d674e83b74 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -222,10 +222,12 @@ func setupSuite(ctx context.Context) { } } + timeouts := framework.NewTimeoutContext() + // In large clusters we may get to this point but still have a bunch // of nodes without Routes created. Since this would make a node // unschedulable, we need to wait until all of them are schedulable. - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, timeouts.NodeSchedulable)) // If NumNodes is not specified then auto-detect how many are scheduleable and not tainted if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes { @@ -238,18 +240,18 @@ func setupSuite(ctx context.Context) { // cluster infrastructure pods that are being pulled or started can block // test pods from running, and tests that ensure all pods are running and // ready will fail). - podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout + // // TODO: In large clusters, we often observe a non-starting pods due to // #41007. To avoid those pods preventing the whole test runs (and just // wasting the whole run), we allow for some not-ready pods (with the // number equal to the number of allowed not-ready nodes). 
-	if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
+	if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemPodsStartup, map[string]string{}); err != nil {
 		e2edebug.DumpAllNamespaceInfo(ctx, c, metav1.NamespaceSystem)
 		e2ekubectl.LogFailedContainers(ctx, c, metav1.NamespaceSystem, framework.Logf)
 		framework.Failf("Error waiting for all pods to be running and ready: %v", err)
 	}

-	if err := waitForDaemonSets(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
+	if err := waitForDaemonSets(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemDaemonsetStartup); err != nil {
 		framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
 	}

diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index 79050477fd5..35f37e1534c 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -179,7 +179,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
 		BaseName:  baseName,
 		Options:   options,
 		ClientSet: client,
-		Timeouts:  NewTimeoutContextWithDefaults(),
+		Timeouts:  NewTimeoutContext(),
 	}

 	// The order is important here: if the extension calls ginkgo.BeforeEach
diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go
index 4e53791daae..970ec152fd4 100644
--- a/test/e2e/framework/test_context.go
+++ b/test/e2e/framework/test_context.go
@@ -102,15 +102,20 @@ type TestContextType struct {
 	// Tooling is the tooling in use (e.g. kops, gke). Provider is the cloud provider and might not uniquely identify the tooling.
 	Tooling string

-	CloudConfig    CloudConfig
-	KubectlPath    string
-	OutputDir      string
-	ReportDir      string
-	ReportPrefix   string
-	Prefix         string
-	MinStartupPods int
-	// Timeout for waiting for system pods to be running
-	SystemPodsStartupTimeout time.Duration
+	// timeouts contains user-configurable timeouts for various operations.
+	// Individual Framework instances also have such timeouts, which may
+	// differ from these here. To avoid confusion, this field is not
+	// exported. Its values can be accessed through
+	// NewTimeoutContext.
+	timeouts TimeoutContext
+
+	CloudConfig    CloudConfig
+	KubectlPath    string
+	OutputDir      string
+	ReportDir      string
+	ReportPrefix   string
+	Prefix         string
+	MinStartupPods int
 	EtcdUpgradeStorage string
 	EtcdUpgradeVersion string
 	GCEUpgradeScript   string
@@ -143,10 +148,6 @@ type TestContextType struct {
 	IncludeClusterAutoscalerMetrics bool
 	// Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list.
 	OutputPrintType string
-	// NodeSchedulableTimeout is the timeout for waiting for all nodes to be schedulable.
-	NodeSchedulableTimeout time.Duration
-	// SystemDaemonsetStartupTimeout is the timeout for waiting for all system daemonsets to be ready.
-	SystemDaemonsetStartupTimeout time.Duration
 	// CreateTestingNS is responsible for creating namespace used for executing e2e tests.
 	// It accepts namespace base name, which will be prepended with e2e prefix, kube client
 	// and labels to be applied to a namespace.
@@ -272,7 +273,9 @@ type CloudConfig struct { } // TestContext should be used by all tests to access common context data. -var TestContext TestContextType +var TestContext = TestContextType{ + timeouts: defaultTimeouts, +} // StringArrayValue is used with flag.Var for a comma-separated list of strings placed into a string array. type stringArrayValue struct { @@ -414,9 +417,9 @@ func RegisterClusterFlags(flags *flag.FlagSet) { flags.StringVar(&cloudConfig.ClusterTag, "cluster-tag", "", "Tag used to identify resources. Only required if provider is aws.") flags.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure or vsphere.") flags.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used. If set to -1, no pods are checked and tests run straight away.") - flags.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.") - flags.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.") - flags.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.") + flags.DurationVar(&TestContext.timeouts.SystemPodsStartup, "system-pods-startup-timeout", TestContext.timeouts.SystemPodsStartup, "Timeout for waiting for all system pods to be running before starting tests.") + flags.DurationVar(&TestContext.timeouts.NodeSchedulable, "node-schedulable-timeout", TestContext.timeouts.NodeSchedulable, "Timeout for waiting for all nodes to be schedulable.") + flags.DurationVar(&TestContext.timeouts.SystemDaemonsetStartup, "system-daemonsets-startup-timeout", TestContext.timeouts.SystemDaemonsetStartup, "Timeout for waiting for all system daemonsets to be ready.") flags.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.") flags.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.") flags.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.") diff --git a/test/e2e/framework/timeouts.go b/test/e2e/framework/timeouts.go index f7edd418dc5..8ab7f2bb958 100644 --- a/test/e2e/framework/timeouts.go +++ b/test/e2e/framework/timeouts.go @@ -35,6 +35,9 @@ var defaultTimeouts = TimeoutContext{ SnapshotCreate: 5 * time.Minute, SnapshotDelete: 5 * time.Minute, SnapshotControllerMetrics: 5 * time.Minute, + SystemPodsStartup: 10 * time.Minute, + NodeSchedulable: 30 * time.Minute, + SystemDaemonsetStartup: 5 * time.Minute, } // TimeoutContext contains timeout settings for several actions. @@ -88,12 +91,23 @@ type TimeoutContext struct { // SnapshotControllerMetrics is how long to wait for snapshot controller metrics. SnapshotControllerMetrics time.Duration + + // SystemPodsStartup is how long to wait for system pods to be running. + SystemPodsStartup time.Duration + + // NodeSchedulable is how long to wait for all nodes to be schedulable. 
+	NodeSchedulable time.Duration
+
+	// SystemDaemonsetStartup is how long to wait for all system daemonsets to be ready.
+	SystemDaemonsetStartup time.Duration
 }

-// NewTimeoutContextWithDefaults returns a TimeoutContext with default values.
-func NewTimeoutContextWithDefaults() *TimeoutContext {
-	// Make a copy, otherwise the caller would have the ability to
-	// modify the defaults
-	copy := defaultTimeouts
+// NewTimeoutContext returns a TimeoutContext with all values set either to
+// hard-coded defaults or a value that was configured when running the E2E
+// suite. Should be called after command line parsing.
+func NewTimeoutContext() *TimeoutContext {
+	// Make a copy, otherwise the caller would have the ability to modify
+	// the original values.
+	copy := TestContext.timeouts
 	return &copy
 }
diff --git a/test/e2e/network/dns_scale_records.go b/test/e2e/network/dns_scale_records.go
index 33b46da7868..3aa1189d1b8 100644
--- a/test/e2e/network/dns_scale_records.go
+++ b/test/e2e/network/dns_scale_records.go
@@ -47,7 +47,7 @@ var _ = common.SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged

 	ginkgo.BeforeEach(func(ctx context.Context) {
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, f.Timeouts.NodeSchedulable))
 		e2enode.WaitForTotalHealthy(ctx, f.ClientSet, time.Minute)

 		err := framework.CheckTestingNSDeletedExcept(ctx, f.ClientSet, f.Namespace.Name)
diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go
index a44279fcffd..04c4e08285e 100644
--- a/test/e2e/storage/drivers/in_tree.go
+++ b/test/e2e/storage/drivers/in_tree.go
@@ -1967,7 +1967,7 @@ func (v *azureFileVolume) DeleteVolume(ctx context.Context) {
 }

 func (a *azureDiskDriver) GetTimeouts() *framework.TimeoutContext {
-	timeouts := framework.NewTimeoutContextWithDefaults()
+	timeouts := framework.NewTimeoutContext()
 	timeouts.PodStart = time.Minute * 15
 	timeouts.PodDelete = time.Minute * 15
 	timeouts.PVDelete = time.Minute * 20
diff --git a/test/e2e/storage/external/external.go b/test/e2e/storage/external/external.go
index e6408a97f0d..7150a3d73b8 100644
--- a/test/e2e/storage/external/external.go
+++ b/test/e2e/storage/external/external.go
@@ -311,7 +311,7 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(ctx context.Context,
 }

 func (d *driverDefinition) GetTimeouts() *framework.TimeoutContext {
-	timeouts := framework.NewTimeoutContextWithDefaults()
+	timeouts := framework.NewTimeoutContext()
 	if d.Timeouts == nil {
 		return timeouts
 	}
diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go
index 8aa87eb6534..92c17ee021a 100644
--- a/test/e2e/storage/flexvolume_mounted_volume_resize.go
+++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go
@@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
 		e2eskipper.SkipUnlessSSHKeyPresent()
 		c = f.ClientSet
 		ns = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))

 		node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 		framework.ExpectNoError(err)
diff --git a/test/e2e/storage/flexvolume_online_resize.go
b/test/e2e/storage/flexvolume_online_resize.go index 18d86eb7426..b1cb7140e97 100644 --- a/test/e2e/storage/flexvolume_online_resize.go +++ b/test/e2e/storage/flexvolume_online_resize.go @@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan e2eskipper.SkipUnlessSSHKeyPresent() c = f.ClientSet ns = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable)) var err error node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) diff --git a/test/e2e/storage/framework/testdriver.go b/test/e2e/storage/framework/testdriver.go index ea6b80847a3..6614e8d0280 100644 --- a/test/e2e/storage/framework/testdriver.go +++ b/test/e2e/storage/framework/testdriver.go @@ -141,7 +141,7 @@ func GetDriverTimeouts(driver TestDriver) *framework.TimeoutContext { if d, ok := driver.(CustomTimeoutsTestDriver); ok { return d.GetTimeouts() } - return framework.NewTimeoutContextWithDefaults() + return framework.NewTimeoutContext() } // Capability represents a feature that a volume plugin supports diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go index dcce82a77b8..27dbf322c76 100644 --- a/test/e2e/storage/mounted_volume_resize.go +++ b/test/e2e/storage/mounted_volume_resize.go @@ -62,7 +62,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun e2eskipper.SkipUnlessProviderIs("aws", "gce") c = f.ClientSet ns = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable)) node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) diff --git a/test/e2e/storage/pv_protection.go b/test/e2e/storage/pv_protection.go index 4e24f2c8a57..af0ed44251b 100644 --- a/test/e2e/storage/pv_protection.go +++ b/test/e2e/storage/pv_protection.go @@ -54,7 +54,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { ginkgo.BeforeEach(func(ctx context.Context) { client = f.ClientSet nameSpace = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable)) // Enforce binding only within test space via selector labels volLabel = labels.Set{e2epv.VolumeSelectorKey: nameSpace} diff --git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go index d501ff8a4c5..65bacf8ecc2 100644 --- a/test/e2e/storage/pvc_protection.go +++ b/test/e2e/storage/pvc_protection.go @@ -75,7 +75,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { ginkgo.BeforeEach(func(ctx context.Context) { client = f.ClientSet nameSpace = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable)) ginkgo.By("Creating a PVC") prefix := "pvc-protection" diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index 7e2aad92e37..228a30d2f9b 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -110,7 +110,7 @@ func testVolumeProvisioning(ctx 
context.Context, c clientset.Interface, t *frame Name: "HDD Regional PD on GCE/GKE", CloudProviders: []string{"gce", "gke"}, Provisioner: "kubernetes.io/gce-pd", - Timeouts: framework.NewTimeoutContextWithDefaults(), + Timeouts: framework.NewTimeoutContext(), Parameters: map[string]string{ "type": "pd-standard", "zones": strings.Join(cloudZones, ","), @@ -133,7 +133,7 @@ func testVolumeProvisioning(ctx context.Context, c clientset.Interface, t *frame Name: "HDD Regional PD with auto zone selection on GCE/GKE", CloudProviders: []string{"gce", "gke"}, Provisioner: "kubernetes.io/gce-pd", - Timeouts: framework.NewTimeoutContextWithDefaults(), + Timeouts: framework.NewTimeoutContext(), Parameters: map[string]string{ "type": "pd-standard", "replication-type": "regional-pd", @@ -173,7 +173,7 @@ func testZonalFailover(ctx context.Context, c clientset.Interface, ns string) { testSpec := testsuites.StorageClassTest{ Name: "Regional PD Failover on GCE/GKE", CloudProviders: []string{"gce", "gke"}, - Timeouts: framework.NewTimeoutContextWithDefaults(), + Timeouts: framework.NewTimeoutContext(), Provisioner: "kubernetes.io/gce-pd", Parameters: map[string]string{ "type": "pd-standard", @@ -331,7 +331,7 @@ func testRegionalDelayedBinding(ctx context.Context, c clientset.Interface, ns s Client: c, Name: "Regional PD storage class with waitForFirstConsumer test on GCE", Provisioner: "kubernetes.io/gce-pd", - Timeouts: framework.NewTimeoutContextWithDefaults(), + Timeouts: framework.NewTimeoutContext(), Parameters: map[string]string{ "type": "pd-standard", "replication-type": "regional-pd", @@ -369,7 +369,7 @@ func testRegionalAllowedTopologies(ctx context.Context, c clientset.Interface, n test := testsuites.StorageClassTest{ Name: "Regional PD storage class with allowedTopologies test on GCE", Provisioner: "kubernetes.io/gce-pd", - Timeouts: framework.NewTimeoutContextWithDefaults(), + Timeouts: framework.NewTimeoutContext(), Parameters: map[string]string{ "type": "pd-standard", "replication-type": "regional-pd", @@ -397,7 +397,7 @@ func testRegionalAllowedTopologies(ctx context.Context, c clientset.Interface, n func testRegionalAllowedTopologiesWithDelayedBinding(ctx context.Context, c clientset.Interface, ns string, pvcCount int) { test := testsuites.StorageClassTest{ Client: c, - Timeouts: framework.NewTimeoutContextWithDefaults(), + Timeouts: framework.NewTimeoutContext(), Name: "Regional PD storage class with allowedTopologies and waitForFirstConsumer test on GCE", Provisioner: "kubernetes.io/gce-pd", Parameters: map[string]string{ diff --git a/test/e2e/storage/vsphere/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go index 9255f7c0cc8..bd1006f7dd1 100644 --- a/test/e2e/storage/vsphere/pv_reclaimpolicy.go +++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go @@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo ginkgo.BeforeEach(func(ctx context.Context) { c = f.ClientSet ns = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable)) }) ginkgo.Describe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() { diff --git a/test/e2e/storage/vsphere/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go index fc06c9f9dd6..c458e7ecdfe 100644 --- a/test/e2e/storage/vsphere/pvc_label_selector.go +++ b/test/e2e/storage/vsphere/pvc_label_selector.go @@ -69,7 
+69,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele ns = f.Namespace.Name Bootstrap(f) nodeInfo = GetReadySchedulableRandomNodeInfo(ctx) - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable)) ssdlabels = make(map[string]string) ssdlabels["volume-type"] = "ssd" vvollabels = make(map[string]string) diff --git a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go index 5f7a806df5b..b7a2af40b10 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go @@ -120,7 +120,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable)) nodes, err := e2enode.GetReadySchedulableNodes(ctx, client) framework.ExpectNoError(err) diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go index b065681f545..8d6f4567e41 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go @@ -46,7 +46,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive] Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable)) framework.ExpectNoError(err) workingDir = GetAndExpectStringEnvVar("VSPHERE_WORKING_DIR") }) diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go index 309f4a62c32..b505f2aa06d 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go @@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable)) nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) framework.ExpectNoError(err) if len(nodeList.Items) < 2 { diff --git a/test/e2e/storage/vsphere/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go index 0fa945ea8ef..f8ee5686d47 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_placement.go +++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go @@ -59,7 +59,7 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { Bootstrap(f) c = f.ClientSet ns = f.Namespace.Name - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable)) node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = 
testSetupVolumePlacement(ctx, c, ns)
 		ginkgo.DeferCleanup(func() {
 			if len(node1KeyValueLabel) > 0 {
diff --git a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go
index 2d3da8272f7..878b0890487 100644
--- a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go
+++ b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go
@@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
 		Bootstrap(f)
 		client = f.ClientSet
 		namespace = f.Namespace.Name
-		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
+		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))

 		nodes, err := e2enode.GetReadySchedulableNodes(ctx, client)
 		framework.ExpectNoError(err)

From 16a6f70e113df12e4c37457c26c67a780ac91a89 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Tue, 3 Jan 2023 17:55:24 +0100
Subject: [PATCH 4/6] e2e: add PollInterval()

Various tests each have their own poll intervals. As a first step
towards consolidating that, the interval from test/e2e/framework/pod
(one of the most common cases for polling) is moved into the
framework. Other helper packages and tests still need to be changed
accordingly.
---
 test/e2e/framework/pod/wait.go | 23 ++++++++++-------------
 test/e2e/framework/timeouts.go | 10 ++++++++++
 2 files changed, 20 insertions(+), 13 deletions(-)

diff --git a/test/e2e/framework/pod/wait.go b/test/e2e/framework/pod/wait.go
index 3473cabed1b..3a0da54ff2d 100644
--- a/test/e2e/framework/pod/wait.go
+++ b/test/e2e/framework/pod/wait.go
@@ -53,9 +53,6 @@ const (
 	// podStartTimeout is how long to wait for the pod to be started.
 	podStartTimeout = 5 * time.Minute

-	// poll is how often to poll pods, nodes and claims.
-	poll = 2 * time.Second
-
 	// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
 	// transient failures from failing tests.
singleCallTimeout = 5 * time.Minute @@ -195,7 +192,7 @@ func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns stri notReady := int32(0) var lastAPIError error - if wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) { + if wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) { // We get the new list of pods, replication controllers, and // replica sets in every iteration because more pods come // online during startup and we want to ensure they are also @@ -287,7 +284,7 @@ func WaitForPodCondition(ctx context.Context, c clientset.Interface, ns, podName lastPod *v1.Pod start = time.Now() ) - err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) { + err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) { pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) lastPodError = err if err != nil { @@ -333,7 +330,7 @@ func WaitForAllPodsCondition(ctx context.Context, c clientset.Interface, ns stri framework.Logf("Waiting up to %v for at least %d pods in namespace %s to be %s", timeout, minPods, ns, conditionDesc) var pods *v1.PodList matched := 0 - err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (done bool, err error) { + err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (done bool, err error) { pods, err = c.CoreV1().Pods(ns).List(ctx, opts) if err != nil { return handleWaitingAPIError(err, true, "listing pods") @@ -366,7 +363,7 @@ func WaitForAllPodsCondition(ctx context.Context, c clientset.Interface, ns stri // WaitForPodsRunning waits for a given `timeout` to evaluate if a certain amount of pods in given `ns` are running. func WaitForPodsRunning(c clientset.Interface, ns string, num int, timeout time.Duration) error { matched := 0 - err := wait.PollImmediate(poll, timeout, func() (done bool, err error) { + err := wait.PollImmediate(framework.PollInterval(), timeout, func() (done bool, err error) { pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return handleWaitingAPIError(err, true, "listing pods") @@ -389,7 +386,7 @@ func WaitForPodsRunning(c clientset.Interface, ns string, num int, timeout time. // WaitForPodsSchedulingGated waits for a given `timeout` to evaluate if a certain amount of pods in given `ns` stay in scheduling gated state. func WaitForPodsSchedulingGated(c clientset.Interface, ns string, num int, timeout time.Duration) error { matched := 0 - err := wait.PollImmediate(poll, timeout, func() (done bool, err error) { + err := wait.PollImmediate(framework.PollInterval(), timeout, func() (done bool, err error) { pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return handleWaitingAPIError(err, true, "listing pods") @@ -415,7 +412,7 @@ func WaitForPodsSchedulingGated(c clientset.Interface, ns string, num int, timeo // match the given `schedulingGates`stay in scheduling gated state. 
func WaitForPodsWithSchedulingGates(c clientset.Interface, ns string, num int, timeout time.Duration, schedulingGates []v1.PodSchedulingGate) error { matched := 0 - err := wait.PollImmediate(poll, timeout, func() (done bool, err error) { + err := wait.PollImmediate(framework.PollInterval(), timeout, func() (done bool, err error) { pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return handleWaitingAPIError(err, true, "listing pods") @@ -608,7 +605,7 @@ func WaitForPodSuccessInNamespaceSlow(ctx context.Context, c clientset.Interface // than "not found" then that error is returned and the wait stops. func WaitForPodNotFoundInNamespace(ctx context.Context, c clientset.Interface, podName, ns string, timeout time.Duration) error { var lastPod *v1.Pod - err := wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) { + err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) { pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, nil // done @@ -670,7 +667,7 @@ func WaitForPodToDisappear(ctx context.Context, c clientset.Interface, ns, podNa func PodsResponding(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error { ginkgo.By("trying to dial each unique pod") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - err := wait.PollImmediateWithContext(ctx, poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses) + err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses) return maybeTimeoutError(err, "waiting for pods to be responsive") } @@ -679,7 +676,7 @@ func PodsResponding(ctx context.Context, c clientset.Interface, ns, name string, // It returns the matching Pods or a timeout error. 
 func WaitForNumberOfPods(ctx context.Context, c clientset.Interface, ns string, num int, timeout time.Duration) (pods *v1.PodList, err error) {
 	actualNum := 0
-	err = wait.PollImmediateWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
+	err = wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) {
 		pods, err = c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
 		if err != nil {
 			return handleWaitingAPIError(err, false, "listing pods")
@@ -722,7 +719,7 @@ func WaitForPodsWithLabelRunningReady(ctx context.Context, c clientset.Interface
 func WaitForNRestartablePods(ctx context.Context, ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) {
 	var pods []*v1.Pod
 	var errLast error
-	found := wait.PollWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
+	found := wait.PollWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) {
 		allPods := ps.List()
 		pods = FilterNonRestartablePods(allPods)
 		if len(pods) != expect {
diff --git a/test/e2e/framework/timeouts.go b/test/e2e/framework/timeouts.go
index 8ab7f2bb958..99fc8b9adfa 100644
--- a/test/e2e/framework/timeouts.go
+++ b/test/e2e/framework/timeouts.go
@@ -19,6 +19,7 @@ package framework
 import "time"

 var defaultTimeouts = TimeoutContext{
+	Poll:                      2 * time.Second, // from the former e2e/framework/pod poll interval
 	PodStart:                  5 * time.Minute,
 	PodStartShort:             2 * time.Minute,
 	PodStartSlow:              15 * time.Minute,
@@ -42,6 +43,9 @@ var defaultTimeouts = TimeoutContext{

 // TimeoutContext contains timeout settings for several actions.
 type TimeoutContext struct {
+	// Poll is how long to wait between API calls when waiting for some condition.
+	Poll time.Duration
+
 	// PodStart is how long to wait for the pod to be started.
 	PodStart time.Duration

@@ -111,3 +115,9 @@ func NewTimeoutContext() *TimeoutContext {
 	copy := TestContext.timeouts
 	return &copy
 }
+
+// PollInterval defines how long to wait between API server queries while
+// waiting for some condition.
+func PollInterval() time.Duration {
+	return TestContext.timeouts.Poll
+}

From 0f9a8d38be43f9e3387776983c91ce3350e82f13 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Tue, 3 Jan 2023 20:03:20 +0100
Subject: [PATCH 5/6] e2e framework: configure poll interval+duration for
 gomega

Primarily this protects against accidentally polling with the default
interval of 10ms. Setting these defaults may also make some tests
simpler because they don't need to override the defaults.
---
 test/e2e/framework/test_context.go | 10 ++++++++++
 test/e2e/framework/timeouts.go     |  4 ++++
 2 files changed, 14 insertions(+)

diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go
index 970ec152fd4..1b4a19445f4 100644
--- a/test/e2e/framework/test_context.go
+++ b/test/e2e/framework/test_context.go
@@ -32,6 +32,7 @@ import (

 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/ginkgo/v2/types"
+	"github.com/onsi/gomega"
 	gomegaformat "github.com/onsi/gomega/format"

 	restclient "k8s.io/client-go/rest"
@@ -472,6 +473,15 @@ func AfterReadingAllFlags(t *TestContextType) {
 		os.Exit(0)
 	}

+	// Reconfigure gomega defaults. The poll interval should be suitable
+	// for most tests. The timeouts are more subjective and tests may want
+	// to override them, but these defaults are still better for E2E than the
+	// ones from Gomega (1s timeout, 10ms interval).
+	gomega.SetDefaultEventuallyPollingInterval(t.timeouts.Poll)
+	gomega.SetDefaultConsistentlyPollingInterval(t.timeouts.Poll)
+	gomega.SetDefaultEventuallyTimeout(t.timeouts.PodStart)
+	gomega.SetDefaultConsistentlyDuration(t.timeouts.PodStartShort)
+
 	// Only set a default host if one won't be supplied via kubeconfig
 	if len(t.Host) == 0 && len(t.KubeConfig) == 0 {
 		// Check if we can use the in-cluster config
diff --git a/test/e2e/framework/timeouts.go b/test/e2e/framework/timeouts.go
index 99fc8b9adfa..5cfc29edba3 100644
--- a/test/e2e/framework/timeouts.go
+++ b/test/e2e/framework/timeouts.go
@@ -47,10 +47,12 @@ type TimeoutContext struct {
 	Poll time.Duration

 	// PodStart is how long to wait for the pod to be started.
+	// This value is the default for gomega.Eventually.
 	PodStart time.Duration

 	// PodStartShort is same as `PodStart`, but shorter.
 	// Use it in a case-by-case basis, mostly when you are sure pod start will not be delayed.
+	// This value is the default for gomega.Consistently.
 	PodStartShort time.Duration

 	// PodStartSlow is same as `PodStart`, but longer.
@@ -118,6 +120,8 @@ func NewTimeoutContext() *TimeoutContext {

 // PollInterval defines how long to wait between API server queries while
 // waiting for some condition.
+//
+// This value is the default for gomega.Eventually and gomega.Consistently.
 func PollInterval() time.Duration {
 	return TestContext.timeouts.Poll
 }

From 0424c4654e8a6a7df0ddbc273f407d0679177d6c Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Tue, 10 Jan 2023 11:36:45 +0100
Subject: [PATCH 6/6] e2e gcp: use timeout from f.Timeouts

This is simpler: there is no need to construct an entirely new struct
anymore.
---
 test/e2e/cloud/gcp/resize_nodes.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/e2e/cloud/gcp/resize_nodes.go b/test/e2e/cloud/gcp/resize_nodes.go
index ddb7ac96120..60a02fbb41e 100644
--- a/test/e2e/cloud/gcp/resize_nodes.go
+++ b/test/e2e/cloud/gcp/resize_nodes.go
@@ -126,7 +126,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {

 		ginkgo.By("waiting 2 minutes for the watch in the podGC to catch up, remove any pods scheduled on " +
 			"the now non-existent node and the RC to recreate it")
-		time.Sleep(framework.NewTimeoutContext().PodStartShort)
+		time.Sleep(f.Timeouts.PodStartShort)

 		ginkgo.By("verifying whether the pods from the removed node are recreated")
 		err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount)
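With these gomega defaults in place, polling assertions in tests can
omit explicit timing arguments and inherit the suite-wide values. A
minimal sketch, assuming a recent gomega release and the usual
client-go imports (the helper name is hypothetical):

	package example

	import (
		"context"

		"github.com/onsi/gomega"
		v1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		clientset "k8s.io/client-go/kubernetes"
	)

	// waitRunning shows the effect of the new defaults: without explicit
	// WithTimeout/WithPolling calls, Eventually polls every
	// TimeoutContext.Poll (2s by default) and gives up after
	// TimeoutContext.PodStart (5min by default), instead of gomega's
	// built-in 10ms interval and 1s timeout.
	func waitRunning(ctx context.Context, c clientset.Interface, ns, podName string) {
		gomega.Eventually(ctx, func(ctx context.Context) (*v1.Pod, error) {
			return c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
		}).Should(gomega.HaveField("Status.Phase", v1.PodRunning))
	}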