e2e: move several timeouts from TestContext into TimeoutContext

This consolidates timeout handling. In the future, configuration of all
timeouts via a configuration file might be added. For now, the three
legacy command line flags for the timeouts that are being moved continue
to be supported.
Patrick Ohly, 2023-01-03 17:28:28 +01:00
parent f0cc053544, commit db394db398
22 changed files with 69 additions and 50 deletions
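The hunks below all follow one pattern: call sites stop reading individual *Timeout fields from framework.TestContext and instead go through the consolidated TimeoutContext. A minimal before/after sketch of that call pattern, assembled from the hunks themselves (ctx and c stand for the usual context and clientset arguments):

// Before (fields removed in this commit):
err := e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout)

// After (flag-aware copy of the consolidated timeouts):
timeouts := framework.NewTimeoutContext()
err = e2enode.WaitForAllNodesSchedulable(ctx, c, timeouts.NodeSchedulable)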


@@ -126,7 +126,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 ginkgo.By("waiting 2 minutes for the watch in the podGC to catch up, remove any pods scheduled on " +
 	"the now non-existent node and the RC to recreate it")
-time.Sleep(framework.NewTimeoutContextWithDefaults().PodStartShort)
+time.Sleep(framework.NewTimeoutContext().PodStartShort)
 ginkgo.By("verifying whether the pods from the removed node are recreated")
 err = e2epod.VerifyPods(ctx, c, ns, name, true, originalNodeCount)


@@ -222,10 +222,12 @@ func setupSuite(ctx context.Context) {
 	}
 }
+timeouts := framework.NewTimeoutContext()
 // In large clusters we may get to this point but still have a bunch
 // of nodes without Routes created. Since this would make a node
 // unschedulable, we need to wait until all of them are schedulable.
-framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, timeouts.NodeSchedulable))
 // If NumNodes is not specified then auto-detect how many are scheduleable and not tainted
 if framework.TestContext.CloudConfig.NumNodes == framework.DefaultNumNodes {
@@ -238,18 +240,18 @@ func setupSuite(ctx context.Context) {
 // cluster infrastructure pods that are being pulled or started can block
 // test pods from running, and tests that ensure all pods are running and
 // ready will fail).
-podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
+//
 // TODO: In large clusters, we often observe a non-starting pods due to
 // #41007. To avoid those pods preventing the whole test runs (and just
 // wasting the whole run), we allow for some not-ready pods (with the
 // number equal to the number of allowed not-ready nodes).
-if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
+if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemPodsStartup, map[string]string{}); err != nil {
 	e2edebug.DumpAllNamespaceInfo(ctx, c, metav1.NamespaceSystem)
 	e2ekubectl.LogFailedContainers(ctx, c, metav1.NamespaceSystem, framework.Logf)
 	framework.Failf("Error waiting for all pods to be running and ready: %v", err)
 }
-if err := waitForDaemonSets(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
+if err := waitForDaemonSets(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemDaemonsetStartup); err != nil {
 	framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
 }


@@ -179,7 +179,7 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
 BaseName: baseName,
 Options: options,
 ClientSet: client,
-Timeouts: NewTimeoutContextWithDefaults(),
+Timeouts: NewTimeoutContext(),
 }
 // The order is important here: if the extension calls ginkgo.BeforeEach
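Because each Framework now seeds Timeouts from NewTimeoutContext(), every test owns an independent copy and can adjust individual values without affecting other tests; the storage driver hunks further down rely on exactly that. A hedged sketch (NewDefaultFramework is the usual constructor wrapper, not part of this diff):

f := framework.NewDefaultFramework("example")
// Safe: f.Timeouts is this test's own copy; mutating it cannot leak
// back into TestContext.timeouts or into other Framework instances.
f.Timeouts.PodStart = 15 * time.Minute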


@@ -102,6 +102,13 @@ type TestContextType struct {
 // Tooling is the tooling in use (e.g. kops, gke). Provider is the cloud provider and might not uniquely identify the tooling.
 Tooling string
+// timeouts contains user-configurable timeouts for various operations.
+// Individual Framework instance also have such timeouts which may be
+// different from these here. To avoid confusion, this field is not
+// exported. Its values can be accessed through
+// NewTimeoutContext.
+timeouts TimeoutContext
 CloudConfig CloudConfig
 KubectlPath string
 OutputDir string
@@ -109,8 +116,6 @@ type TestContextType struct {
 ReportPrefix string
 Prefix string
 MinStartupPods int
-// Timeout for waiting for system pods to be running
-SystemPodsStartupTimeout time.Duration
 EtcdUpgradeStorage string
 EtcdUpgradeVersion string
 GCEUpgradeScript string
@@ -143,10 +148,6 @@ type TestContextType struct {
 IncludeClusterAutoscalerMetrics bool
 // Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list.
 OutputPrintType string
-// NodeSchedulableTimeout is the timeout for waiting for all nodes to be schedulable.
-NodeSchedulableTimeout time.Duration
-// SystemDaemonsetStartupTimeout is the timeout for waiting for all system daemonsets to be ready.
-SystemDaemonsetStartupTimeout time.Duration
 // CreateTestingNS is responsible for creating namespace used for executing e2e tests.
 // It accepts namespace base name, which will be prepended with e2e prefix, kube client
 // and labels to be applied to a namespace.
@@ -272,7 +273,9 @@ type CloudConfig struct {
 }
 // TestContext should be used by all tests to access common context data.
-var TestContext TestContextType
+var TestContext = TestContextType{
+	timeouts: defaultTimeouts,
+}
 // StringArrayValue is used with flag.Var for a comma-separated list of strings placed into a string array.
 type stringArrayValue struct {
@@ -414,9 +417,9 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
 flags.StringVar(&cloudConfig.ClusterTag, "cluster-tag", "", "Tag used to identify resources. Only required if provider is aws.")
 flags.StringVar(&cloudConfig.ConfigFile, "cloud-config-file", "", "Cloud config file. Only required if provider is azure or vsphere.")
 flags.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used. If set to -1, no pods are checked and tests run straight away.")
-flags.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
-flags.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
-flags.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
+flags.DurationVar(&TestContext.timeouts.SystemPodsStartup, "system-pods-startup-timeout", TestContext.timeouts.SystemPodsStartup, "Timeout for waiting for all system pods to be running before starting tests.")
+flags.DurationVar(&TestContext.timeouts.NodeSchedulable, "node-schedulable-timeout", TestContext.timeouts.NodeSchedulable, "Timeout for waiting for all nodes to be schedulable.")
+flags.DurationVar(&TestContext.timeouts.SystemDaemonsetStartup, "system-daemonsets-startup-timeout", TestContext.timeouts.SystemDaemonsetStartup, "Timeout for waiting for all system daemonsets to be ready.")
 flags.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
 flags.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
 flags.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
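Note the idiom in RegisterClusterFlags: the default passed to flags.DurationVar is read from TestContext.timeouts itself, so the hard-coded value now lives in one place (defaultTimeouts) instead of being repeated at flag registration. A self-contained illustration of that pattern (all names here are hypothetical, not from the commit):

package main

import (
	"flag"
	"fmt"
	"time"
)

type timeoutContext struct {
	NodeSchedulable time.Duration
}

// Single source of truth for the default value.
var defaultTimeouts = timeoutContext{NodeSchedulable: 30 * time.Minute}

// Pre-seeded with the defaults, then overwritten in place by the flag.
var testContext = struct{ timeouts timeoutContext }{timeouts: defaultTimeouts}

func main() {
	// The current field value doubles as the flag default, so changing
	// defaultTimeouts automatically updates the flag's default and help text.
	flag.DurationVar(&testContext.timeouts.NodeSchedulable, "node-schedulable-timeout",
		testContext.timeouts.NodeSchedulable,
		"Timeout for waiting for all nodes to be schedulable.")
	flag.Parse()
	fmt.Println(testContext.timeouts.NodeSchedulable) // 30m0s unless overridden
}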


@@ -35,6 +35,9 @@ var defaultTimeouts = TimeoutContext{
 SnapshotCreate: 5 * time.Minute,
 SnapshotDelete: 5 * time.Minute,
 SnapshotControllerMetrics: 5 * time.Minute,
+SystemPodsStartup: 10 * time.Minute,
+NodeSchedulable: 30 * time.Minute,
+SystemDaemonsetStartup: 5 * time.Minute,
 }
 // TimeoutContext contains timeout settings for several actions.
@@ -88,12 +91,23 @@ type TimeoutContext struct {
 // SnapshotControllerMetrics is how long to wait for snapshot controller metrics.
 SnapshotControllerMetrics time.Duration
+// SystemPodsStartup is how long to wait for system pods to be running.
+SystemPodsStartup time.Duration
+// NodeSchedulable is how long to wait for all nodes to be schedulable.
+NodeSchedulable time.Duration
+// SystemDaemonsetStartup is how long to wait for all system daemonsets to be ready.
+SystemDaemonsetStartup time.Duration
 }
-// NewTimeoutContextWithDefaults returns a TimeoutContext with default values.
-func NewTimeoutContextWithDefaults() *TimeoutContext {
-	// Make a copy, otherwise the caller would have the ability to
-	// modify the defaults
-	copy := defaultTimeouts
+// NewTimeoutContext returns a TimeoutContext with all values set either to
+// hard-coded defaults or a value that was configured when running the E2E
+// suite. Should be called after command line parsing.
+func NewTimeoutContext() *TimeoutContext {
+	// Make a copy, otherwise the caller would have the ability to modify
+	// the original values.
+	copy := TestContext.timeouts
 	return &copy
 }
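One consequence of copying from TestContext.timeouts: per the new doc comment, NewTimeoutContext must run after command line parsing, otherwise the copy will only ever contain defaultTimeouts. A hedged sketch of the pitfall:

// Wrong: package-level initialization runs before flags are parsed, so a
// -node-schedulable-timeout override would be invisible to this copy.
var tooEarly = framework.NewTimeoutContext()

// Right: construct inside suite/test setup, after parsing, e.g.:
ginkgo.BeforeEach(func(ctx context.Context) {
	timeouts := framework.NewTimeoutContext()
	_ = timeouts // use timeouts.NodeSchedulable etc. here
})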


@@ -47,7 +47,7 @@ var _ = common.SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
 f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 ginkgo.BeforeEach(func(ctx context.Context) {
-framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, f.Timeouts.NodeSchedulable))
 e2enode.WaitForTotalHealthy(ctx, f.ClientSet, time.Minute)
 err := framework.CheckTestingNSDeletedExcept(ctx, f.ClientSet, f.Namespace.Name)


@@ -1967,7 +1967,7 @@ func (v *azureFileVolume) DeleteVolume(ctx context.Context) {
 }
 func (a *azureDiskDriver) GetTimeouts() *framework.TimeoutContext {
-timeouts := framework.NewTimeoutContextWithDefaults()
+timeouts := framework.NewTimeoutContext()
 timeouts.PodStart = time.Minute * 15
 timeouts.PodDelete = time.Minute * 15
 timeouts.PVDelete = time.Minute * 20


@@ -311,7 +311,7 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(ctx context.Context,
 }
 func (d *driverDefinition) GetTimeouts() *framework.TimeoutContext {
-timeouts := framework.NewTimeoutContextWithDefaults()
+timeouts := framework.NewTimeoutContext()
 if d.Timeouts == nil {
 	return timeouts
 }


@@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
 e2eskipper.SkipUnlessSSHKeyPresent()
 c = f.ClientSet
 ns = f.Namespace.Name
-framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
 node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 framework.ExpectNoError(err)


@@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
 e2eskipper.SkipUnlessSSHKeyPresent()
 c = f.ClientSet
 ns = f.Namespace.Name
-framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
 var err error
 node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)


@@ -141,7 +141,7 @@ func GetDriverTimeouts(driver TestDriver) *framework.TimeoutContext {
 if d, ok := driver.(CustomTimeoutsTestDriver); ok {
 	return d.GetTimeouts()
 }
-return framework.NewTimeoutContextWithDefaults()
+return framework.NewTimeoutContext()
 }
 // Capability represents a feature that a volume plugin supports
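Drivers that need longer waits keep working as before, just via the renamed constructor: implement GetTimeouts, start from NewTimeoutContext(), and override individual fields, as the azureDiskDriver hunk above does. A sketch with a hypothetical driver type:

type slowDriver struct{ /* ...TestDriver fields... */ }

func (d *slowDriver) GetTimeouts() *framework.TimeoutContext {
	timeouts := framework.NewTimeoutContext()
	timeouts.PodStart = 15 * time.Minute // this volume type provisions slowly
	return timeouts
}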


@@ -62,7 +62,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
 e2eskipper.SkipUnlessProviderIs("aws", "gce")
 c = f.ClientSet
 ns = f.Namespace.Name
-framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
 node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 framework.ExpectNoError(err)


@@ -54,7 +54,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
 ginkgo.BeforeEach(func(ctx context.Context) {
 client = f.ClientSet
 nameSpace = f.Namespace.Name
-framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
 // Enforce binding only within test space via selector labels
 volLabel = labels.Set{e2epv.VolumeSelectorKey: nameSpace}


@@ -75,7 +75,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 ginkgo.BeforeEach(func(ctx context.Context) {
 client = f.ClientSet
 nameSpace = f.Namespace.Name
-framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
 ginkgo.By("Creating a PVC")
 prefix := "pvc-protection"


@@ -110,7 +110,7 @@ func testVolumeProvisioning(ctx context.Context, c clientset.Interface, t *frame
 Name: "HDD Regional PD on GCE/GKE",
 CloudProviders: []string{"gce", "gke"},
 Provisioner: "kubernetes.io/gce-pd",
-Timeouts: framework.NewTimeoutContextWithDefaults(),
+Timeouts: framework.NewTimeoutContext(),
 Parameters: map[string]string{
 "type": "pd-standard",
 "zones": strings.Join(cloudZones, ","),
@@ -133,7 +133,7 @@ func testVolumeProvisioning(ctx context.Context, c clientset.Interface, t *frame
 Name: "HDD Regional PD with auto zone selection on GCE/GKE",
 CloudProviders: []string{"gce", "gke"},
 Provisioner: "kubernetes.io/gce-pd",
-Timeouts: framework.NewTimeoutContextWithDefaults(),
+Timeouts: framework.NewTimeoutContext(),
 Parameters: map[string]string{
 "type": "pd-standard",
 "replication-type": "regional-pd",
@@ -173,7 +173,7 @@ func testZonalFailover(ctx context.Context, c clientset.Interface, ns string) {
 testSpec := testsuites.StorageClassTest{
 Name: "Regional PD Failover on GCE/GKE",
 CloudProviders: []string{"gce", "gke"},
-Timeouts: framework.NewTimeoutContextWithDefaults(),
+Timeouts: framework.NewTimeoutContext(),
 Provisioner: "kubernetes.io/gce-pd",
 Parameters: map[string]string{
 "type": "pd-standard",
@@ -331,7 +331,7 @@ func testRegionalDelayedBinding(ctx context.Context, c clientset.Interface, ns s
 Client: c,
 Name: "Regional PD storage class with waitForFirstConsumer test on GCE",
 Provisioner: "kubernetes.io/gce-pd",
-Timeouts: framework.NewTimeoutContextWithDefaults(),
+Timeouts: framework.NewTimeoutContext(),
 Parameters: map[string]string{
 "type": "pd-standard",
 "replication-type": "regional-pd",
@@ -369,7 +369,7 @@ func testRegionalAllowedTopologies(ctx context.Context, c clientset.Interface, n
 test := testsuites.StorageClassTest{
 Name: "Regional PD storage class with allowedTopologies test on GCE",
 Provisioner: "kubernetes.io/gce-pd",
-Timeouts: framework.NewTimeoutContextWithDefaults(),
+Timeouts: framework.NewTimeoutContext(),
 Parameters: map[string]string{
 "type": "pd-standard",
 "replication-type": "regional-pd",
@@ -397,7 +397,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(ctx context.Context, c clientset.Interface, n
 func testRegionalAllowedTopologiesWithDelayedBinding(ctx context.Context, c clientset.Interface, ns string, pvcCount int) {
 test := testsuites.StorageClassTest{
 Client: c,
-Timeouts: framework.NewTimeoutContextWithDefaults(),
+Timeouts: framework.NewTimeoutContext(),
 Name: "Regional PD storage class with allowedTopologies and waitForFirstConsumer test on GCE",
 Provisioner: "kubernetes.io/gce-pd",
 Parameters: map[string]string{


@@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
 ginkgo.BeforeEach(func(ctx context.Context) {
 c = f.ClientSet
 ns = f.Namespace.Name
-framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
 })
 ginkgo.Describe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() {


@@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele
 ns = f.Namespace.Name
 Bootstrap(f)
 nodeInfo = GetReadySchedulableRandomNodeInfo(ctx)
-framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
 ssdlabels = make(map[string]string)
 ssdlabels["volume-type"] = "ssd"
 vvollabels = make(map[string]string)


@@ -120,7 +120,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
 Bootstrap(f)
 client = f.ClientSet
 namespace = f.Namespace.Name
-framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
 nodes, err := e2enode.GetReadySchedulableNodes(ctx, client)
 framework.ExpectNoError(err)


@@ -46,7 +46,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]
 Bootstrap(f)
 client = f.ClientSet
 namespace = f.Namespace.Name
-framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
 framework.ExpectNoError(err)
 workingDir = GetAndExpectStringEnvVar("VSPHERE_WORKING_DIR")
 })


@@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
 Bootstrap(f)
 client = f.ClientSet
 namespace = f.Namespace.Name
-framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
 nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
 framework.ExpectNoError(err)
 if len(nodeList.Items) < 2 {


@@ -59,7 +59,7 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() {
 Bootstrap(f)
 c = f.ClientSet
 ns = f.Namespace.Name
-framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
 node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(ctx, c, ns)
 ginkgo.DeferCleanup(func() {
 if len(node1KeyValueLabel) > 0 {


@@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
 Bootstrap(f)
 client = f.ClientSet
 namespace = f.Namespace.Name
-framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, framework.TestContext.NodeSchedulableTimeout))
+framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable))
 nodes, err := e2enode.GetReadySchedulableNodes(ctx, client)
 framework.ExpectNoError(err)