From 119f4df9c5cb3315a31fbc1457ff84b17aebc681 Mon Sep 17 00:00:00 2001 From: tanjunchen Date: Mon, 17 Feb 2020 23:31:50 +0800 Subject: [PATCH] test/e2e/framework:remove unused code and move const --- test/e2e/e2e.go | 10 +++++++++- .../framework/autoscaling/autoscaling_utils.go | 1 - .../framework/providers/gce/recreate_node.go | 10 ++++++++-- test/e2e/framework/util.go | 17 ++--------------- test/e2e/storage/pvc_protection.go | 11 ++++++++--- 5 files changed, 27 insertions(+), 22 deletions(-) diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index f9b073ed3ec..d5f5082b213 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -60,6 +60,14 @@ import ( _ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere" ) +const ( + // namespaceCleanupTimeout is how long to wait for the namespace to be deleted. + // If there are any orphaned namespaces to clean up, this test is running + // on a long lived cluster. A long wait here is preferable to spurious test + // failures caused by leaked resources from a previous test run. + namespaceCleanupTimeout = 15 * time.Minute +) + var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // Reference common test to make the import valid. 
commontest.CurrentSuite = commontest.E2E @@ -232,7 +240,7 @@ func setupSuite() { framework.Failf("Error deleting orphaned namespaces: %v", err) } klog.Infof("Waiting for deletion of the following namespaces: %v", deleted) - if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil { + if err := framework.WaitForNamespacesDeleted(c, deleted, namespaceCleanupTimeout); err != nil { framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err) } } diff --git a/test/e2e/framework/autoscaling/autoscaling_utils.go b/test/e2e/framework/autoscaling/autoscaling_utils.go index c9a7e0f7360..8cca7e608d5 100644 --- a/test/e2e/framework/autoscaling/autoscaling_utils.go +++ b/test/e2e/framework/autoscaling/autoscaling_utils.go @@ -45,7 +45,6 @@ import ( const ( dynamicConsumptionTimeInSeconds = 30 - staticConsumptionTimeInSeconds = 3600 dynamicRequestSizeInMillicores = 100 dynamicRequestSizeInMegabytes = 100 dynamicRequestSizeCustomMetric = 10 diff --git a/test/e2e/framework/providers/gce/recreate_node.go b/test/e2e/framework/providers/gce/recreate_node.go index b3ae13dda9e..4db44096217 100644 --- a/test/e2e/framework/providers/gce/recreate_node.go +++ b/test/e2e/framework/providers/gce/recreate_node.go @@ -34,6 +34,12 @@ import ( testutils "k8s.io/kubernetes/test/utils" ) +const ( + // recreateNodeReadyAgainTimeout is how long a node is allowed to become "Ready" after it is recreated before + // the test is considered failed. 
+ recreateNodeReadyAgainTimeout = 10 * time.Minute +) + func nodeNames(nodes []v1.Node) []string { result := make([]string, 0, len(nodes)) for i := range nodes { @@ -102,9 +108,9 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace framework.Failf("Test failed; failed to start the restart instance group command.") } - err = WaitForNodeBootIdsToChange(c, nodes, framework.RecreateNodeReadyAgainTimeout) + err = WaitForNodeBootIdsToChange(c, nodes, recreateNodeReadyAgainTimeout) if err != nil { - framework.Failf("Test failed; failed to recreate at least one node in %v.", framework.RecreateNodeReadyAgainTimeout) + framework.Failf("Test failed; failed to recreate at least one node in %v.", recreateNodeReadyAgainTimeout) } nodesAfter, err := e2enode.CheckReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 13318dca302..8c836c1c452 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -103,12 +103,6 @@ const ( // PodEventTimeout is how much we wait for a pod event to occur. PodEventTimeout = 2 * time.Minute - // NamespaceCleanupTimeout is how long to wait for the namespace to be deleted. - // If there are any orphaned namespaces to clean up, this test is running - // on a long lived cluster. A long wait here is preferably to spurious test - // failures caused by leaked resources from a previous test run. - NamespaceCleanupTimeout = 15 * time.Minute - // ServiceStartTimeout is how long to wait for a service endpoint to be resolvable. ServiceStartTimeout = 3 * time.Minute @@ -138,12 +132,8 @@ const ( // Use it case by case when we are sure this timeout is enough. ClaimProvisionShortTimeout = 1 * time.Minute - // ClaimDeletingTimeout is How long claims have to become deleted. 
- ClaimDeletingTimeout = 3 * time.Minute - - // RecreateNodeReadyAgainTimeout is how long a node is allowed to become "Ready" after it is recreated before - // the test is considered failed. - RecreateNodeReadyAgainTimeout = 10 * time.Minute + // ClaimProvisionTimeout is how long claims have to become dynamically provisioned. + ClaimProvisionTimeout = 5 * time.Minute // RestartNodeReadyAgainTimeout is how long a node is allowed to become "Ready" after it is restarted before // the test is considered failed. @@ -171,9 +161,6 @@ const ( ) var ( - // ClaimProvisionTimeout is how long claims have to become dynamically provisioned. - ClaimProvisionTimeout = 5 * time.Minute - // BusyBoxImage is the image URI of BusyBox. BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox) diff --git a/test/e2e/storage/pvc_protection.go b/test/e2e/storage/pvc_protection.go index 9c2ff9b41a8..91d81a52b21 100644 --- a/test/e2e/storage/pvc_protection.go +++ b/test/e2e/storage/pvc_protection.go @@ -36,6 +36,11 @@ import ( "k8s.io/kubernetes/test/e2e/storage/utils" ) +const ( + // claimDeletingTimeout is how long claims have to become deleted. + claimDeletingTimeout = 3 * time.Minute +) + // waitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be removed from the system until timeout occurs, whichever comes first. 
func waitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error { framework.Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName) @@ -112,7 +117,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { ginkgo.By("Deleting the PVC") err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.NewDeleteOptions(0)) framework.ExpectNoError(err, "Error deleting PVC") - waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout) + waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, claimDeletingTimeout) pvcCreatedAndNotDeleted = false }) @@ -131,7 +136,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { framework.ExpectNoError(err, "Error terminating and deleting pod") ginkgo.By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod") - waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout) + waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, claimDeletingTimeout) pvcCreatedAndNotDeleted = false }) @@ -163,7 +168,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() { framework.ExpectNoError(err, "Error terminating and deleting pod") ginkgo.By("Checking that the PVC is automatically removed from the system because it's no longer in active use by a pod") - waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimDeletingTimeout) + waitForPersistentVolumeClaimDeleted(client, pvc.Namespace, pvc.Name, framework.Poll, claimDeletingTimeout) pvcCreatedAndNotDeleted = false }) })