diff --git a/test/e2e/autoscaling/autoscaling_timer.go b/test/e2e/autoscaling/autoscaling_timer.go
index 2bb9a879823..cad8aa1a021 100644
--- a/test/e2e/autoscaling/autoscaling_timer.go
+++ b/test/e2e/autoscaling/autoscaling_timer.go
@@ -34,7 +34,7 @@ import (
 var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling", func() {
 	f := framework.NewDefaultFramework("autoscaling")
 
-	SIGDescribe("Autoscaling a service", func() {
+	ginkgo.Describe("Autoscaling a service", func() {
 		ginkgo.BeforeEach(func() {
 			// Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
 			_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{})
diff --git a/test/e2e/autoscaling/horizontal_pod_autoscaling.go b/test/e2e/autoscaling/horizontal_pod_autoscaling.go
index bc9d51f790b..9e277b7e6cc 100644
--- a/test/e2e/autoscaling/horizontal_pod_autoscaling.go
+++ b/test/e2e/autoscaling/horizontal_pod_autoscaling.go
@@ -34,7 +34,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
 	titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
 	titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1"
 
-	SIGDescribe("[Serial] [Slow] Deployment", func() {
+	ginkgo.Describe("[Serial] [Slow] Deployment", func() {
 		// CPU tests via deployments
 		ginkgo.It(titleUp, func() {
 			scaleUp("test-deployment", e2eautoscaling.KindDeployment, false, f)
@@ -44,7 +44,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
 		})
 	})
 
-	SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
+	ginkgo.Describe("[Serial] [Slow] ReplicaSet", func() {
 		// CPU tests via ReplicaSets
 		ginkgo.It(titleUp, func() {
 			scaleUp("rs", e2eautoscaling.KindReplicaSet, false, f)
@@ -55,7 +55,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
 	})
 
 	// These tests take ~20 minutes each.
-	SIGDescribe("[Serial] [Slow] ReplicationController", func() {
+	ginkgo.Describe("[Serial] [Slow] ReplicationController", func() {
 		// CPU tests via replication controllers
 		ginkgo.It(titleUp+" and verify decision stability", func() {
 			scaleUp("rc", e2eautoscaling.KindRC, true, f)
@@ -65,7 +65,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
 		})
 	})
 
-	SIGDescribe("ReplicationController light", func() {
+	ginkgo.Describe("ReplicationController light", func() {
 		ginkgo.It("Should scale from 1 pod to 2 pods", func() {
 			scaleTest := &HPAScaleTest{
 				initPods: 1,
diff --git a/test/e2e/node/kubelet.go b/test/e2e/node/kubelet.go
index 35064865453..ed71ac50c0c 100644
--- a/test/e2e/node/kubelet.go
+++ b/test/e2e/node/kubelet.go
@@ -276,7 +276,7 @@ var _ = SIGDescribe("kubelet", func() {
 		ns = f.Namespace.Name
 	})
 
-	SIGDescribe("Clean up pods on node", func() {
+	ginkgo.Describe("Clean up pods on node", func() {
 		var (
 			numNodes  int
 			nodeNames sets.String
@@ -384,7 +384,7 @@ var _ = SIGDescribe("kubelet", func() {
 	})
 
 	// Test host cleanup when disrupting the volume environment.
-	SIGDescribe("host cleanup with volume mounts [sig-storage][HostCleanup][Flaky]", func() {
+	ginkgo.Describe("host cleanup with volume mounts [sig-storage][HostCleanup][Flaky]", func() {
 
 		type hostCleanupTest struct {
 			itDescr string
diff --git a/test/e2e/node/kubelet_perf.go b/test/e2e/node/kubelet_perf.go
index a2668264c81..5d8406bb0f5 100644
--- a/test/e2e/node/kubelet_perf.go
+++ b/test/e2e/node/kubelet_perf.go
@@ -216,7 +216,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 		result := om.GetLatestRuntimeOperationErrorRate()
 		framework.Logf("runtime operation error metrics:\n%s", e2ekubelet.FormatRuntimeOperationErrorRate(result))
 	})
-	SIGDescribe("regular resource usage tracking [Feature:RegularResourceUsageTracking]", func() {
+	ginkgo.Describe("regular resource usage tracking [Feature:RegularResourceUsageTracking]", func() {
 		// We assume that the scheduler will make reasonable scheduling choices
 		// and assign ~N pods on the node.
 		// Although we want to track N pods per node, there are N + add-on pods
@@ -268,7 +268,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 			})
 		}
 	})
-	SIGDescribe("experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking]", func() {
+	ginkgo.Describe("experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking]", func() {
 		density := []int{100}
 		for i := range density {
 			podsPerNode := density[i]
diff --git a/test/e2e/storage/vsphere/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go
index fce1e8355b7..b8f1d0f4b93 100644
--- a/test/e2e/storage/vsphere/pv_reclaimpolicy.go
+++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go
@@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
 		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
 	})
 
-	utils.SIGDescribe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() {
+	ginkgo.Describe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() {
 		ginkgo.BeforeEach(func() {
 			e2eskipper.SkipUnlessProviderIs("vsphere")
 			Bootstrap(f)
diff --git a/test/e2e/storage/vsphere/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go
index 906e94787b1..3245f18569b 100644
--- a/test/e2e/storage/vsphere/pvc_label_selector.go
+++ b/test/e2e/storage/vsphere/pvc_label_selector.go
@@ -75,7 +75,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele
 
 	})
 
-	utils.SIGDescribe("Selector-Label Volume Binding:vsphere [Feature:vsphere]", func() {
+	ginkgo.Describe("Selector-Label Volume Binding:vsphere [Feature:vsphere]", func() {
 		ginkgo.AfterEach(func() {
 			ginkgo.By("Running clean up actions")
 			if framework.ProviderIs("vsphere") {
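For reference, a minimal sketch of the pattern this diff applies (hypothetical file and SIG name, not taken from the PR): the top-level container keeps the suite's SIGDescribe helper, which wraps ginkgo.Describe and prepends the SIG label to the spec text, while nested containers call ginkgo.Describe directly so the label is applied only once per spec.

package example

import "github.com/onsi/ginkgo"

// SIGDescribe is a hypothetical stand-in for the per-SIG helper in the
// kubernetes e2e framework; it wraps ginkgo.Describe and prepends a SIG tag.
func SIGDescribe(text string, body func()) bool {
	return ginkgo.Describe("[sig-example] "+text, body)
}

var _ = SIGDescribe("kubelet", func() { // top level: SIG tag added here
	// Nested containers use ginkgo.Describe directly; nesting SIGDescribe
	// would prepend "[sig-example]" a second time to every child spec name.
	ginkgo.Describe("Clean up pods on node", func() {
		ginkgo.It("deletes orphaned pods", func() {
			// test body elided
		})
	})
})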