Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #99710 from wojtek-t/cleanup_describe_15
Remove nested SIGDescribe calls
commit f13c70e864
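For context, the per-package SIGDescribe helpers in the e2e tree are thin wrappers around ginkgo.Describe that prepend the owning SIG's tag to the container text, so only the outermost container in a test file should go through them; a nested SIGDescribe would stamp the SIG tag onto an inner container a second time. The following is a minimal sketch of that pattern, assuming a helper shaped roughly like the autoscaling package's (names and container text here are illustrative, not the exact upstream code):

package autoscaling

import "github.com/onsi/ginkgo"

// SIGDescribe wraps ginkgo.Describe and prepends the SIG tag, so it is
// intended only for the top-level container of a test file.
func SIGDescribe(text string, body func()) bool {
	return ginkgo.Describe("[sig-autoscaling] "+text, body)
}

var _ = SIGDescribe("Autoscaling", func() {
	// A nested SIGDescribe here would presumably render as
	// "[sig-autoscaling] [sig-autoscaling] Autoscaling ...", which is why
	// inner groups use plain ginkgo.Describe instead.
	ginkgo.Describe("Autoscaling a service", func() {
		ginkgo.It("scales the service", func() {
			// test body elided
		})
	})
})

Accordingly, the diff below only touches the inner Describe blocks; the outermost SIGDescribe (or utils.SIGDescribe) containers are left unchanged.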
@@ -34,7 +34,7 @@ import (
 var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling", func() {
 	f := framework.NewDefaultFramework("autoscaling")
 
-	SIGDescribe("Autoscaling a service", func() {
+	ginkgo.Describe("Autoscaling a service", func() {
 		ginkgo.BeforeEach(func() {
 			// Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
 			_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{})
@ -34,7 +34,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
|
||||
titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
|
||||
titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1"
|
||||
|
||||
SIGDescribe("[Serial] [Slow] Deployment", func() {
|
||||
ginkgo.Describe("[Serial] [Slow] Deployment", func() {
|
||||
// CPU tests via deployments
|
||||
ginkgo.It(titleUp, func() {
|
||||
scaleUp("test-deployment", e2eautoscaling.KindDeployment, false, f)
|
||||
@@ -44,7 +44,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
 		})
 	})
 
-	SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
+	ginkgo.Describe("[Serial] [Slow] ReplicaSet", func() {
 		// CPU tests via ReplicaSets
 		ginkgo.It(titleUp, func() {
 			scaleUp("rs", e2eautoscaling.KindReplicaSet, false, f)
@@ -55,7 +55,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
 	})
 
 	// These tests take ~20 minutes each.
-	SIGDescribe("[Serial] [Slow] ReplicationController", func() {
+	ginkgo.Describe("[Serial] [Slow] ReplicationController", func() {
 		// CPU tests via replication controllers
 		ginkgo.It(titleUp+" and verify decision stability", func() {
 			scaleUp("rc", e2eautoscaling.KindRC, true, f)
@@ -65,7 +65,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
 		})
 	})
 
-	SIGDescribe("ReplicationController light", func() {
+	ginkgo.Describe("ReplicationController light", func() {
 		ginkgo.It("Should scale from 1 pod to 2 pods", func() {
 			scaleTest := &HPAScaleTest{
 				initPods: 1,
@@ -276,7 +276,7 @@ var _ = SIGDescribe("kubelet", func() {
 		ns = f.Namespace.Name
 	})
 
-	SIGDescribe("Clean up pods on node", func() {
+	ginkgo.Describe("Clean up pods on node", func() {
 		var (
 			numNodes  int
 			nodeNames sets.String
@@ -384,7 +384,7 @@ var _ = SIGDescribe("kubelet", func() {
 	})
 
 	// Test host cleanup when disrupting the volume environment.
-	SIGDescribe("host cleanup with volume mounts [sig-storage][HostCleanup][Flaky]", func() {
+	ginkgo.Describe("host cleanup with volume mounts [sig-storage][HostCleanup][Flaky]", func() {
 
 		type hostCleanupTest struct {
 			itDescr string
@@ -216,7 +216,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 		result := om.GetLatestRuntimeOperationErrorRate()
 		framework.Logf("runtime operation error metrics:\n%s", e2ekubelet.FormatRuntimeOperationErrorRate(result))
 	})
-	SIGDescribe("regular resource usage tracking [Feature:RegularResourceUsageTracking]", func() {
+	ginkgo.Describe("regular resource usage tracking [Feature:RegularResourceUsageTracking]", func() {
 		// We assume that the scheduler will make reasonable scheduling choices
 		// and assign ~N pods on the node.
 		// Although we want to track N pods per node, there are N + add-on pods
@@ -268,7 +268,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 			})
 		}
 	})
-	SIGDescribe("experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking]", func() {
+	ginkgo.Describe("experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking]", func() {
 		density := []int{100}
 		for i := range density {
 			podsPerNode := density[i]
@@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
 		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
 	})
 
-	utils.SIGDescribe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() {
+	ginkgo.Describe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() {
 		ginkgo.BeforeEach(func() {
 			e2eskipper.SkipUnlessProviderIs("vsphere")
 			Bootstrap(f)
@@ -75,7 +75,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele
 
 	})
 
-	utils.SIGDescribe("Selector-Label Volume Binding:vsphere [Feature:vsphere]", func() {
+	ginkgo.Describe("Selector-Label Volume Binding:vsphere [Feature:vsphere]", func() {
 		ginkgo.AfterEach(func() {
 			ginkgo.By("Running clean up actions")
 			if framework.ProviderIs("vsphere") {