Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 11:50:44 +00:00)
Remove nested SIGDescribe calls

commit f277e4d32a (parent 5b0d0451ff)
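The change is mechanical: every inner SIGDescribe(...) that already sits inside an outer SIGDescribe block becomes a plain ginkgo.Describe(...). In the Kubernetes e2e suites, SIGDescribe is a per-SIG wrapper that prefixes the spec text with the owning SIG's tag, so nesting it repeats that tag in the generated test names. Below is a minimal sketch of such a wrapper for illustration only; the package name, file location, and exact tag are assumptions, not a quote of the upstream helper.

// Sketch of a per-SIG helper as it typically appears in the e2e tree
// (e.g. test/e2e/autoscaling); names here are illustrative assumptions.
package autoscaling

import "github.com/onsi/ginkgo"

// SIGDescribe tags the spec with the owning SIG. Nesting two of these
// would produce names like "[sig-autoscaling] ... [sig-autoscaling] ...",
// which is why the inner calls in the diff below are replaced with
// ginkgo.Describe.
func SIGDescribe(text string, body func()) bool {
    return ginkgo.Describe("[sig-autoscaling] "+text, body)
}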
@@ -34,7 +34,7 @@ import (
 var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling", func() {
     f := framework.NewDefaultFramework("autoscaling")
 
-    SIGDescribe("Autoscaling a service", func() {
+    ginkgo.Describe("Autoscaling a service", func() {
         ginkgo.BeforeEach(func() {
             // Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
             _, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{})
@ -34,7 +34,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
|
|||||||
titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
|
titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
|
||||||
titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1"
|
titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1"
|
||||||
|
|
||||||
SIGDescribe("[Serial] [Slow] Deployment", func() {
|
ginkgo.Describe("[Serial] [Slow] Deployment", func() {
|
||||||
// CPU tests via deployments
|
// CPU tests via deployments
|
||||||
ginkgo.It(titleUp, func() {
|
ginkgo.It(titleUp, func() {
|
||||||
scaleUp("test-deployment", e2eautoscaling.KindDeployment, false, f)
|
scaleUp("test-deployment", e2eautoscaling.KindDeployment, false, f)
|
||||||
@ -44,7 +44,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
|
ginkgo.Describe("[Serial] [Slow] ReplicaSet", func() {
|
||||||
// CPU tests via ReplicaSets
|
// CPU tests via ReplicaSets
|
||||||
ginkgo.It(titleUp, func() {
|
ginkgo.It(titleUp, func() {
|
||||||
scaleUp("rs", e2eautoscaling.KindReplicaSet, false, f)
|
scaleUp("rs", e2eautoscaling.KindReplicaSet, false, f)
|
||||||
@ -55,7 +55,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
|
|||||||
})
|
})
|
||||||
|
|
||||||
// These tests take ~20 minutes each.
|
// These tests take ~20 minutes each.
|
||||||
SIGDescribe("[Serial] [Slow] ReplicationController", func() {
|
ginkgo.Describe("[Serial] [Slow] ReplicationController", func() {
|
||||||
// CPU tests via replication controllers
|
// CPU tests via replication controllers
|
||||||
ginkgo.It(titleUp+" and verify decision stability", func() {
|
ginkgo.It(titleUp+" and verify decision stability", func() {
|
||||||
scaleUp("rc", e2eautoscaling.KindRC, true, f)
|
scaleUp("rc", e2eautoscaling.KindRC, true, f)
|
||||||
@ -65,7 +65,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
|
|||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
SIGDescribe("ReplicationController light", func() {
|
ginkgo.Describe("ReplicationController light", func() {
|
||||||
ginkgo.It("Should scale from 1 pod to 2 pods", func() {
|
ginkgo.It("Should scale from 1 pod to 2 pods", func() {
|
||||||
scaleTest := &HPAScaleTest{
|
scaleTest := &HPAScaleTest{
|
||||||
initPods: 1,
|
initPods: 1,
|
||||||
|
@ -276,7 +276,7 @@ var _ = SIGDescribe("kubelet", func() {
|
|||||||
ns = f.Namespace.Name
|
ns = f.Namespace.Name
|
||||||
})
|
})
|
||||||
|
|
||||||
SIGDescribe("Clean up pods on node", func() {
|
ginkgo.Describe("Clean up pods on node", func() {
|
||||||
var (
|
var (
|
||||||
numNodes int
|
numNodes int
|
||||||
nodeNames sets.String
|
nodeNames sets.String
|
||||||
@ -384,7 +384,7 @@ var _ = SIGDescribe("kubelet", func() {
|
|||||||
})
|
})
|
||||||
|
|
||||||
// Test host cleanup when disrupting the volume environment.
|
// Test host cleanup when disrupting the volume environment.
|
||||||
SIGDescribe("host cleanup with volume mounts [sig-storage][HostCleanup][Flaky]", func() {
|
ginkgo.Describe("host cleanup with volume mounts [sig-storage][HostCleanup][Flaky]", func() {
|
||||||
|
|
||||||
type hostCleanupTest struct {
|
type hostCleanupTest struct {
|
||||||
itDescr string
|
itDescr string
|
||||||
|
@ -216,7 +216,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
|
|||||||
result := om.GetLatestRuntimeOperationErrorRate()
|
result := om.GetLatestRuntimeOperationErrorRate()
|
||||||
framework.Logf("runtime operation error metrics:\n%s", e2ekubelet.FormatRuntimeOperationErrorRate(result))
|
framework.Logf("runtime operation error metrics:\n%s", e2ekubelet.FormatRuntimeOperationErrorRate(result))
|
||||||
})
|
})
|
||||||
SIGDescribe("regular resource usage tracking [Feature:RegularResourceUsageTracking]", func() {
|
ginkgo.Describe("regular resource usage tracking [Feature:RegularResourceUsageTracking]", func() {
|
||||||
// We assume that the scheduler will make reasonable scheduling choices
|
// We assume that the scheduler will make reasonable scheduling choices
|
||||||
// and assign ~N pods on the node.
|
// and assign ~N pods on the node.
|
||||||
// Although we want to track N pods per node, there are N + add-on pods
|
// Although we want to track N pods per node, there are N + add-on pods
|
||||||
@ -268,7 +268,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
SIGDescribe("experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking]", func() {
|
ginkgo.Describe("experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking]", func() {
|
||||||
density := []int{100}
|
density := []int{100}
|
||||||
for i := range density {
|
for i := range density {
|
||||||
podsPerNode := density[i]
|
podsPerNode := density[i]
|
||||||
|
@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
|
|||||||
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
|
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
|
||||||
})
|
})
|
||||||
|
|
||||||
utils.SIGDescribe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() {
|
ginkgo.Describe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() {
|
||||||
ginkgo.BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
e2eskipper.SkipUnlessProviderIs("vsphere")
|
e2eskipper.SkipUnlessProviderIs("vsphere")
|
||||||
Bootstrap(f)
|
Bootstrap(f)
|
||||||
|
@ -75,7 +75,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele
|
|||||||
|
|
||||||
})
|
})
|
||||||
|
|
||||||
utils.SIGDescribe("Selector-Label Volume Binding:vsphere [Feature:vsphere]", func() {
|
ginkgo.Describe("Selector-Label Volume Binding:vsphere [Feature:vsphere]", func() {
|
||||||
ginkgo.AfterEach(func() {
|
ginkgo.AfterEach(func() {
|
||||||
ginkgo.By("Running clean up actions")
|
ginkgo.By("Running clean up actions")
|
||||||
if framework.ProviderIs("vsphere") {
|
if framework.ProviderIs("vsphere") {
|
||||||
|