commit a5b80f5112
Merge pull request #78805 from spiffxp/slow-tagging

    tag some release-blocking tests taking over 5min as [Slow]
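The [Slow] marker is not separate metadata: it becomes part of the Ginkgo spec name, and CI jobs include or exclude slow tests by matching spec names against a regular expression (conventionally a skip pattern such as \[Slow\] for release-blocking jobs with a time budget). Below is a minimal, self-contained Go sketch of that filtering mechanism; the spec names are taken from this diff, but the runner loop is illustrative, not Ginkgo's actual implementation.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Spec names as they read after this commit; the tag lives in the name itself.
	specs := []string{
		"Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow]",
		"Should scale from 2 pods to 1 pod [Slow]",
		"should fail in binding dynamic provisioned PV to PVC [Slow]",
	}

	// A Ginkgo-style skip filter: a run configured to exclude slow tests
	// drops every spec whose name matches the regex.
	skip := regexp.MustCompile(`\[Slow\]`)

	for _, s := range specs {
		if skip.MatchString(s) {
			fmt.Println("skip:", s)
		} else {
			fmt.Println("run: ", s)
		}
	}
}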
@@ -122,7 +122,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 		gomega.Expect(list.Items).To(gomega.HaveLen(numberOfTotalResources))
 	})

-	ginkgo.It("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent", func() {
+	ginkgo.It("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent [Slow]", func() {
 		ns := f.Namespace.Name
 		c := f.ClientSet
 		client := c.CoreV1().PodTemplates(ns)
@@ -576,7 +576,7 @@ var _ = SIGDescribe("StatefulSet", func() {
			Testname: StatefulSet, Scaling
			Description: StatefulSet MUST create Pods in ascending order by ordinal index when scaling up, and delete Pods in descending order when scaling down. Scaling up or down MUST pause if any Pods belonging to the StatefulSet are unhealthy. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
		*/
-		framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() {
+		framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow]", func() {
 			psLabels := klabels.Set(labels)
 			ginkgo.By("Initializing watcher for selector " + psLabels.String())
 			watcher, err := f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
@@ -661,7 +661,7 @@ var _ = SIGDescribe("StatefulSet", func() {
			Testname: StatefulSet, Burst Scaling
			Description: StatefulSet MUST support the Parallel PodManagementPolicy for burst scaling. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
		*/
-		framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods", func() {
+		framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods [Slow]", func() {
 			psLabels := klabels.Set(labels)

 			ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
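Note that the two StatefulSet specs above are registered through framework.ConformanceIt rather than plain ginkgo.It, so after this change their names carry both markers: the [Slow] added here and the [Conformance] appended by the wrapper. A hedged sketch of such a wrapper follows; the real helper lives in the e2e framework and may differ in detail.

package e2esketch

import "github.com/onsi/ginkgo"

// conformanceIt sketches a wrapper in the spirit of framework.ConformanceIt:
// it appends the [Conformance] tag to the spec text at registration time, so
// text that already ends in "[Slow]" produces a name carrying both markers.
func conformanceIt(text string, body interface{}, timeout ...float64) bool {
	return ginkgo.It(text+" [Conformance]", body, timeout...)
}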
@@ -79,7 +79,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
 		}
 		scaleTest.run("rc-light", common.KindRC, rc, f)
 	})
-	ginkgo.It("Should scale from 2 pods to 1 pod", func() {
+	ginkgo.It("Should scale from 2 pods to 1 pod [Slow]", func() {
 		scaleTest := &HPAScaleTest{
 			initPods:             2,
 			totalInitialCPUUsage: 50,
@@ -128,21 +128,24 @@ var (

 	// XfsInlineVolume is TestPattern for "Inline-volume (xfs)"
 	XfsInlineVolume = TestPattern{
 		Name:       "Inline-volume (xfs)",
 		VolType:    InlineVolume,
 		FsType:     "xfs",
+		FeatureTag: "[Slow]",
 	}
 	// XfsPreprovisionedPV is TestPattern for "Pre-provisioned PV (xfs)"
 	XfsPreprovisionedPV = TestPattern{
 		Name:       "Pre-provisioned PV (xfs)",
 		VolType:    PreprovisionedPV,
 		FsType:     "xfs",
+		FeatureTag: "[Slow]",
 	}
 	// XfsDynamicPV is TestPattern for "Dynamic PV (xfs)"
 	XfsDynamicPV = TestPattern{
 		Name:       "Dynamic PV (xfs)",
 		VolType:    DynamicPV,
 		FsType:     "xfs",
+		FeatureTag: "[Slow]",
 	}

 	// Definitions for ntfs
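Unlike the per-spec renames elsewhere in this commit, the xfs patterns pick up the tag through the TestPattern FeatureTag field, so every test generated from those patterns inherits it in one place. Below is a minimal sketch of how a framework can fold such a field into generated spec names; TestPattern here is trimmed to the fields visible in the hunk above, and testNameFor is a hypothetical helper, not the framework's actual API.

package main

import (
	"fmt"
	"strings"
)

// TestPattern is trimmed to the fields visible in the hunk above; the real
// struct in the e2e storage framework carries more fields, and VolType is a
// named type rather than a plain string.
type TestPattern struct {
	Name       string
	VolType    string
	FsType     string
	FeatureTag string
}

// testNameFor is a hypothetical helper: it joins the suite name with the
// pattern name and appends any feature tag, so setting FeatureTag to
// "[Slow]" marks every spec generated from that pattern at once.
func testNameFor(suite string, p TestPattern) string {
	parts := []string{suite, p.Name}
	if p.FeatureTag != "" {
		parts = append(parts, p.FeatureTag)
	}
	return strings.Join(parts, " ")
}

func main() {
	xfsDynamicPV := TestPattern{
		Name:       "Dynamic PV (xfs)",
		VolType:    "DynamicPV",
		FsType:     "xfs",
		FeatureTag: "[Slow]",
	}
	fmt.Println(testNameFor("volumeMode", xfsDynamicPV))
	// Prints: volumeMode Dynamic PV (xfs) [Slow]
}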
@@ -236,7 +236,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 		}
 	case testpatterns.DynamicPV:
 		if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
-			ginkgo.It("should fail in binding dynamic provisioned PV to PVC", func() {
+			ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow]", func() {
 				init()
 				defer cleanup()
