Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #18900 from ihmccreery/serial-tests
Auto commit by PR queue bot
Commit 7f5123f61f
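This PR replaces hand-maintained lists of test names in the Jenkins e2e scripts with `[Serial]` and `[Disruptive]` tags embedded directly in the Ginkgo spec names, so suites can select or exclude tests with a single regex. Below is a minimal sketch of the convention, assuming Ginkgo v1 as vendored at the time; the suite and spec names are hypothetical, not from this PR:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// Standard Ginkgo bootstrap: hook Gomega failures into Ginkgo and run the suite.
func TestE2E(t *testing.T) {
	gomega.RegisterFailHandler(Fail)
	RunSpecs(t, "Example E2E Suite")
}

// The tag lives in the spec name itself, so --ginkgo.focus / --ginkgo.skip
// regexes can match it without any out-of-band bookkeeping.
var _ = Describe("ExampleComponent [Disruptive]", func() {
	It("recovers after the component restarts", func() {
		// test body elided
	})
})
```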
@@ -325,12 +325,7 @@ AWS_REQUIRED_SKIP_TESTS=(
 
 # Tests which kills or restarts components and/or nodes.
 DISRUPTIVE_TESTS=(
-    "DaemonRestart"
-    "Etcd\sfailure"
-    "Nodes\sNetwork"
-    "Nodes\sResize"
-    "Reboot"
-    "Services.*restarting"
+    "\[Disruptive\]"
 )
 
 # The following tests are known to be flaky, and are thus run only in their own
@@ -366,16 +361,12 @@ GCE_SLOW_TESTS=(
 )
 
 # Tests which are not able to be run in parallel.
 #
 # TODO(ihmccreery) I'd like to get these combined with DISRUPTIVE_TESTS.
 GCE_PARALLEL_SKIP_TESTS=(
-    "GCE\sL7\sLoadBalancer\sController" # namespaced watch flakes, issue: #17805
-    "Nodes\sNetwork"
-    "MaxPods"
-    "Resource\susage\sof\ssystem\scontainers"
-    "SchedulerPredicates"
-    "resource\susage\stracking"
-    "NodeOutOfDisk"
-    "${DISRUPTIVE_TESTS[@]}"
-)
+    "\[Serial\]"
+    "\[Disruptive\]"
+)
 
 # Tests which are known to be flaky when run in parallel.
 GCE_PARALLEL_FLAKY_TESTS=(
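With the skip lists reduced to the two tag patterns above, the runner only has to fold them into one skip regex for Ginkgo. Here is a standalone sketch of that matching logic, assuming the entries are joined with regex alternation in the way the script's lists are fed to `--ginkgo.skip`; the spec names below are illustrative:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Hypothetical stand-in for DISRUPTIVE_TESTS / GCE_PARALLEL_SKIP_TESTS after this PR.
	skips := []string{`\[Serial\]`, `\[Disruptive\]`}

	// Join the entries into a single alternation, as a --ginkgo.skip value would be built.
	skip := regexp.MustCompile(strings.Join(skips, "|"))

	for _, name := range []string{
		"Kubelet [Serial] regular resource usage tracking",
		"Reboot [Disruptive] each node by ordering clean reboot",
		"Services should provide secure master service",
	} {
		fmt.Printf("skip=%v  %s\n", skip.MatchString(name), name)
	}
}
```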
@@ -183,7 +183,7 @@ func getContainerRestarts(c *client.Client, ns string, labelSelector labels.Sele
 	return failedContainers, containerRestartNodes.List()
 }
 
-var _ = Describe("DaemonRestart", func() {
+var _ = Describe("DaemonRestart [Disruptive]", func() {
 
 	framework := NewFramework("daemonrestart")
 	rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(util.NewUUID())
@@ -27,7 +27,7 @@ import (
 	. "github.com/onsi/gomega"
 )
 
-var _ = Describe("Etcd failure", func() {
+var _ = Describe("Etcd failure [Disruptive]", func() {
 
 	var skipped bool
 	framework := NewFramework("etcd-failure")
@@ -376,7 +376,7 @@ func (cont *IngressController) Cleanup(del bool) error {
 	return fmt.Errorf(errMsg)
 }
 
-var _ = Describe("GCE L7 LoadBalancer Controller", func() {
+var _ = Describe("GCE L7 LoadBalancer Controller [Serial]", func() {
 	// These variables are initialized after framework's beforeEach.
 	var ns string
 	var addonDir string
@@ -138,7 +138,7 @@ func verifyCPULimits(expected containersCPUSummary, actual nodesCPUSummary) {
 	}
 }
 
-var _ = Describe("Kubelet", func() {
+var _ = Describe("Kubelet [Serial]", func() {
 	var nodeNames sets.String
 	framework := NewFramework("kubelet-perf")
 	var rm *resourceMonitor
@@ -72,7 +72,7 @@ func computeAverage(sliceOfUsages []resourceUsagePerContainer) (result resourceU
 
 // This tests does nothing except checking current resource usage of containers defined in kubelet_stats systemContainers variable.
 // Test fails if an average container resource consumption over datapointAmount tries exceeds amount defined in allowedUsage.
-var _ = Describe("Resource usage of system containers", func() {
+var _ = Describe("Resource usage of system containers [Serial]", func() {
 	var c *client.Client
 	BeforeEach(func() {
 		var err error
@@ -63,7 +63,7 @@ const (
 // choose that node to be node with index 1.
 // 7. Observe that the pod in pending status schedules on that node.
 //
-var _ = Describe("NodeOutOfDisk", func() {
+var _ = Describe("NodeOutOfDisk [Serial]", func() {
 	var c *client.Client
 	var unfilledNodeName, recoveredNodeName string
 	framework := Framework{BaseName: "node-outofdisk"}
@@ -44,7 +44,7 @@ const (
 	rebootPodReadyAgainTimeout = 5 * time.Minute
 )
 
-var _ = Describe("Reboot", func() {
+var _ = Describe("Reboot [Disruptive]", func() {
 	var f *Framework
 
 	BeforeEach(func() {
@@ -388,7 +388,7 @@ func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replica
 	// network traffic is unblocked in a deferred function
 }
 
-var _ = Describe("Nodes", func() {
+var _ = Describe("Nodes [Disruptive]", func() {
 	framework := NewFramework("resize-nodes")
 	var systemPodsNo int
 	var c *client.Client
@@ -173,7 +173,7 @@ func waitForStableCluster(c *client.Client) int {
 	return len(scheduledPods)
 }
 
-var _ = Describe("SchedulerPredicates", func() {
+var _ = Describe("SchedulerPredicates [Serial]", func() {
 	var c *client.Client
 	var nodeList *api.NodeList
 	var totalPodCapacity int64
@@ -257,7 +257,7 @@ var _ = Describe("Services", func() {
 		expectNoError(stopServeHostnameService(c, ns, "service3"))
 	})
 
-	It("should work after restarting kube-proxy", func() {
+	It("should work after restarting kube-proxy [Disruptive]", func() {
 		SkipUnlessProviderIs("gce", "gke")
 
 		ns := f.Namespace.Name
@@ -308,7 +308,7 @@ var _ = Describe("Services", func() {
 		expectNoError(verifyServeHostnameServiceUp(c, host, podNames2, svc2IP, servicePort))
 	})
 
-	It("should work after restarting apiserver", func() {
+	It("should work after restarting apiserver [Disruptive]", func() {
 		// TODO: restartApiserver doesn't work in GKE - fix it and reenable this test.
 		SkipUnlessProviderIs("gce")