From d65cab42bf1f494d151179b47a73c40fa3543bb8 Mon Sep 17 00:00:00 2001
From: Dawn Chen
Date: Thu, 4 Feb 2016 17:26:42 -0800
Subject: [PATCH] Scheduler predicate test explicitly assumes that cluster
 add-on pods stay stable, but never checks it before each test run. Adding
 the check for each test case.

---
 test/e2e/scheduler_predicates.go | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go
index 3807af75d0e..a1155008bdf 100644
--- a/test/e2e/scheduler_predicates.go
+++ b/test/e2e/scheduler_predicates.go
@@ -179,6 +179,7 @@ func waitForStableCluster(c *client.Client) int {
 var _ = Describe("SchedulerPredicates [Serial]", func() {
 	var c *client.Client
 	var nodeList *api.NodeList
+	var systemPodsNo int
 	var totalPodCapacity int64
 	var RCName string
 	var ns string
@@ -198,6 +199,16 @@ var _ = Describe("SchedulerPredicates [Serial]", func() {
 		c = framework.Client
 		ns = framework.Namespace.Name
 		nodeList = ListSchedulableNodesOrDie(c)
+
+		// Every test case in this suite assumes that cluster add-on pods stay stable and
+		// cannot be run in parallel with any other test that touches Nodes or Pods.
+		// It is so because we need to have precise control on what's running in the cluster.
+		systemPods, err := c.Pods(api.NamespaceSystem).List(api.ListOptions{})
+		Expect(err).NotTo(HaveOccurred())
+		systemPodsNo = len(systemPods.Items)
+
+		err = waitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, podReadyBeforeTimeout)
+		Expect(err).NotTo(HaveOccurred())
 	})
 
 	// This test verifies that max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
@@ -284,7 +295,7 @@ var _ = Describe("SchedulerPredicates [Serial]", func() {
 			_, found := nodeToCapacityMap[pod.Spec.NodeName]
 			Expect(found).To(Equal(true))
 			if pod.Status.Phase == api.PodRunning {
-				Logf("Pod %v requesting capacity %v on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
+				Logf("Pod %v requesting resource %v on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
 				nodeToCapacityMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
 			}
 		}
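
Note: the BeforeEach hunk above counts the pods in kube-system and then blocks
on the framework's waitForPodsRunningReady helper until that many report
Running. As a rough illustration of that polling pattern only, here is a
minimal, self-contained Go sketch; the listPhases type and the
waitForPodsRunningReadySketch name are hypothetical stand-ins, not the e2e
framework's actual API.

package main

import (
	"errors"
	"fmt"
	"time"
)

// podPhase mimics the subset of pod state the check cares about.
type podPhase string

const podRunning podPhase = "Running"

// listPhases is a hypothetical stand-in for listing the pods in a namespace.
type listPhases func(namespace string) ([]podPhase, error)

// waitForPodsRunningReadySketch polls until at least minPods in the namespace
// report Running, or the timeout elapses. It mirrors the shape of the helper
// the patch calls, not its exact implementation.
func waitForPodsRunningReadySketch(list listPhases, namespace string, minPods int, timeout time.Duration) error {
	const pollInterval = 5 * time.Second
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		phases, err := list(namespace)
		if err != nil {
			return err
		}
		running := 0
		for _, p := range phases {
			if p == podRunning {
				running++
			}
		}
		if running >= minPods {
			return nil
		}
		fmt.Printf("%d/%d pods running in %q; retrying\n", running, minPods, namespace)
		time.Sleep(pollInterval)
	}
	return errors.New("timed out waiting for pods to be running and ready")
}

func main() {
	// Fake lister: every pod is already Running, so the wait succeeds at once.
	list := func(string) ([]podPhase, error) {
		return []podPhase{podRunning, podRunning, podRunning}, nil
	}
	if err := waitForPodsRunningReadySketch(list, "kube-system", 3, time.Minute); err != nil {
		fmt.Println("error:", err)
	}
}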