diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index d68db210eea..924cb0949f6 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -4080,3 +4080,23 @@ func UpdatePodWithRetries(client *client.Client, ns, name string, update func(*a
 	}
 	return nil, fmt.Errorf("Too many retries updating Pod %q", name)
 }
+
+// GetPodsInNamespace lists the pods in ns, dropping any pod whose labels match ignoreLabels.
+func GetPodsInNamespace(c *client.Client, ns string, ignoreLabels map[string]string) ([]*api.Pod, error) {
+	pods, err := c.Pods(ns).List(api.ListOptions{})
+	if err != nil {
+		return []*api.Pod{}, err
+	}
+	ignoreSelector := labels.SelectorFromSet(ignoreLabels)
+	filtered := []*api.Pod{}
+	for i := range pods.Items {
+		// Address the slice element, not a range variable: appending &p
+		// from a for-range loop would alias every entry to the same pod.
+		p := &pods.Items[i]
+		if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) {
+			continue
+		}
+		filtered = append(filtered, p)
+	}
+	return filtered, nil
+}
diff --git a/test/e2e/resize_nodes.go b/test/e2e/resize_nodes.go
index 2823519099d..c2b25e4647f 100644
--- a/test/e2e/resize_nodes.go
+++ b/test/e2e/resize_nodes.go
@@ -346,12 +346,14 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
 	var systemPodsNo int32
 	var c *client.Client
 	var ns string
+	ignoreLabels := framework.ImagePullerLabels
 	BeforeEach(func() {
 		c = f.Client
 		ns = f.Namespace.Name
-		systemPods, err := c.Pods(api.NamespaceSystem).List(api.ListOptions{})
+		systemPods, err := framework.GetPodsInNamespace(c, api.NamespaceSystem, ignoreLabels)
 		Expect(err).NotTo(HaveOccurred())
-		systemPodsNo = int32(len(systemPods.Items))
+		systemPodsNo = int32(len(systemPods))
+
 	})
 
 	// Slow issue #13323 (8 min)
@@ -396,7 +398,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
 
 			// the cluster is restored to health.
 			By("waiting for system pods to successfully restart")
-			err := framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, framework.ImagePullerLabels)
+			err := framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, ignoreLabels)
 			Expect(err).NotTo(HaveOccurred())
 		})
 
diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go
index e1a41c7ecd2..d8bf857404e 100644
--- a/test/e2e/scheduler_predicates.go
+++ b/test/e2e/scheduler_predicates.go
@@ -155,6 +155,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 	var totalPodCapacity int64
 	var RCName string
 	var ns string
+	ignoreLabels := framework.ImagePullerLabels
 
 	AfterEach(func() {
 		rc, err := c.ReplicationControllers(ns).Get(RCName)
@@ -187,16 +188,16 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 		// Every test case in this suite assumes that cluster add-on pods stay stable and
 		// cannot be run in parallel with any other test that touches Nodes or Pods.
 		// It is so because we need to have precise control on what's running in the cluster.
-		systemPods, err := c.Pods(api.NamespaceSystem).List(api.ListOptions{})
+		systemPods, err := framework.GetPodsInNamespace(c, api.NamespaceSystem, ignoreLabels)
 		Expect(err).NotTo(HaveOccurred())
 		systemPodsNo = 0
-		for _, pod := range systemPods.Items {
+		for _, pod := range systemPods {
 			if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
 				systemPodsNo++
 			}
 		}
 
-		err = framework.WaitForPodsRunningReady(api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, framework.ImagePullerLabels)
+		err = framework.WaitForPodsRunningReady(api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels)
 		Expect(err).NotTo(HaveOccurred())
 
 		for _, node := range nodeList.Items {
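A note on the loop in `GetPodsInNamespace`: the helper takes `&pods.Items[i]` rather than the address of the `range` variable. Under the Go semantics in effect for this code base (before Go 1.22), a `for _, p := range` loop reuses a single variable `p`, so appending `&p` would leave every element of `filtered` pointing at the same pod. A minimal, self-contained sketch of the pitfall, with made-up slice contents:

```go
package main

import "fmt"

func main() {
	items := []string{"a", "b", "c"}

	// Anti-pattern: &p aliases the single range variable, so every
	// stored pointer refers to the last element under pre-1.22 Go.
	bad := []*string{}
	for _, p := range items {
		bad = append(bad, &p)
	}
	fmt.Println(*bad[0], *bad[1], *bad[2]) // c c c

	// Correct: address each slice element directly.
	good := []*string{}
	for i := range items {
		good = append(good, &items[i])
	}
	fmt.Println(*good[0], *good[1], *good[2]) // a b c
}
```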
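Relatedly, the `len(ignoreLabels) != 0` guard in the helper is load-bearing: `labels.SelectorFromSet` over an empty set builds a selector with no requirements, which matches every label set, so without the guard an empty ignore map would filter out all pods instead of none. A hedged sketch against the 1.3-era `k8s.io/kubernetes/pkg/labels` package; the `e2e-image-puller` label value is an assumption standing in for whatever `framework.ImagePullerLabels` actually contains:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	// An empty set yields a selector with no requirements, which
	// vacuously matches every label set.
	empty := labels.SelectorFromSet(labels.Set{})
	fmt.Println(empty.Matches(labels.Set{"name": "kube-dns"})) // true

	// A populated ignore set matches only pods carrying those labels.
	// "e2e-image-puller" is illustrative, not the confirmed value of
	// framework.ImagePullerLabels.
	ignore := labels.SelectorFromSet(labels.Set{"name": "e2e-image-puller"})
	fmt.Println(ignore.Matches(labels.Set{"name": "e2e-image-puller"})) // true  -> dropped
	fmt.Println(ignore.Matches(labels.Set{"name": "kube-dns"}))         // false -> kept
}
```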