e2e: fix the expectation of always running kube-system pods
Instruct the tests to ignore image prepull pods.
commit 17928cc1dc
parent 4357b8a0a6
@@ -4080,3 +4080,19 @@ func UpdatePodWithRetries(client *client.Client, ns, name string, update func(*a
 	}
 	return nil, fmt.Errorf("Too many retries updating Pod %q", name)
 }
+
+func GetPodsInNamespace(c *client.Client, ns string, ignoreLabels map[string]string) ([]*api.Pod, error) {
+	pods, err := c.Pods(ns).List(api.ListOptions{})
+	if err != nil {
+		return []*api.Pod{}, err
+	}
+	ignoreSelector := labels.SelectorFromSet(ignoreLabels)
+	filtered := []*api.Pod{}
+	for _, p := range pods.Items {
+		if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) {
+			continue
+		}
+		filtered = append(filtered, &p)
+	}
+	return filtered, nil
+}
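The new helper returns a []*api.Pod slice rather than an *api.PodList, which is why the call sites below can drop .Items. One caveat worth noting: filtered = append(filtered, &p) takes the address of the range variable, and under the loop semantics of Go versions contemporary with this commit (before Go 1.22) every iteration reuses that single variable, so all stored pointers end up referring to the last pod. A minimal standalone sketch of the pitfall and the conventional p := p fix; the pod struct here is an illustrative stand-in, not the real api.Pod:

package main

import "fmt"

type pod struct{ Name string }

func main() {
	items := []pod{{"kube-dns"}, {"kube-proxy"}, {"e2e-image-puller"}}

	// Buggy pattern (before Go 1.22): &p aliases the one loop variable,
	// so every element of aliased points at the last item.
	aliased := []*pod{}
	for _, p := range items {
		aliased = append(aliased, &p)
	}

	// Conventional fix: copy the loop variable before taking its address.
	copied := []*pod{}
	for _, p := range items {
		p := p // fresh variable per iteration
		copied = append(copied, &p)
	}

	fmt.Println(aliased[0].Name, aliased[1].Name) // e2e-image-puller e2e-image-puller
	fmt.Println(copied[0].Name, copied[1].Name)   // kube-dns kube-proxy
}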
@@ -346,12 +346,14 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
 	var systemPodsNo int32
 	var c *client.Client
 	var ns string
+	ignoreLabels := framework.ImagePullerLabels
 	BeforeEach(func() {
 		c = f.Client
 		ns = f.Namespace.Name
-		systemPods, err := c.Pods(api.NamespaceSystem).List(api.ListOptions{})
+		systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
 		Expect(err).NotTo(HaveOccurred())
-		systemPodsNo = int32(len(systemPods.Items))
+		systemPodsNo = int32(len(systemPods))
+
 	})

 	// Slow issue #13323 (8 min)
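GetPodsInNamespace builds its filter with labels.SelectorFromSet, so a pod is skipped when every key/value pair in the ignore set appears among the pod's labels; the len(ignoreLabels) != 0 guard matters because an empty set would otherwise produce a match-everything selector. A small sketch of that matching, assuming framework.ImagePullerLabels is a set like name: e2e-image-puller (its exact value lives in the e2e framework) and using the modern k8s.io/apimachinery/pkg/labels import path rather than the commit-era one:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Assumed shape of framework.ImagePullerLabels; illustrative only.
	ignore := labels.Set{"name": "e2e-image-puller"}
	sel := labels.SelectorFromSet(ignore)

	podLabels := []labels.Set{
		{"k8s-app": "kube-dns"},
		{"name": "e2e-image-puller"},
	}
	count := 0
	for _, l := range podLabels {
		if sel.Matches(l) {
			continue // skip prepull pods, mirroring GetPodsInNamespace
		}
		count++
	}
	fmt.Println("baseline system pods:", count) // 1
}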
@@ -396,7 +398,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
 		// the cluster is restored to health.
 		By("waiting for system pods to successfully restart")

-		err := framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, framework.ImagePullerLabels)
+		err := framework.WaitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, framework.PodReadyBeforeTimeout, ignoreLabels)
 		Expect(err).NotTo(HaveOccurred())
 	})

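After the restart the test waits until at least systemPodsNo pods in kube-system are running and ready, with the same ignoreLabels excluding prepull pods from the count as in the baseline. A rough sketch of that polling pattern, built around a hypothetical countRunningReady helper (the real framework.WaitForPodsRunningReady does considerably more bookkeeping):

package main

import (
	"fmt"
	"time"
)

// countRunningReady is a hypothetical stand-in for listing kube-system pods
// and counting the running-and-ready ones whose labels do not match
// ignoreLabels; wired to a fixed value here so the sketch runs.
func countRunningReady(ignoreLabels map[string]string) int32 { return 3 }

// waitForPodsRunningReady polls until minPods pods qualify or the timeout
// elapses, roughly mirroring the call the test makes.
func waitForPodsRunningReady(minPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
	deadline := time.Now().Add(timeout)
	for {
		if countRunningReady(ignoreLabels) >= minPods {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for %d system pods", minPods)
		}
		time.Sleep(5 * time.Second)
	}
}

func main() {
	err := waitForPodsRunningReady(3, time.Minute, map[string]string{"name": "e2e-image-puller"})
	fmt.Println(err) // <nil>
}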
@@ -155,6 +155,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 	var totalPodCapacity int64
 	var RCName string
 	var ns string
+	ignoreLabels := framework.ImagePullerLabels

 	AfterEach(func() {
 		rc, err := c.ReplicationControllers(ns).Get(RCName)
@@ -187,16 +188,16 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 	// Every test case in this suite assumes that cluster add-on pods stay stable and
 	// cannot be run in parallel with any other test that touches Nodes or Pods.
 	// It is so because we need to have precise control on what's running in the cluster.
-	systemPods, err := c.Pods(api.NamespaceSystem).List(api.ListOptions{})
+	systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
 	Expect(err).NotTo(HaveOccurred())
 	systemPodsNo = 0
-	for _, pod := range systemPods.Items {
+	for _, pod := range systemPods {
 		if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
 			systemPodsNo++
 		}
 	}

-	err = framework.WaitForPodsRunningReady(api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, framework.ImagePullerLabels)
+	err = framework.WaitForPodsRunningReady(api.NamespaceSystem, int32(systemPodsNo), framework.PodReadyBeforeTimeout, ignoreLabels)
 	Expect(err).NotTo(HaveOccurred())

 	for _, node := range nodeList.Items {
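With GetPodsInNamespace the SchedulerPredicates suite now ranges over a plain []*api.Pod, and prepull pods are already absent from the slice, so the loop only has to exclude pods scheduled to master nodes and pods that are terminating. A compact illustration of that count, using stand-in types rather than the real api.Pod:

package main

import "fmt"

// Minimal stand-ins for the fields the test reads; illustrative only.
type pod struct {
	NodeName          string
	DeletionTimestamp *string
}

func main() {
	masterNodes := map[string]bool{"master": true}
	deleting := "2016-08-18T00:00:00Z"
	systemPods := []*pod{
		{NodeName: "node-1"},                               // counted
		{NodeName: "master"},                               // on a master, skipped
		{NodeName: "node-2", DeletionTimestamp: &deleting}, // terminating, skipped
	}

	// Mirrors the test's loop: the slice already excludes image prepull
	// pods, so only placement and deletion state need checking here.
	systemPodsNo := 0
	for _, p := range systemPods {
		if !masterNodes[p.NodeName] && p.DeletionTimestamp == nil {
			systemPodsNo++
		}
	}
	fmt.Println(systemPodsNo) // 1
}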