Merge pull request #61718 from liggitt/narrow-scheduler-e2e-check

Automatic merge from submit-queue (batch tested with PRs 65116, 61718, 65140, 65128, 65099). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Narrow e2e pre-check on scheduler predicates

`WaitForAllNodesHealthy` actually checks that optional add-ons are running on all nodes. The scheduler predicate tests only care about node readiness, so check just for that.

Hit this while running the scheduler e2e tests downstream on clusters that do not deploy those optional add-ons: the tests would just hang and then fail unnecessarily.
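For context, here is a minimal sketch of what a readiness-only pre-check looks like against the client-go API of this era. It is illustrative only; `allNodesReady` below is a hypothetical helper, not the `framework.AllNodesReady` implementation used in the diff.

```go
package e2esketch

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// allNodesReady polls until every node reports the NodeReady condition as True.
// Unlike an add-on health check, it never inspects pods, so it also passes on
// clusters that do not deploy the optional add-ons.
func allNodesReady(cs clientset.Interface, timeout time.Duration) error {
	return wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
		nodes, err := cs.CoreV1().Nodes().List(metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		for _, node := range nodes.Items {
			ready := false
			for _, cond := range node.Status.Conditions {
				if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue {
					ready = true
					break
				}
			}
			if !ready {
				// At least one node is not Ready yet; keep polling.
				return false, nil
			}
		}
		return true, nil
	})
}
```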

/sig scheduling

```release-note
NONE
```
commit 970b12c258
Author: Kubernetes Submit Queue (committed by GitHub)
Date:   2018-06-21 13:59:09 -07:00

@@ -60,12 +60,10 @@ type pausePodConfig struct {
 var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 	var cs clientset.Interface
 	var nodeList *v1.NodeList
-	var systemPodsNo int
 	var totalPodCapacity int64
 	var RCName string
 	var ns string
 	f := framework.NewDefaultFramework("sched-pred")
-	ignoreLabels := framework.ImagePullerLabels
 	AfterEach(func() {
 		rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{})
@@ -81,30 +79,12 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		ns = f.Namespace.Name
 		nodeList = &v1.NodeList{}
-		framework.WaitForAllNodesHealthy(cs, time.Minute)
+		framework.AllNodesReady(cs, time.Minute)
 		masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
 		err := framework.CheckTestingNSDeletedExcept(cs, ns)
 		framework.ExpectNoError(err)
-		// Every test case in this suite assumes that cluster add-on pods stay stable and
-		// cannot be run in parallel with any other test that touches Nodes or Pods.
-		// It is so because we need to have precise control on what's running in the cluster.
-		systemPods, err := framework.GetPodsInNamespace(cs, ns, ignoreLabels)
-		Expect(err).NotTo(HaveOccurred())
-		systemPodsNo = 0
-		for _, pod := range systemPods {
-			if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
-				systemPodsNo++
-			}
-		}
-		err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, ignoreLabels)
-		Expect(err).NotTo(HaveOccurred())
-		err = framework.WaitForPodsSuccess(cs, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout)
-		Expect(err).NotTo(HaveOccurred())
 		for _, node := range nodeList.Items {
 			framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
 			framework.PrintAllKubeletPods(cs, node.Name)