Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 19:56:01 +00:00
Merge pull request #50084 from bskiba/ca_tolerate_unready
Automatic merge from submit-queue (batch tested with PRs 48237, 50084, 50019, 50069, 50090).

Allow some pods not to get scheduled in CA tests. This lets us ignore long-tail node creation, or the failure to create a few nodes, when running scalability tests on kubemark.

**Release note**:
```
NONE
```
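The core of the change is replacing the strict "zero unready pods" check with a tolerance threshold. Below is a minimal, self-contained sketch of that semantics; the function and pod names are illustrative, not taken from the PR:

```go
package main

import "fmt"

// withinTolerance mirrors the check this PR introduces:
// the wait succeeds once len(notready) <= tolerateUnreadyCount.
func withinTolerance(notready []string, tolerateUnreadyCount int) bool {
	return len(notready) <= tolerateUnreadyCount
}

func main() {
	notready := []string{"pod-a", "pod-b"} // two pods never became ready

	// Old behavior: only an empty not-ready list passes.
	fmt.Println(withinTolerance(notready, 0)) // false

	// New behavior: a small number of unready pods can be tolerated,
	// e.g. long-tail node creation failures on kubemark.
	fmt.Println(withinTolerance(notready, 3)) // true
}
```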
This commit is contained in: commit 9a26cdfb52
@@ -966,7 +966,7 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int)
 	return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
 }
 
-func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface) error {
+func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface, tolerateUnreadyCount int) error {
 	var notready []string
 	for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) {
 		pods, err := c.Core().Pods(f.Namespace.Name).List(metav1.ListOptions{})
@@ -990,18 +990,22 @@ func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interf
 				notready = append(notready, pod.Name)
 			}
 		}
-		if len(notready) == 0 {
-			glog.Infof("All pods ready")
+		if len(notready) <= tolerateUnreadyCount {
+			glog.Infof("sufficient number of pods ready. Tolerating %d unready", tolerateUnreadyCount)
 			return nil
 		}
-		glog.Infof("Some pods are not ready yet: %v", notready)
+		glog.Infof("Too many pods are not ready yet: %v", notready)
 	}
 	glog.Info("Timeout on waiting for pods being ready")
 	glog.Info(framework.RunKubectlOrDie("get", "pods", "-o", "json", "--all-namespaces"))
 	glog.Info(framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
 
 	// Some pods are still not running.
-	return fmt.Errorf("Some pods are still not running: %v", notready)
+	return fmt.Errorf("Too many pods are still not running: %v", notready)
+}
+
+func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface) error {
+	return waitForCaPodsReadyInNamespace(f, c, 0)
 }
 
 func getAnyNode(c clientset.Interface) *v1.Node {