Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-19 18:02:01 +00:00)
cleanup: remove TODO at e2e scheduling preemption test
Signed-off-by: Konstantin Misyutin <konstantin.misyutin@huawei.com>
This commit is contained in:
parent
1d0e5ac583
commit
351f4e9c9c
@@ -435,6 +435,21 @@ func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods
 	return wait.PollImmediate(poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
 }
 
+// WaitForNumberOfPods waits up to timeout to ensure there are exact
+// `num` pods in namespace `ns`.
+// It returns the matching Pods or a timeout error.
+func WaitForNumberOfPods(c clientset.Interface, ns string, num int, timeout time.Duration) (pods *v1.PodList, err error) {
+	err = wait.PollImmediate(poll, timeout, func() (bool, error) {
+		pods, err = c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
+		// ignore intermittent network error
+		if err != nil {
+			return false, nil
+		}
+		return len(pods.Items) == num, nil
+	})
+	return
+}
+
 // WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and at least one
 // matching pod exists. Return the list of matching pods.
 func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
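The helper added above wraps a common "poll the pod count until it settles" pattern around the framework-internal `poll` interval, which is not visible in this hunk. As a minimal, self-contained sketch of the same pattern using only client-go (the package name, function name, and explicit interval parameter below are illustrative assumptions, not part of this commit):

package podwait

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForPodCount polls until namespace ns holds exactly num pods or the
// timeout expires. Transient List errors are treated as "not done yet" so
// the poll keeps retrying, mirroring the behavior of the helper added above.
func waitForPodCount(c clientset.Interface, ns string, num int, interval, timeout time.Duration) (*v1.PodList, error) {
	var pods *v1.PodList
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		var listErr error
		pods, listErr = c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
		if listErr != nil {
			// ignore intermittent network errors and keep polling
			return false, nil
		}
		return len(pods.Items) == num, nil
	})
	return pods, err
}

Returning `(false, nil)` on a List error is the design choice that makes the wait tolerant of intermittent network failures: only the timeout, not a single flaky request, fails the test.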
@@ -430,22 +430,12 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 		ginkgo.By("Verify there are 3 Pods left in this namespace")
 		wantPods := sets.NewString("high", "medium", "low")
 
-		var pods []v1.Pod
 		// Wait until the number of pods stabilizes. Note that `medium` pod can get scheduled once the
 		// second low priority pod is marked as terminating.
-		// TODO: exact the wait.PollImmediate block to framework.WaitForNumberOfRunningPods.
-		err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
-			podList, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
-			// ignore intermittent network error
-			if err != nil {
-				return false, nil
-			}
-			pods = podList.Items
-			return len(pods) == 3, nil
-		})
+		pods, err := e2epod.WaitForNumberOfPods(cs, ns, 3, framework.PollShortTimeout)
 		framework.ExpectNoError(err)
 
-		for _, pod := range pods {
+		for _, pod := range pods.Items {
 			// Remove the ordinal index for low pod.
 			podName := strings.Split(pod.Name, "-")[0]
 			if wantPods.Has(podName) {
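Because the helper returns a `*v1.PodList` rather than a `[]v1.Pod`, callers now range over `pods.Items`, as the hunk above shows. A hedged usage sketch for any other e2e test that needs a namespace's pod count to settle (the clientset `cs`, namespace `ns`, target count, and timeout below are placeholders, not part of this commit):

// Illustrative only — assumes the conventional in-tree e2e aliases:
//   framework "k8s.io/kubernetes/test/e2e/framework"
//   e2epod    "k8s.io/kubernetes/test/e2e/framework/pod"
pods, err := e2epod.WaitForNumberOfPods(cs, ns, 0, framework.PollShortTimeout)
framework.ExpectNoError(err)
framework.Logf("%d pods remain in namespace %q", len(pods.Items), ns)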