Added a sleep for the podGC to catch up.

The functionality used to exist entirely in the NC which would
previously clean up pods and nodes together. Now, we simply
wait for the PodGC to see that the node is now deleted and clean up the
pods. This may take a while, hence we sleep for one minute to give it time to complete.
This commit is contained in:
Anirudh 2016-11-03 16:45:16 -07:00
parent 27118fe23a
commit 9d0f1c2448
2 changed files with 3 additions and 23 deletions

View File

@ -1404,27 +1404,6 @@ func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D
return err
}
// WaitForPodAddition blocks until at least one pod is added in the given
// namespace, or the timeout elapses. It returns any error from establishing
// the watch or from waiting on it.
func WaitForPodAddition(c clientset.Interface, ns string, timeout time.Duration) error {
	// Restrict the watch to the target namespace via a field selector.
	listOpts := api.ListOptions{FieldSelector: fields.Set{
		"metadata.namespace": ns,
	}.AsSelector()}
	watcher, err := c.Core().Pods(ns).Watch(listOpts)
	if err != nil {
		return err
	}
	// Poll events until an Added event arrives or the timeout fires.
	_, err = watch.Until(timeout, watcher, func(ev watch.Event) (bool, error) {
		if ev.Type == watch.Added {
			// First pod addition observed — condition satisfied.
			return true, nil
		}
		Logf("Waiting for pod(s) to be added in namespace %v", ns)
		return false, nil
	})
	return err
}
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
Logf("Waiting for pod %s to disappear", podName)

View File

@ -307,8 +307,9 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
err = framework.WaitForClusterSize(c, int(replicas-1), 10*time.Minute)
Expect(err).NotTo(HaveOccurred())
By("waiting for podGC to remove/recreate any pods scheduled on the now non-existent node")
framework.WaitForPodAddition(c, ns, 2*time.Minute)
By("waiting 1 minute for the watch in the podGC to catch up, remove any pods scheduled on " +
"the now non-existent node and the RC to recreate it")
time.Sleep(time.Minute)
By("verifying whether the pods from the removed node are recreated")
err = framework.VerifyPods(c, ns, name, true, replicas)