diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index b1e0363e703..cb90e597a7f 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -437,17 +437,6 @@ func errorBadPodsStates(badPods []api.Pod, desiredPods int, ns, desiredState str
 	return errStr + buf.String()
 }
 
-// check if a Pod is controlled by a Replication Controller in the List
-func hasReplicationControllersForPod(rcs *api.ReplicationControllerList, pod api.Pod) bool {
-	for _, rc := range rcs.Items {
-		selector := labels.SelectorFromSet(rc.Spec.Selector)
-		if selector.Matches(labels.Set(pod.ObjectMeta.Labels)) {
-			return true
-		}
-	}
-	return false
-}
-
 // WaitForPodsSuccess waits till all labels matching the given selector enter
 // the Success state. The caller is expected to only invoke this method once the
 // pods have been created.
@@ -490,9 +479,9 @@ func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[s
 
 // WaitForPodsRunningReady waits up to timeout to ensure that all pods in
 // namespace ns are either running and ready, or failed but controlled by a
-// replication controller. Also, it ensures that at least minPods are running
-// and ready. It has separate behavior from other 'wait for' pods functions in
-// that it requires the list of pods on every iteration. This is useful, for
+// controller. Also, it ensures that at least minPods are running and
+// ready. It has separate behavior from other 'wait for' pods functions in
+// that it requests the list of pods on every iteration. This is useful, for
 // example, in cluster startup, because the number of pods increases while
 // waiting.
 // If ignoreLabels is not empty, pods matching this selector are ignored and
@@ -517,17 +506,30 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, ti
 	}()
 
 	if wait.PollImmediate(Poll, timeout, func() (bool, error) {
-		// We get the new list of pods and replication controllers in every
-		// iteration because more pods come online during startup and we want to
-		// ensure they are also checked.
+		// We get the new list of pods, replication controllers, and
+		// replica sets in every iteration because more pods come
+		// online during startup and we want to ensure they are also
+		// checked.
+		replicas, replicaOk := int32(0), int32(0)
+
 		rcList, err := c.Core().ReplicationControllers(ns).List(api.ListOptions{})
 		if err != nil {
 			Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
 			return false, nil
 		}
-		replicas := int32(0)
 		for _, rc := range rcList.Items {
 			replicas += rc.Spec.Replicas
+			replicaOk += rc.Status.ReadyReplicas
+		}
+
+		rsList, err := c.Extensions().ReplicaSets(ns).List(api.ListOptions{})
+		if err != nil {
+			Logf("Error getting replication sets in namespace %q: %v", ns, err)
+			return false, nil
+		}
+		for _, rs := range rsList.Items {
+			replicas += rs.Spec.Replicas
+			replicaOk += rs.Status.ReadyReplicas
 		}
 
 		podList, err := c.Core().Pods(ns).List(api.ListOptions{})
@@ -535,7 +537,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, ti
 			Logf("Error getting pods in namespace '%s': %v", ns, err)
 			return false, nil
 		}
-		nOk, replicaOk := int32(0), int32(0)
+		nOk := int32(0)
 		badPods = []api.Pod{}
 		desiredPods = len(podList.Items)
 		for _, pod := range podList.Items {
@@ -545,18 +547,15 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, ti
 			}
 			if res, err := testutils.PodRunningReady(&pod); res && err == nil {
 				nOk++
-				if hasReplicationControllersForPod(rcList, pod) {
-					replicaOk++
-				}
 			} else {
 				if pod.Status.Phase != api.PodFailed {
 					Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
 					badPods = append(badPods, pod)
-				} else if !hasReplicationControllersForPod(rcList, pod) {
-					Logf("Pod %s is Failed, but it's not controlled by a ReplicationController", pod.ObjectMeta.Name)
+				} else if _, ok := pod.Annotations[api.CreatedByAnnotation]; !ok {
+					Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
 					badPods = append(badPods, pod)
 				}
-				//ignore failed pods that are controlled by a replication controller
+				//ignore failed pods that are controlled by some controller
 			}
 		}
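
For reference, a minimal self-contained sketch of the readiness tally this patch introduces. The counts type and tally function are hypothetical stand-ins for illustration, not part of the patch; the real code reads Spec.Replicas and Status.ReadyReplicas from the listed ReplicationControllers and ReplicaSets, as shown in the hunks above.

package main

import "fmt"

// counts mirrors the two fields the patched poll reads from each
// controller: Spec.Replicas and Status.ReadyReplicas (stand-in type).
type counts struct {
	specReplicas  int32 // desired replica count (Spec.Replicas)
	readyReplicas int32 // ready count the controller reports (Status.ReadyReplicas)
}

// tally sums desired and ready replicas across both controller kinds,
// the same aggregation the new poll body performs over rcList.Items
// and rsList.Items.
func tally(rcs, rss []counts) (replicas, replicaOk int32) {
	for _, rc := range rcs {
		replicas += rc.specReplicas
		replicaOk += rc.readyReplicas
	}
	for _, rs := range rss {
		replicas += rs.specReplicas
		replicaOk += rs.readyReplicas
	}
	return replicas, replicaOk
}

func main() {
	rcs := []counts{{specReplicas: 3, readyReplicas: 3}}
	rss := []counts{{specReplicas: 2, readyReplicas: 1}}
	replicas, replicaOk := tally(rcs, rss)
	fmt.Printf("%d/%d replicas ready\n", replicaOk, replicas) // prints "4/5 replicas ready"
}

The design shift, as far as the diff shows: instead of matching each ready pod's labels against every ReplicationController selector (which ignored ReplicaSets entirely), the poll now trusts the ReadyReplicas count each controller already reports in its status, and covers ReplicaSets as well.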