diff --git a/test/e2e/framework/job/wait.go b/test/e2e/framework/job/wait.go
index 8df3b60cd6c..b975460087f 100644
--- a/test/e2e/framework/job/wait.go
+++ b/test/e2e/framework/job/wait.go
@@ -17,15 +17,12 @@ limitations under the License.
 package job
 
 import (
-	"fmt"
-	"strings"
 	"time"
 
 	batchv1 "k8s.io/api/batch/v1"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	jobutil "k8s.io/kubernetes/pkg/controller/job"
@@ -101,30 +98,6 @@ func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Dura
 	})
 }
 
-// EnsureAllJobPodsRunning uses c to check in the Job named jobName in ns
-// is running, returning an error if the expected parallelism is not
-// satisfied.
-func EnsureAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
-	label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName}))
-	options := metav1.ListOptions{LabelSelector: label.String()}
-	pods, err := c.CoreV1().Pods(ns).List(options)
-	if err != nil {
-		return err
-	}
-	podsSummary := make([]string, 0, parallelism)
-	count := int32(0)
-	for _, p := range pods.Items {
-		if p.Status.Phase == v1.PodRunning {
-			count++
-		}
-		podsSummary = append(podsSummary, fmt.Sprintf("%s (%s: %s)", p.ObjectMeta.Name, p.Status.Phase, p.Status.Message))
-	}
-	if count != parallelism {
-		return fmt.Errorf("job has %d of %d expected running pods: %s", count, parallelism, strings.Join(podsSummary, ", "))
-	}
-	return nil
-}
-
 // WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns
 // to be deleted.
 func WaitForAllJobPodsGone(c clientset.Interface, ns, jobName string) error {
diff --git a/test/e2e/upgrades/apps/BUILD b/test/e2e/upgrades/apps/BUILD
index ae94a7f636e..080c0134191 100644
--- a/test/e2e/upgrades/apps/BUILD
+++ b/test/e2e/upgrades/apps/BUILD
@@ -26,6 +26,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/deployment:go_default_library",
         "//test/e2e/framework/job:go_default_library",
diff --git a/test/e2e/upgrades/apps/job.go b/test/e2e/upgrades/apps/job.go
index 515bee7aa82..e71d4359635 100644
--- a/test/e2e/upgrades/apps/job.go
+++ b/test/e2e/upgrades/apps/job.go
@@ -17,8 +17,14 @@ limitations under the License.
 package upgrades
 
 import (
+	"fmt"
+	"strings"
+
 	batchv1 "k8s.io/api/batch/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	jobutil "k8s.io/kubernetes/test/e2e/framework/job"
 	"k8s.io/kubernetes/test/e2e/upgrades"
@@ -54,7 +60,7 @@ func (t *JobUpgradeTest) Setup(f *framework.Framework) {
 func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
 	<-done
 	ginkgo.By("Ensuring active pods == parallelism")
-	err := jobutil.EnsureAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
+	err := ensureAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
 	framework.ExpectNoError(err)
 }
 
@@ -62,3 +68,27 @@ func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgr
 func (t *JobUpgradeTest) Teardown(f *framework.Framework) {
 	// rely on the namespace deletion to clean up everything
 }
+
+// ensureAllJobPodsRunning uses c to check if the pods of the Job named
+// jobName in ns are running, returning an error if the expected
+// parallelism is not satisfied.
+func ensureAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
+	label := labels.SelectorFromSet(labels.Set(map[string]string{jobutil.JobSelectorKey: jobName}))
+	options := metav1.ListOptions{LabelSelector: label.String()}
+	pods, err := c.CoreV1().Pods(ns).List(options)
+	if err != nil {
+		return err
+	}
+	podsSummary := make([]string, 0, parallelism)
+	count := int32(0)
+	for _, p := range pods.Items {
+		if p.Status.Phase == v1.PodRunning {
+			count++
+		}
+		podsSummary = append(podsSummary, fmt.Sprintf("%s (%s: %s)", p.ObjectMeta.Name, p.Status.Phase, p.Status.Message))
+	}
+	if count != parallelism {
+		return fmt.Errorf("job has %d of %d expected running pods: %s", count, parallelism, strings.Join(podsSummary, ", "))
+	}
+	return nil
+}