diff --git a/test/e2e/BUILD b/test/e2e/BUILD index 33ea5709d42..6f673436a3d 100644 --- a/test/e2e/BUILD +++ b/test/e2e/BUILD @@ -68,6 +68,7 @@ go_library( "//test/e2e/framework/ginkgowrapper:go_default_library", "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/metrics:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/providers/aws:go_default_library", "//test/e2e/framework/providers/azure:go_default_library", "//test/e2e/framework/providers/gce:go_default_library", diff --git a/test/e2e/apimachinery/BUILD b/test/e2e/apimachinery/BUILD index 1b8ff61b354..c3e240a3081 100644 --- a/test/e2e/apimachinery/BUILD +++ b/test/e2e/apimachinery/BUILD @@ -84,6 +84,7 @@ go_library( "//test/e2e/framework/deployment:go_default_library", "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/metrics:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/utils:go_default_library", "//test/utils/crd:go_default_library", diff --git a/test/e2e/apimachinery/aggregator.go b/test/e2e/apimachinery/aggregator.go index 396254897ff..d5204b9ff96 100644 --- a/test/e2e/apimachinery/aggregator.go +++ b/test/e2e/apimachinery/aggregator.go @@ -42,6 +42,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1" "k8s.io/utils/pointer" @@ -382,7 +383,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl if currentPods != nil { for _, pod := range currentPods.Items { for _, container := range pod.Spec.Containers { - logs, err := framework.GetPodLogs(client, namespace, pod.Name, container.Name) + logs, err := e2epod.GetPodLogs(client, namespace, pod.Name, container.Name) e2elog.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs) } } diff --git a/test/e2e/apimachinery/namespace.go b/test/e2e/apimachinery/namespace.go index 5ae4daf346a..642df51c872 100644 --- a/test/e2e/apimachinery/namespace.go +++ b/test/e2e/apimachinery/namespace.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -111,7 +112,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) { framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, namespace.Name) ginkgo.By("Waiting for the pod to have running status") - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) ginkgo.By("Deleting the namespace") err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil) diff --git a/test/e2e/apimachinery/webhook.go b/test/e2e/apimachinery/webhook.go index 7cbc6e71736..ce02ab84fd1 100644 --- a/test/e2e/apimachinery/webhook.go +++ b/test/e2e/apimachinery/webhook.go @@ -40,6 +40,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/utils/crd" 
imageutils "k8s.io/kubernetes/test/utils/image" "k8s.io/utils/pointer" @@ -818,7 +819,7 @@ func testAttachingPodWebhook(f *framework.Framework) { pod := toBeAttachedPod(f) _, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod) framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name) - err = framework.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name) + err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name) framework.ExpectNoError(err, "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name) ginkgo.By("'kubectl attach' the pod, should be denied by the webhook") diff --git a/test/e2e/apps/BUILD b/test/e2e/apps/BUILD index 1b4190f9762..22718f848d6 100644 --- a/test/e2e/apps/BUILD +++ b/test/e2e/apps/BUILD @@ -65,6 +65,7 @@ go_library( "//test/e2e/framework/deployment:go_default_library", "//test/e2e/framework/job:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/replicaset:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index 34a37e8cfc0..9cd52901909 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -41,6 +41,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/replicaset" testutil "k8s.io/kubernetes/test/utils" utilpointer "k8s.io/utils/pointer" @@ -274,7 +275,7 @@ func testRollingUpdateDeployment(f *framework.Framework) { _, err := c.AppsV1().ReplicaSets(ns).Create(rs) framework.ExpectNoError(err) // Verify that the required pods have come up. - err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) + err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas) framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err) // Create a deployment to delete nginx pods and instead bring up redis pods. @@ -352,7 +353,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { framework.ExpectNoError(err) // Verify that the required pods have come up. - err = framework.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas) + err = e2epod.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas) framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) // Create a deployment to delete nginx pods and instead bring up redis pods. @@ -422,7 +423,7 @@ func testRolloverDeployment(f *framework.Framework) { _, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage)) framework.ExpectNoError(err) // Verify that the required pods have come up. - err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas) + err = e2epod.VerifyPodsRunning(c, ns, podName, false, rsReplicas) framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) // Wait for replica set to become ready before adopting it. @@ -859,7 +860,7 @@ func testProportionalScalingDeployment(f *framework.Framework) { // Verify that the required pods have come up. 
e2elog.Logf("Waiting for all required pods to come up") - err = framework.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas)) + err = e2epod.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas)) framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) e2elog.Logf("Waiting for deployment %q to complete", deployment.Name) diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index b9a433c5081..3fae9103f46 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -26,6 +26,7 @@ import ( batchinternal "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/test/e2e/framework" jobutil "k8s.io/kubernetes/test/e2e/framework/job" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -156,7 +157,7 @@ var _ = SIGDescribe("Job", func() { }) ginkgo.By("Checking that the Job readopts the Pod") - gomega.Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", jobutil.JobTimeout, + gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", jobutil.JobTimeout, func(pod *v1.Pod) (bool, error) { controllerRef := metav1.GetControllerOf(pod) if controllerRef == nil { @@ -175,7 +176,7 @@ var _ = SIGDescribe("Job", func() { }) ginkgo.By("Checking that the Job releases the Pod") - gomega.Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", jobutil.JobTimeout, + gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", jobutil.JobTimeout, func(pod *v1.Pod) (bool, error) { controllerRef := metav1.GetControllerOf(pod) if controllerRef != nil { diff --git a/test/e2e/apps/network_partition.go b/test/e2e/apps/network_partition.go index 371c39397b6..935fd0e02c1 100644 --- a/test/e2e/apps/network_partition.go +++ b/test/e2e/apps/network_partition.go @@ -38,6 +38,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" jobutil "k8s.io/kubernetes/test/e2e/framework/job" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" "github.com/onsi/ginkgo" @@ -112,7 +113,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name - _, err := framework.GetPodsInNamespace(c, ns, map[string]string{}) + _, err := e2epod.GetPodsInNamespace(c, ns, map[string]string{}) framework.ExpectNoError(err) // TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed. 
@@ -157,7 +158,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { } node := nodes.Items[0] podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} - if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil { + if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil { framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) } @@ -213,7 +214,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { ginkgo.By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers") expectNodeReadiness(true, newNode) - if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil { + if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil { framework.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err) } }() @@ -224,7 +225,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition") expectNodeReadiness(false, newNode) - if err = framework.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil { + if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil { framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err) } }) @@ -243,7 +244,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { framework.ExpectNoError(err) replicas := int32(numNodes) common.NewRCByName(c, ns, name, replicas, nil) - err = framework.VerifyPods(c, ns, name, true, replicas) + err = e2epod.VerifyPods(c, ns, name, true, replicas) framework.ExpectNoError(err, "Each pod should start running and responding") ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node") @@ -268,7 +269,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { framework.ExpectNoError(err) ginkgo.By("verifying whether the pod from the unreachable node is recreated") - err = framework.VerifyPods(c, ns, name, true, replicas) + err = e2epod.VerifyPods(c, ns, name, true, replicas) framework.ExpectNoError(err) }) @@ -286,7 +287,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { additionalPod := "additionalpod" err = newPodOnNode(c, ns, additionalPod, node.Name) framework.ExpectNoError(err) - err = framework.VerifyPods(c, ns, additionalPod, true, 1) + err = e2epod.VerifyPods(c, ns, additionalPod, true, 1) framework.ExpectNoError(err) // verify that it is really on the requested node @@ -310,7 +311,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { framework.ExpectNoError(err) replicas := int32(numNodes) common.NewRCByName(c, ns, name, replicas, &gracePeriod) - err = framework.VerifyPods(c, ns, name, true, replicas) + err = e2epod.VerifyPods(c, ns, name, true, replicas) framework.ExpectNoError(err, "Each pod should start running and responding") ginkgo.By("choose a node with at least one pod - we will block some network traffic on this 
node") @@ -335,7 +336,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.") ginkgo.By(fmt.Sprintf("verifying that there are %v running pods during partition", replicas)) - _, err = framework.PodsCreated(c, ns, name, replicas) + _, err = e2epod.PodsCreated(c, ns, name, replicas) framework.ExpectNoError(err) }) @@ -408,7 +409,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { // The grace period on the stateful pods is set to a value > 0. framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() { e2elog.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name) - err := framework.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, 10*time.Minute) + err := e2epod.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, 10*time.Minute) gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.") }) @@ -435,7 +436,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { label := labels.SelectorFromSet(labels.Set(map[string]string{jobutil.JobSelectorKey: job.Name})) ginkgo.By(fmt.Sprintf("verifying that there are now %v running pods", parallelism)) - _, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label) + _, err = e2epod.PodsCreatedByLabel(c, ns, job.Name, parallelism, label) framework.ExpectNoError(err) ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node") @@ -452,11 +453,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name)) framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() { e2elog.Logf("Waiting for pod %s to be removed", pods.Items[0].Name) - err := framework.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute) + err := e2epod.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute) gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.") ginkgo.By(fmt.Sprintf("verifying that there are now %v running pods", parallelism)) - _, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label) + _, err = e2epod.PodsCreatedByLabel(c, ns, job.Name, parallelism, label) framework.ExpectNoError(err) }) @@ -500,7 +501,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { } node := nodes.Items[0] podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} - if err := framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil { + if err := e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil { framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) } pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts) @@ -606,7 +607,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { framework.ExpectNoError(wait.Poll(1*time.Second, timeout, func() (bool, error) { return framework.NodeHasTaint(c, node.Name, nodepkg.UnreachableTaintTemplate) })) - if err = framework.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err 
!= nil { + if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil { framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err) } diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 41725c433f4..00f4af454b7 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/controller/replication" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -129,7 +130,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri // Check that pods for the new RC were created. // TODO: Maybe switch PodsCreated to just check owner references. - pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas) + pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas) framework.ExpectNoError(err) // Wait for the pods to enter the running state. Waiting loops until the pods @@ -164,7 +165,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri retryTimeout := 2 * time.Minute retryInterval := 5 * time.Second label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - err = wait.Poll(retryInterval, retryTimeout, framework.NewPodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) + err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) if err != nil { framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) } @@ -312,7 +313,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) { framework.ExpectNoError(err) ginkgo.By("When the matched label of one of its pods change") - pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rc.Name, replicas) + pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rc.Name, replicas) framework.ExpectNoError(err) p := pods.Items[0] diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index a892a2a04a9..2b585c69865 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -31,6 +31,7 @@ import ( "k8s.io/kubernetes/pkg/controller/replicaset" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset" "github.com/onsi/ginkgo" @@ -131,7 +132,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s // Check that pods for the new RS were created. // TODO: Maybe switch PodsCreated to just check owner references. - pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas) + pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas) framework.ExpectNoError(err) // Wait for the pods to enter the running state. 
Waiting loops until the pods @@ -166,7 +167,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s retryTimeout := 2 * time.Minute retryInterval := 5 * time.Second label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - err = wait.Poll(retryInterval, retryTimeout, framework.NewPodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) + err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) if err != nil { framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) } @@ -305,7 +306,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) { framework.ExpectNoError(err) ginkgo.By("When the matched label of one of its pods change") - pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rs.Name, replicas) + pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rs.Name, replicas) framework.ExpectNoError(err) p = &pods.Items[0] diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index eb7f5e9ef9c..9f070f62da4 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -35,6 +35,7 @@ import ( watchtools "k8s.io/client-go/tools/watch" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -166,7 +167,7 @@ var _ = SIGDescribe("StatefulSet", func() { }) ginkgo.By("Checking that the stateful set readopts the pod") - gomega.Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout, + gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout, func(pod *v1.Pod) (bool, error) { controllerRef := metav1.GetControllerOf(pod) if controllerRef == nil { @@ -186,7 +187,7 @@ var _ = SIGDescribe("StatefulSet", func() { }) ginkgo.By("Checking that the stateful set releases the pod") - gomega.Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "released", framework.StatefulSetTimeout, + gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "released", framework.StatefulSetTimeout, func(pod *v1.Pod) (bool, error) { controllerRef := metav1.GetControllerOf(pod) if controllerRef != nil { @@ -203,7 +204,7 @@ var _ = SIGDescribe("StatefulSet", func() { }) ginkgo.By("Checking that the stateful set readopts the pod") - gomega.Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout, + gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout, func(pod *v1.Pod) (bool, error) { controllerRef := metav1.GetControllerOf(pod) if controllerRef == nil { diff --git a/test/e2e/auth/BUILD b/test/e2e/auth/BUILD index 4219d3639ac..01f0bdb710a 100644 --- a/test/e2e/auth/BUILD +++ b/test/e2e/auth/BUILD @@ -57,6 +57,7 @@ go_library( "//test/e2e/framework/deployment:go_default_library", "//test/e2e/framework/job:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/utils:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/evanphx/json-patch:go_default_library", diff --git a/test/e2e/auth/audit_dynamic.go b/test/e2e/auth/audit_dynamic.go index 
671e60e145b..8375f09448d 100644 --- a/test/e2e/auth/audit_dynamic.go +++ b/test/e2e/auth/audit_dynamic.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/auth" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -158,7 +159,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { // check that we are receiving logs in the proxy err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (done bool, err error) { - logs, err := framework.GetPodLogs(f.ClientSet, namespace, "audit-proxy", "proxy") + logs, err := e2epod.GetPodLogs(f.ClientSet, namespace, "audit-proxy", "proxy") if err != nil { e2elog.Logf("waiting for audit-proxy pod logs to be available") return false, nil @@ -363,7 +364,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() { pollingTimeout := 5 * time.Minute err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) { // Fetch the logs - logs, err := framework.GetPodLogs(f.ClientSet, namespace, "audit-proxy", "proxy") + logs, err := e2epod.GetPodLogs(f.ClientSet, namespace, "audit-proxy", "proxy") if err != nil { return false, err } diff --git a/test/e2e/auth/pod_security_policy.go b/test/e2e/auth/pod_security_policy.go index f31229e3f86..3717244cbec 100644 --- a/test/e2e/auth/pod_security_policy.go +++ b/test/e2e/auth/pod_security_policy.go @@ -34,6 +34,7 @@ import ( "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/auth" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" utilpointer "k8s.io/utils/pointer" @@ -90,7 +91,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { ginkgo.By("Running a restricted pod") pod, err := c.CoreV1().Pods(ns).Create(restrictedPod("allowed")) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace)) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace)) testPrivilegedPods(func(pod *v1.Pod) { _, err := c.CoreV1().Pods(ns).Create(pod) @@ -109,7 +110,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() { testPrivilegedPods(func(pod *v1.Pod) { p, err := c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace)) + framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace)) // Verify expected PSP was used. 
p, err = c.CoreV1().Pods(ns).Get(p.Name, metav1.GetOptions{}) diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index 6523c3ac548..8fb2d3f6f75 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -33,6 +33,7 @@ import ( "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -224,7 +225,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { }, }) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) mountedToken, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey)) framework.ExpectNoError(err) @@ -491,7 +492,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { framework.ExpectNoError(err) e2elog.Logf("created pod") - if !framework.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) { + if !e2epod.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) { framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name) } @@ -500,7 +501,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { var logs string if err := wait.Poll(1*time.Minute, 20*time.Minute, func() (done bool, err error) { e2elog.Logf("polling logs") - logs, err = framework.GetPodLogs(f.ClientSet, f.Namespace.Name, "inclusterclient", "inclusterclient") + logs, err = e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, "inclusterclient", "inclusterclient") if err != nil { e2elog.Logf("Error pulling logs: %v", err) return false, nil diff --git a/test/e2e/autoscaling/BUILD b/test/e2e/autoscaling/BUILD index 7a96eaf583a..4694e7546e8 100644 --- a/test/e2e/autoscaling/BUILD +++ b/test/e2e/autoscaling/BUILD @@ -41,6 +41,7 @@ go_library( "//test/e2e/common:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/instrumentation/monitoring:go_default_library", "//test/e2e/scheduling:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/autoscaling/dns_autoscaling.go b/test/e2e/autoscaling/dns_autoscaling.go index 97113b22eba..3867a8ccf3f 100644 --- a/test/e2e/autoscaling/dns_autoscaling.go +++ b/test/e2e/autoscaling/dns_autoscaling.go @@ -30,6 +30,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -114,7 +115,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() { ginkgo.By("Wait for number of running and ready kube-dns pods recover") label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName})) - _, err := framework.WaitForPodsWithLabelRunningReady(c, metav1.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout) + _, err := e2epod.WaitForPodsWithLabelRunningReady(c, metav1.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout) framework.ExpectNoError(err) }() ginkgo.By("Wait for kube-dns scaled to expected number") diff --git a/test/e2e/common/BUILD 
b/test/e2e/common/BUILD index a25166ad782..088c6c34c39 100644 --- a/test/e2e/common/BUILD +++ b/test/e2e/common/BUILD @@ -78,6 +78,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/deployment:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/replicaset:go_default_library", "//test/e2e/framework/volume:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/common/apparmor.go b/test/e2e/common/apparmor.go index 395c8d57333..51705908cb3 100644 --- a/test/e2e/common/apparmor.go +++ b/test/e2e/common/apparmor.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/pkg/security/apparmor" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" . "github.com/onsi/gomega" @@ -126,7 +127,7 @@ done`, testCmd) if runOnce { pod = f.PodClient().Create(pod) - framework.ExpectNoError(framework.WaitForPodSuccessInNamespace( + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace( f.ClientSet, pod.Name, f.Namespace.Name)) var err error pod, err = f.PodClient().Get(pod.Name, metav1.GetOptions{}) @@ -242,9 +243,9 @@ func createAppArmorProfileLoader(f *framework.Framework) { func getRunningLoaderPod(f *framework.Framework) *api.Pod { label := labels.SelectorFromSet(labels.Set(map[string]string{loaderLabelKey: loaderLabelValue})) - pods, err := framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label) + pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label) framework.ExpectNoError(err, "Failed to schedule apparmor-loader Pod") pod := &pods.Items[0] - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod), "Failed to run apparmor-loader Pod") + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod), "Failed to run apparmor-loader Pod") return pod } diff --git a/test/e2e/common/configmap_volume.go b/test/e2e/common/configmap_volume.go index 8bd357b85f5..7a9d8f7c939 100644 --- a/test/e2e/common/configmap_volume.go +++ b/test/e2e/common/configmap_volume.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -173,7 +174,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() { f.PodClient().CreateSync(pod) pollLogs := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) } Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1")) @@ -272,10 +273,10 @@ var _ = Describe("[sig-storage] ConfigMap", func() { f.PodClient().CreateSync(pod) pollLogs1 := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName1) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName1) } pollLogs2 := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName2) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName2) } By("Waiting for pod with text data") @@ -430,17 +431,17 @@ var _ = Describe("[sig-storage] ConfigMap", func() { f.PodClient().CreateSync(pod) 
pollCreateLogs := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) } Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/create/data-1")) pollUpdateLogs := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) } Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/update/data-3")) pollDeleteLogs := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) } Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1")) diff --git a/test/e2e/common/container_probe.go b/test/e2e/common/container_probe.go index 10881e4844d..d892ee3fa1a 100644 --- a/test/e2e/common/container_probe.go +++ b/test/e2e/common/container_probe.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" . "github.com/onsi/ginkgo" @@ -258,7 +259,7 @@ var _ = framework.KubeDescribe("Probing container", func() { "involvedObject.namespace": f.Namespace.Name, "reason": events.ContainerProbeWarning, }.AsSelector().String() - framework.ExpectNoError(framework.WaitTimeoutForPodEvent( + framework.ExpectNoError(e2epod.WaitTimeoutForPodEvent( f.ClientSet, pod.Name, f.Namespace.Name, expectedEvent, "0.0.0.0", framework.PodEventTimeout)) }) }) @@ -419,7 +420,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, // Wait until the pod is not pending. (Here we need to check for something other than // 'Pending' other than checking for 'Running', since when failures occur, we go to // 'Terminated' which can cause indefinite blocking.) - framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name), + framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, ns, pod.Name), fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns)) e2elog.Logf("Started pod %s in namespace %s", pod.Name, ns) diff --git a/test/e2e/common/downwardapi_volume.go b/test/e2e/common/downwardapi_volume.go index fae20823558..bcd1716af3f 100644 --- a/test/e2e/common/downwardapi_volume.go +++ b/test/e2e/common/downwardapi_volume.go @@ -25,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" . 
"github.com/onsi/ginkgo" @@ -132,7 +133,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { podClient.CreateSync(pod) Eventually(func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName) }, podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n")) @@ -142,7 +143,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { }) Eventually(func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n")) }) @@ -166,7 +167,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { framework.ExpectNoError(err, "Failed to get pod %q", pod.Name) Eventually(func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n")) @@ -176,7 +177,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() { }) Eventually(func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n")) }) diff --git a/test/e2e/common/expansion.go b/test/e2e/common/expansion.go index 36b7e74a66a..74338623373 100644 --- a/test/e2e/common/expansion.go +++ b/test/e2e/common/expansion.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" . 
"github.com/onsi/ginkgo" @@ -382,7 +383,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { podClient = f.PodClient() pod = podClient.Create(pod) - err := framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) + err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) framework.ExpectError(err, "while waiting for pod to be running") By("updating the pod") @@ -391,7 +392,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { }) By("waiting for pod running") - err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) + err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) framework.ExpectNoError(err, "while waiting for pod to be running") By("deleting the pod gracefully") @@ -475,7 +476,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { pod = podClient.Create(pod) By("waiting for pod running") - err := framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) + err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) framework.ExpectNoError(err, "while waiting for pod to be running") By("creating a file in subpath") @@ -498,7 +499,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { }) By("waiting for annotated pod running") - err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) + err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) framework.ExpectNoError(err, "while waiting for annotated pod to be running") By("deleting the pod gracefully") @@ -614,7 +615,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { defer func() { framework.DeletePodWithWait(f, f.ClientSet, pod) }() - err := framework.WaitForPodRunningInNamespace(f.ClientSet, pod) + err := e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod) Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running") By("updating the pod") @@ -650,7 +651,7 @@ func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) { framework.DeletePodWithWait(f, f.ClientSet, pod) }() - err := framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) + err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout) framework.ExpectError(err, "while waiting for pod to be running") } diff --git a/test/e2e/common/projected_configmap.go b/test/e2e/common/projected_configmap.go index 1176e310919..0492b5a5792 100644 --- a/test/e2e/common/projected_configmap.go +++ b/test/e2e/common/projected_configmap.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -179,7 +180,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { f.PodClient().CreateSync(pod) pollLogs := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, 
pod.Name, containerName) } gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) @@ -358,17 +359,17 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { f.PodClient().CreateSync(pod) pollCreateLogs := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) } gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/create/data-1")) pollUpdateLogs := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) } gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/update/data-3")) pollDeleteLogs := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) } gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) diff --git a/test/e2e/common/projected_downwardapi.go b/test/e2e/common/projected_downwardapi.go index 2a5a2081d30..adbdaddefe5 100644 --- a/test/e2e/common/projected_downwardapi.go +++ b/test/e2e/common/projected_downwardapi.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -132,7 +133,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected downwardAPI", func() { podClient.CreateSync(pod) gomega.Eventually(func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName) }, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key1=\"value1\"\n")) @@ -142,7 +143,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected downwardAPI", func() { }) gomega.Eventually(func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n")) }) @@ -166,7 +167,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected downwardAPI", func() { framework.ExpectNoError(err, "Failed to get pod %q", pod.Name) gomega.Eventually(func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"bar\"\n")) @@ -176,7 +177,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected downwardAPI", func() { }) gomega.Eventually(func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) }, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"foo\"\n")) }) diff --git 
a/test/e2e/common/projected_secret.go b/test/e2e/common/projected_secret.go index 1ec5c84d8b6..416b6c8ce93 100644 --- a/test/e2e/common/projected_secret.go +++ b/test/e2e/common/projected_secret.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -366,17 +367,17 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { f.PodClient().CreateSync(pod) pollCreateLogs := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) } gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/create/data-1")) pollUpdateLogs := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) } gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/update/data-3")) pollDeleteLogs := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) } gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1")) diff --git a/test/e2e/common/runtimeclass.go b/test/e2e/common/runtimeclass.go index 6a8b7b185fa..6182985019a 100644 --- a/test/e2e/common/runtimeclass.go +++ b/test/e2e/common/runtimeclass.go @@ -28,6 +28,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/events" runtimeclasstest "k8s.io/kubernetes/pkg/kubelet/runtimeclass/testing" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" utilpointer "k8s.io/utils/pointer" @@ -130,7 +131,7 @@ func createRuntimeClassPod(f *framework.Framework, runtimeClassName string) *v1. // expectPodSuccess waits for the given pod to terminate successfully. func expectPodSuccess(f *framework.Framework, pod *v1.Pod) { - framework.ExpectNoError(framework.WaitForPodSuccessInNamespace( + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace( f.ClientSet, pod.Name, f.Namespace.Name)) } @@ -143,6 +144,6 @@ func expectSandboxFailureEvent(f *framework.Framework, pod *v1.Pod, msg string) "involvedObject.namespace": f.Namespace.Name, "reason": events.FailedCreatePodSandBox, }.AsSelector().String() - framework.ExpectNoError(framework.WaitTimeoutForPodEvent( + framework.ExpectNoError(e2epod.WaitTimeoutForPodEvent( f.ClientSet, pod.Name, f.Namespace.Name, eventSelector, msg, framework.PodEventTimeout)) } diff --git a/test/e2e/common/secrets_volume.go b/test/e2e/common/secrets_volume.go index d368981e870..13688d768e9 100644 --- a/test/e2e/common/secrets_volume.go +++ b/test/e2e/common/secrets_volume.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" . 
"github.com/onsi/ginkgo" @@ -331,17 +332,17 @@ var _ = Describe("[sig-storage] Secrets", func() { f.PodClient().CreateSync(pod) pollCreateLogs := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName) } Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/create/data-1")) pollUpdateLogs := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName) } Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/update/data-3")) pollDeleteLogs := func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName) } Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1")) diff --git a/test/e2e/common/security_context.go b/test/e2e/common/security_context.go index 81893f218b9..48d50efa08b 100644 --- a/test/e2e/common/security_context.go +++ b/test/e2e/common/security_context.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" "k8s.io/utils/pointer" @@ -257,7 +258,7 @@ var _ = framework.KubeDescribe("Security Context", func() { */ framework.ConformanceIt("should run the container as unprivileged when false [LinuxOnly] [NodeConformance]", func() { podName := createAndWaitUserPod(false) - logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) + logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) } diff --git a/test/e2e/common/sysctl.go b/test/e2e/common/sysctl.go index 88f6c79bc72..9d4c253003a 100644 --- a/test/e2e/common/sysctl.go +++ b/test/e2e/common/sysctl.go @@ -22,6 +22,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/kubelet/sysctl" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" . 
"github.com/onsi/ginkgo" @@ -93,7 +94,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() { Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded)) By("Getting logs from the pod") - log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) framework.ExpectNoError(err) By("Checking that the sysctl is actually updated") @@ -136,7 +137,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() { Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded)) By("Getting logs from the pod") - log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) framework.ExpectNoError(err) By("Checking that the sysctl is actually updated") diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 317cc60e889..54fee752ce8 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -40,6 +40,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/metrics" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/manifest" testutils "k8s.io/kubernetes/test/utils" @@ -118,7 +119,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { // #41007. To avoid those pods preventing the whole test runs (and just // wasting the whole run), we allow for some not-ready pods (with the // number equal to the number of allowed not-ready nodes). - if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil { + if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil { framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem) framework.LogFailedContainers(c, metav1.NamespaceSystem, e2elog.Logf) runKubernetesServiceTestContainer(c, metav1.NamespaceDefault) @@ -264,11 +265,11 @@ func runKubernetesServiceTestContainer(c clientset.Interface, ns string) { } }() timeout := 5 * time.Minute - if err := framework.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil { + if err := e2epod.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil { e2elog.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err) return } - logs, err := framework.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name) + logs, err := e2epod.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name) if err != nil { e2elog.Logf("Failed to retrieve logs from %v: %v", p.Name, err) } else { diff --git a/test/e2e/examples.go b/test/e2e/examples.go index f6ea9ee1be1..003ff78f3b9 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/auth" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/testfiles" "github.com/onsi/ginkgo" @@ -76,7 +77,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { var wg 
sync.WaitGroup passed := true checkRestart := func(podName string, timeout time.Duration) { - err := framework.WaitForPodNameRunningInNamespace(c, podName, ns) + err := e2epod.WaitForPodNameRunningInNamespace(c, podName, ns) framework.ExpectNoError(err) for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) { pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) @@ -122,7 +123,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { ginkgo.By("creating secret and pod") framework.RunKubectlOrDieInput(secretYaml, "create", "-f", "-", nsFlag) framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) - err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns) + err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns) framework.ExpectNoError(err) ginkgo.By("checking if secret was read correctly") @@ -140,7 +141,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { ginkgo.By("creating the pod") framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) - err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns) + err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns) framework.ExpectNoError(err) ginkgo.By("checking if name and namespace were passed correctly") diff --git a/test/e2e/framework/BUILD b/test/e2e/framework/BUILD index e0f89e9c254..d4cb7e4fdc6 100644 --- a/test/e2e/framework/BUILD +++ b/test/e2e/framework/BUILD @@ -49,9 +49,7 @@ go_library( "//pkg/kubelet/dockershim/metrics:go_default_library", "//pkg/kubelet/events:go_default_library", "//pkg/kubelet/metrics:go_default_library", - "//pkg/kubelet/pod:go_default_library", "//pkg/kubelet/sysctl:go_default_library", - "//pkg/kubelet/util/format:go_default_library", "//pkg/master/ports:go_default_library", "//pkg/registry/core/service/portallocator:go_default_library", "//pkg/scheduler/algorithm/predicates:go_default_library", @@ -111,6 +109,7 @@ go_library( "//test/e2e/framework/ginkgowrapper:go_default_library", "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/metrics:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/manifest:go_default_library", @@ -152,6 +151,7 @@ filegroup( "//test/e2e/framework/lifecycle:all-srcs", "//test/e2e/framework/log:all-srcs", "//test/e2e/framework/metrics:all-srcs", + "//test/e2e/framework/pod:all-srcs", "//test/e2e/framework/podlogs:all-srcs", "//test/e2e/framework/providers/aws:all-srcs", "//test/e2e/framework/providers/azure:all-srcs", diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index 7d974dba7dd..98cf6fedd93 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -48,6 +48,7 @@ import ( scaleclient "k8s.io/client-go/scale" e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/metrics" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" "github.com/onsi/ginkgo" @@ -431,34 +432,34 @@ func (f *Framework) AddNamespacesToDelete(namespaces ...*v1.Namespace) { // WaitForPodTerminated waits for the pod to be terminated with the given reason. 
func (f *Framework) WaitForPodTerminated(podName, reason string) error { - return waitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name) + return e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name) } // WaitForPodNotFound waits for the pod to be completely terminated (not "Get-able"). func (f *Framework) WaitForPodNotFound(podName string, timeout time.Duration) error { - return waitForPodNotFoundInNamespace(f.ClientSet, podName, f.Namespace.Name, timeout) + return e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, podName, f.Namespace.Name, timeout) } // WaitForPodRunning waits for the pod to run in the namespace. func (f *Framework) WaitForPodRunning(podName string) error { - return WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name) + return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name) } // WaitForPodReady waits for the pod to flip to ready in the namespace. func (f *Framework) WaitForPodReady(podName string) error { - return waitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, PodStartTimeout) + return e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, PodStartTimeout) } // WaitForPodRunningSlow waits for the pod to run in the namespace. // It has a longer timeout then WaitForPodRunning (util.slowPodStartTimeout). func (f *Framework) WaitForPodRunningSlow(podName string) error { - return waitForPodRunningInNamespaceSlow(f.ClientSet, podName, f.Namespace.Name) + return e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, podName, f.Namespace.Name) } // WaitForPodNoLongerRunning waits for the pod to no longer be running in the namespace, for either // success or failure. func (f *Framework) WaitForPodNoLongerRunning(podName string) error { - return WaitForPodNoLongerRunningInNamespace(f.ClientSet, podName, f.Namespace.Name) + return e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podName, f.Namespace.Name) } // TestContainerOutput runs the given pod in the given namespace and waits diff --git a/test/e2e/framework/networking_utils.go b/test/e2e/framework/networking_utils.go index 90c25353792..ab2fcb6d75f 100644 --- a/test/e2e/framework/networking_utils.go +++ b/test/e2e/framework/networking_utils.go @@ -39,6 +39,7 @@ import ( clientset "k8s.io/client-go/kubernetes" coreclientset "k8s.io/client-go/kubernetes/typed/core/v1" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -525,7 +526,7 @@ func (config *NetworkingTestConfig) DeleteNodePortService() { func (config *NetworkingTestConfig) createTestPods() { testContainerPod := config.createTestPodSpec() - hostTestContainerPod := NewExecPodSpec(config.Namespace, hostTestPodName, config.HostNetwork) + hostTestContainerPod := e2epod.NewExecPodSpec(config.Namespace, hostTestPodName, config.HostNetwork) config.createPod(testContainerPod) config.createPod(hostTestContainerPod) @@ -671,7 +672,7 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod() { config.getPodClient().Delete(pod.Name, metav1.NewDeleteOptions(0)) config.EndpointPods = config.EndpointPods[1:] // wait for pod being deleted. 
- err := WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout) + err := e2epod.WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout) if err != nil { Failf("Failed to delete %s pod: %v", pod.Name, err) } diff --git a/test/e2e/framework/pod/BUILD b/test/e2e/framework/pod/BUILD new file mode 100644 index 00000000000..6e190f2219f --- /dev/null +++ b/test/e2e/framework/pod/BUILD @@ -0,0 +1,55 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "resource.go", + "runtimeobject.go", + "wait.go", + ], + importpath = "k8s.io/kubernetes/test/e2e/framework/pod", + visibility = ["//visibility:public"], + deps = [ + "//pkg/api/v1/pod:go_default_library", + "//pkg/apis/apps:go_default_library", + "//pkg/apis/batch:go_default_library", + "//pkg/apis/core:go_default_library", + "//pkg/apis/extensions:go_default_library", + "//pkg/client/conditions:go_default_library", + "//pkg/controller:go_default_library", + "//pkg/kubelet/pod:go_default_library", + "//pkg/kubelet/util/format:go_default_library", + "//staging/src/k8s.io/api/apps/v1:go_default_library", + "//staging/src/k8s.io/api/batch/v1:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//test/e2e/framework/log:go_default_library", + "//test/utils:go_default_library", + "//test/utils/image:go_default_library", + "//vendor/github.com/onsi/ginkgo:go_default_library", + "//vendor/github.com/onsi/gomega:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/test/e2e/framework/pod/resource.go b/test/e2e/framework/pod/resource.go new file mode 100644 index 00000000000..864aa965a3a --- /dev/null +++ b/test/e2e/framework/pod/resource.go @@ -0,0 +1,689 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package pod + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" + "k8s.io/kubernetes/pkg/client/conditions" + kubepod "k8s.io/kubernetes/pkg/kubelet/pod" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" + testutils "k8s.io/kubernetes/test/utils" + imageutils "k8s.io/kubernetes/test/utils/image" +) + +var ( + // BusyBoxImage is the image URI of BusyBox. + BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox) +) + +// TODO: Move to its own subpkg. +// expectNoErrorWithRetries to their own subpackages within framework. +// expectNoError checks if "err" is set, and if so, fails assertion while logging the error. +func expectNoError(err error, explain ...interface{}) { + expectNoErrorWithOffset(1, err, explain...) +} + +// TODO: Move to its own subpkg. +// expectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller +// (for example, for call chain f -> g -> expectNoErrorWithOffset(1, ...) error would be logged for "f"). +func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) { + if err != nil { + e2elog.Logf("Unexpected error occurred: %v", err) + } + gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...) +} + +// TODO: Move to its own subpkg. +// expectNoErrorWithRetries checks if an error occurs with the given retry count. +func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) { + var err error + for i := 0; i < maxRetries; i++ { + err = fn() + if err == nil { + return + } + e2elog.Logf("(Attempt %d of %d) Unexpected error occurred: %v", i+1, maxRetries, err) + } + gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...) +} + +func isElementOf(podUID types.UID, pods *v1.PodList) bool { + for _, pod := range pods.Items { + if pod.UID == podUID { + return true + } + } + return false +} + +// ProxyResponseChecker is a context for checking pods responses by issuing GETs to them (via the API +// proxy) and verifying that they answer with their own pod name. +type ProxyResponseChecker struct { + c clientset.Interface + ns string + label labels.Selector + controllerName string + respondName bool // Whether the pod should respond with its own name. + pods *v1.PodList +} + +// NewProxyResponseChecker returns a context for checking pods responses. +func NewProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) ProxyResponseChecker { + return ProxyResponseChecker{c, ns, label, controllerName, respondName, pods} +} + +// CheckAllResponses issues GETs to all pods in the context and verify they +// reply with their own pod name. +func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) { + successes := 0 + options := metav1.ListOptions{LabelSelector: r.label.String()} + currentPods, err := r.c.CoreV1().Pods(r.ns).List(options) + expectNoError(err, "Failed to get list of currentPods in namespace: %s", r.ns) + for i, pod := range r.pods.Items { + // Check that the replica list remains unchanged, otherwise we have problems. 
+ if !isElementOf(pod.UID, currentPods) { + return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods) + } + + ctx, cancel := context.WithTimeout(context.Background(), singleCallTimeout) + defer cancel() + + body, err := r.c.CoreV1().RESTClient().Get(). + Context(ctx). + Namespace(r.ns). + Resource("pods"). + SubResource("proxy"). + Name(string(pod.Name)). + Do(). + Raw() + + if err != nil { + if ctx.Err() != nil { + // We may encounter errors here because of a race between the pod readiness and apiserver + // proxy. So, we log the error and retry if this occurs. + e2elog.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status) + return false, nil + } + e2elog.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status) + continue + } + // The response checker expects the pod's name unless !respondName, in + // which case it just checks for a non-empty response. + got := string(body) + what := "" + if r.respondName { + what = "expected" + want := pod.Name + if got != want { + e2elog.Logf("Controller %s: Replica %d [%s] expected response %q but got %q", + r.controllerName, i+1, pod.Name, want, got) + continue + } + } else { + what = "non-empty" + if len(got) == 0 { + e2elog.Logf("Controller %s: Replica %d [%s] expected non-empty response", + r.controllerName, i+1, pod.Name) + continue + } + } + successes++ + e2elog.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far", + r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items)) + } + if successes < len(r.pods.Items) { + return false, nil + } + return true, nil +} + +// CountRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp. +func CountRemainingPods(c clientset.Interface, namespace string) (int, int, error) { + // check for remaining pods + pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + if err != nil { + return 0, 0, err + } + + // nothing remains! + if len(pods.Items) == 0 { + return 0, 0, nil + } + + // stuff remains, log about it + LogPodStates(pods.Items) + + // check if there were any pods with missing deletion timestamp + numPods := len(pods.Items) + missingTimestamp := 0 + for _, pod := range pods.Items { + if pod.DeletionTimestamp == nil { + missingTimestamp++ + } + } + return numPods, missingTimestamp, nil +} + +// Initialized checks the state of all init containers in the pod. 
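CheckAllResponses has the shape of a wait.ConditionFunc, so it is meant to be polled; PodsResponding in wait.go drives it exactly this way. A minimal, hypothetical sketch of doing the same from a test (the wrapper name, label, and timeout values are illustrative assumptions):

package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"

	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForPodsToRespond polls the proxy response checker until every pod in the
// provided list answers with its own name via the API server proxy.
func waitForPodsToRespond(c clientset.Interface, ns, controllerName string, pods *v1.PodList) error {
	label := labels.SelectorFromSet(labels.Set{"name": controllerName})
	checker := e2epod.NewProxyResponseChecker(c, ns, label, controllerName, true, pods)
	return wait.PollImmediate(2*time.Second, 15*time.Minute, checker.CheckAllResponses)
}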
+func Initialized(pod *v1.Pod) (ok bool, failed bool, err error) { + allInit := true + initFailed := false + for _, s := range pod.Status.InitContainerStatuses { + switch { + case initFailed && s.State.Waiting == nil: + return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name) + case allInit && s.State.Waiting == nil: + return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name) + case s.State.Terminated == nil: + allInit = false + case s.State.Terminated.ExitCode != 0: + allInit = false + initFailed = true + case !s.Ready: + return allInit, initFailed, fmt.Errorf("container %s initialized but isn't marked as ready", s.Name) + } + } + return allInit, initFailed, nil +} + +func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc { + return func() (bool, error) { + pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) + if err != nil { + return false, err + } + switch pod.Status.Phase { + case v1.PodRunning: + return true, nil + case v1.PodFailed, v1.PodSucceeded: + return false, conditions.ErrPodCompleted + } + return false, nil + } +} + +func podCompleted(c clientset.Interface, podName, namespace string) wait.ConditionFunc { + return func() (bool, error) { + pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) + if err != nil { + return false, err + } + switch pod.Status.Phase { + case v1.PodFailed, v1.PodSucceeded: + return true, nil + } + return false, nil + } +} + +func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc { + return func() (bool, error) { + pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) + if err != nil { + return false, err + } + switch pod.Status.Phase { + case v1.PodFailed, v1.PodSucceeded: + return false, conditions.ErrPodCompleted + case v1.PodRunning: + return podutil.IsPodReady(pod), nil + } + return false, nil + } +} + +func podNotPending(c clientset.Interface, podName, namespace string) wait.ConditionFunc { + return func() (bool, error) { + pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) + if err != nil { + return false, err + } + switch pod.Status.Phase { + case v1.PodPending: + return false, nil + default: + return true, nil + } + } +} + +// PodsCreated returns a pod list matched by the given name. +func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) { + label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) + return PodsCreatedByLabel(c, ns, name, replicas, label) +} + +// PodsCreatedByLabel returns a created pod list matched by the given label. +func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) { + timeout := 2 * time.Minute + for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { + options := metav1.ListOptions{LabelSelector: label.String()} + + // List the pods, making sure we observe all the replicas. 
+ pods, err := c.CoreV1().Pods(ns).List(options) + if err != nil { + return nil, err + } + + created := []v1.Pod{} + for _, pod := range pods.Items { + if pod.DeletionTimestamp != nil { + continue + } + created = append(created, pod) + } + e2elog.Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas) + + if int32(len(created)) == replicas { + pods.Items = created + return pods, nil + } + } + return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas) +} + +// VerifyPods checks if the specified pod is responding. +func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error { + return podRunningMaybeResponding(c, ns, name, wantName, replicas, true) +} + +// VerifyPodsRunning checks if the specified pod is running. +func VerifyPodsRunning(c clientset.Interface, ns, name string, wantName bool, replicas int32) error { + return podRunningMaybeResponding(c, ns, name, wantName, replicas, false) +} + +func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error { + pods, err := PodsCreated(c, ns, name, replicas) + if err != nil { + return err + } + e := podsRunning(c, pods) + if len(e) > 0 { + return fmt.Errorf("failed to wait for pods running: %v", e) + } + if checkResponding { + err = PodsResponding(c, ns, name, wantName, pods) + if err != nil { + return fmt.Errorf("failed to wait for pods responding: %v", err) + } + } + return nil +} + +func podsRunning(c clientset.Interface, pods *v1.PodList) []error { + // Wait for the pods to enter the running state. Waiting loops until the pods + // are running so non-running pods cause a timeout for this test. + ginkgo.By("ensuring each pod is running") + e := []error{} + errorChan := make(chan error) + + for _, pod := range pods.Items { + go func(p v1.Pod) { + errorChan <- WaitForPodRunningInNamespace(c, &p) + }(pod) + } + + for range pods.Items { + err := <-errorChan + if err != nil { + e = append(e, err) + } + } + + return e +} + +// DumpAllPodInfo logs basic info for all pods. +func DumpAllPodInfo(c clientset.Interface) { + pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{}) + if err != nil { + e2elog.Logf("unable to fetch pod debug info: %v", err) + } + LogPodStates(pods.Items) +} + +// LogPodStates logs basic info of provided pods for debugging. +func LogPodStates(pods []v1.Pod) { + // Find maximum widths for pod, node, and phase strings for column printing. + maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE") + for i := range pods { + pod := &pods[i] + if len(pod.ObjectMeta.Name) > maxPodW { + maxPodW = len(pod.ObjectMeta.Name) + } + if len(pod.Spec.NodeName) > maxNodeW { + maxNodeW = len(pod.Spec.NodeName) + } + if len(pod.Status.Phase) > maxPhaseW { + maxPhaseW = len(pod.Status.Phase) + } + } + // Increase widths by one to separate by a single space. + maxPodW++ + maxNodeW++ + maxPhaseW++ + maxGraceW++ + + // Log pod info. * does space padding, - makes them left-aligned. 
+ e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s", + maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS") + for _, pod := range pods { + grace := "" + if pod.DeletionGracePeriodSeconds != nil { + grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds) + } + e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s", + maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions) + } + e2elog.Logf("") // Final empty line helps for readability. +} + +// LogPodTerminationMessages logs termination messages for failing pods. It's a short snippet (much smaller than full logs), but it often shows +// why pods crashed and since it is in the API, it's fast to retrieve. +func LogPodTerminationMessages(pods []v1.Pod) { + for _, pod := range pods { + for _, status := range pod.Status.InitContainerStatuses { + if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 { + e2elog.Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message) + } + } + for _, status := range pod.Status.ContainerStatuses { + if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 { + e2elog.Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message) + } + } + } +} + +// DumpAllPodInfoForNamespace logs all pod information for a given namespace. +func DumpAllPodInfoForNamespace(c clientset.Interface, namespace string) { + pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + if err != nil { + e2elog.Logf("unable to fetch pod debug info: %v", err) + } + LogPodStates(pods.Items) + LogPodTerminationMessages(pods.Items) +} + +// FilterNonRestartablePods filters out pods that will never get recreated if +// deleted after termination. +func FilterNonRestartablePods(pods []*v1.Pod) []*v1.Pod { + var results []*v1.Pod + for _, p := range pods { + if isNotRestartAlwaysMirrorPod(p) { + // Mirror pods with restart policy == Never will not get + // recreated if they are deleted after the pods have + // terminated. For now, we discount such pods. 
+ // https://github.com/kubernetes/kubernetes/issues/34003 + continue + } + results = append(results, p) + } + return results +} + +func isNotRestartAlwaysMirrorPod(p *v1.Pod) bool { + if !kubepod.IsMirrorPod(p) { + return false + } + return p.Spec.RestartPolicy != v1.RestartPolicyAlways +} + +// NewExecPodSpec returns the pod spec of hostexec pod +func NewExecPodSpec(ns, name string, hostNetwork bool) *v1.Pod { + immediate := int64(0) + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "hostexec", + Image: imageutils.GetE2EImage(imageutils.Hostexec), + ImagePullPolicy: v1.PullIfNotPresent, + }, + }, + HostNetwork: hostNetwork, + SecurityContext: &v1.PodSecurityContext{}, + TerminationGracePeriodSeconds: &immediate, + }, + } + return pod +} + +// LaunchHostExecPod launches a hostexec pod in the given namespace and waits +// until it's Running +func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod { + hostExecPod := NewExecPodSpec(ns, name, true) + pod, err := client.CoreV1().Pods(ns).Create(hostExecPod) + expectNoError(err) + err = WaitForPodRunningInNamespace(client, pod) + expectNoError(err) + return pod +} + +// newExecPodSpec returns the pod spec of exec pod +func newExecPodSpec(ns, generateName string) *v1.Pod { + immediate := int64(0) + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: generateName, + Namespace: ns, + }, + Spec: v1.PodSpec{ + TerminationGracePeriodSeconds: &immediate, + Containers: []v1.Container{ + { + Name: "exec", + Image: BusyBoxImage, + Command: []string{"sh", "-c", "trap exit TERM; while true; do sleep 5; done"}, + }, + }, + }, + } + return pod +} + +// CreateExecPodOrFail creates a simple busybox pod in a sleep loop used as a +// vessel for kubectl exec commands. +// Returns the name of the created pod. +func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) string { + e2elog.Logf("Creating new exec pod") + execPod := newExecPodSpec(ns, generateName) + if tweak != nil { + tweak(execPod) + } + created, err := client.CoreV1().Pods(ns).Create(execPod) + expectNoError(err, "failed to create new exec pod in namespace: %s", ns) + err = wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) { + retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{}) + if err != nil { + if testutils.IsRetryableAPIError(err) { + return false, nil + } + return false, err + } + return retrievedPod.Status.Phase == v1.PodRunning, nil + }) + expectNoError(err) + return created.Name +} + +// CreatePodOrFail creates a pod with the specified containerPorts. +func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort) { + ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns)) + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "pause", + Image: imageutils.GetPauseImageName(), + Ports: containerPorts, + // Add a dummy environment variable to work around a docker issue. + // https://github.com/docker/docker/issues/14203 + Env: []v1.EnvVar{{Name: "FOO", Value: " "}}, + }, + }, + }, + } + _, err := c.CoreV1().Pods(ns).Create(pod) + expectNoError(err, "failed to create pod %s in namespace %s", name, ns) +} + +// DeletePodOrFail deletes the pod of the specified namespace and name. 
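The tweak callback on CreateExecPodOrFail is the hook for per-test customization of the generated spec. A hypothetical use, assuming a test that wants the exec pod pinned to a particular node (the wrapper name and node name are illustrative):

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// runOnNode creates a busybox exec pod pinned to the given node and returns its name.
// The caller is expected to clean it up, for example with e2epod.DeletePodOrFail.
func runOnNode(c clientset.Interface, ns, nodeName string) string {
	return e2epod.CreateExecPodOrFail(c, ns, "exec-pod-", func(pod *v1.Pod) {
		// Pin the pod to one node; the rest of the generated spec is kept as-is.
		pod.Spec.NodeName = nodeName
	})
}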
+func DeletePodOrFail(c clientset.Interface, ns, name string) { + ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns)) + err := c.CoreV1().Pods(ns).Delete(name, nil) + expectNoError(err, "failed to delete pod %s in namespace %s", name, ns) +} + +// CheckPodsRunningReady returns whether all pods whose names are listed in +// podNames in namespace ns are running and ready, using c and waiting at most +// timeout. +func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool { + return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready") +} + +// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are +// listed in podNames in namespace ns are running and ready, or succeeded; use +// c and waiting at most timeout. +func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool { + return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded") +} + +// CheckPodsCondition returns whether all pods whose names are listed in podNames +// in namespace ns are in the condition, using c and waiting at most timeout. +func CheckPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool { + np := len(podNames) + e2elog.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames) + type waitPodResult struct { + success bool + podName string + } + result := make(chan waitPodResult, len(podNames)) + for _, podName := range podNames { + // Launch off pod readiness checkers. + go func(name string) { + err := WaitForPodCondition(c, ns, name, desc, timeout, condition) + result <- waitPodResult{err == nil, name} + }(podName) + } + // Wait for them all to finish. + success := true + for range podNames { + res := <-result + if !res.success { + e2elog.Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc) + success = false + } + } + e2elog.Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames) + return success +} + +// GetPodLogs returns the logs of the specified container (namespace/pod/container). +// TODO(random-liu): Change this to be a member function of the framework. +func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { + return getPodLogsInternal(c, namespace, podName, containerName, false) +} + +// GetPreviousPodLogs returns the logs of the previous instance of the +// specified container (namespace/pod/container). +func GetPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { + return getPodLogsInternal(c, namespace, podName, containerName, true) +} + +// utility function for gomega Eventually +func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) { + logs, err := c.CoreV1().RESTClient().Get(). + Resource("pods"). + Namespace(namespace). + Name(podName).SubResource("log"). + Param("container", containerName). + Param("previous", strconv.FormatBool(previous)). + Do(). + Raw() + if err != nil { + return "", err + } + if err == nil && strings.Contains(string(logs), "Internal Error") { + return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q", string(logs)) + } + return string(logs), err +} + +// GetPodsInNamespace returns the pods in the given namespace. 
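A sketch of how the readiness and log helpers combine in practice, for instance to capture container output when pods fail to come up; the wrapper name, pod names, and timeout are illustrative assumptions:

package example

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"

	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// dumpLogsIfNotReady waits for the named pods to become running and ready and,
// if any of them do not, fetches the current and previous logs of one container
// from each pod for debugging.
func dumpLogsIfNotReady(c clientset.Interface, ns string, podNames []string, containerName string) {
	if e2epod.CheckPodsRunningReady(c, ns, podNames, 5*time.Minute) {
		return
	}
	for _, name := range podNames {
		if logs, err := e2epod.GetPodLogs(c, ns, name, containerName); err == nil {
			e2elog.Logf("current logs of %s/%s: %s", ns, name, logs)
		}
		if logs, err := e2epod.GetPreviousPodLogs(c, ns, name, containerName); err == nil {
			e2elog.Logf("previous logs of %s/%s: %s", ns, name, logs)
		}
	}
}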
+func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) { + pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) + if err != nil { + return []*v1.Pod{}, err + } + ignoreSelector := labels.SelectorFromSet(ignoreLabels) + filtered := []*v1.Pod{} + for _, p := range pods.Items { + if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) { + continue + } + filtered = append(filtered, &p) + } + return filtered, nil +} + +// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods. +func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) { + for _, pod := range pods.Items { + if !masterNodes.Has(pod.Spec.NodeName) { + if pod.Spec.NodeName != "" { + _, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) + gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true)) + gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionTrue)) + scheduledPods = append(scheduledPods, pod) + } else { + _, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) + gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true)) + gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionFalse)) + if scheduledCondition.Reason == "Unschedulable" { + + notScheduledPods = append(notScheduledPods, pod) + } + } + } + } + return +} diff --git a/test/e2e/framework/pod/runtimeobject.go b/test/e2e/framework/pod/runtimeobject.go new file mode 100644 index 00000000000..fc98bcfeac7 --- /dev/null +++ b/test/e2e/framework/pod/runtimeobject.go @@ -0,0 +1,124 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + batch "k8s.io/api/batch/v1" + v1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + clientset "k8s.io/client-go/kubernetes" + appsinternal "k8s.io/kubernetes/pkg/apis/apps" + batchinternal "k8s.io/kubernetes/pkg/apis/batch" + api "k8s.io/kubernetes/pkg/apis/core" + extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" +) + +// TODO: This function is generic enough and used enough that it should be +// moved to its own subpkg. 
+func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) { + switch kind { + case api.Kind("ReplicationController"): + return c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{}) + case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"): + return c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{}) + case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"): + return c.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{}) + case extensionsinternal.Kind("DaemonSet"): + return c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{}) + case batchinternal.Kind("Job"): + return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{}) + default: + return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind) + } +} + +// TODO: This function is generic enough and used enough that it should be +// moved to its own subpkg. +func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) { + switch typed := obj.(type) { + case *v1.ReplicationController: + return labels.SelectorFromSet(typed.Spec.Selector), nil + case *extensions.ReplicaSet: + return metav1.LabelSelectorAsSelector(typed.Spec.Selector) + case *apps.ReplicaSet: + return metav1.LabelSelectorAsSelector(typed.Spec.Selector) + case *extensions.Deployment: + return metav1.LabelSelectorAsSelector(typed.Spec.Selector) + case *apps.Deployment: + return metav1.LabelSelectorAsSelector(typed.Spec.Selector) + case *extensions.DaemonSet: + return metav1.LabelSelectorAsSelector(typed.Spec.Selector) + case *apps.DaemonSet: + return metav1.LabelSelectorAsSelector(typed.Spec.Selector) + case *batch.Job: + return metav1.LabelSelectorAsSelector(typed.Spec.Selector) + default: + return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj) + } +} + +// TODO: This function is generic enough and used enough that it should be +// moved to its own subpkg. +func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) { + switch typed := obj.(type) { + case *v1.ReplicationController: + if typed.Spec.Replicas != nil { + return *typed.Spec.Replicas, nil + } + return 0, nil + case *extensions.ReplicaSet: + if typed.Spec.Replicas != nil { + return *typed.Spec.Replicas, nil + } + return 0, nil + case *apps.ReplicaSet: + if typed.Spec.Replicas != nil { + return *typed.Spec.Replicas, nil + } + return 0, nil + case *extensions.Deployment: + if typed.Spec.Replicas != nil { + return *typed.Spec.Replicas, nil + } + return 0, nil + case *apps.Deployment: + if typed.Spec.Replicas != nil { + return *typed.Spec.Replicas, nil + } + return 0, nil + case *extensions.DaemonSet: + return 0, nil + case *apps.DaemonSet: + return 0, nil + case *batch.Job: + // TODO: currently we use pause pods so that's OK. When we'll want to switch to Pods + // that actually finish we need a better way to do this. + if typed.Spec.Parallelism != nil { + return *typed.Spec.Parallelism, nil + } + return 0, nil + default: + return -1, fmt.Errorf("Unsupported kind when getting number of replicas: %v", obj) + } +} diff --git a/test/e2e/framework/pod/wait.go b/test/e2e/framework/pod/wait.go new file mode 100644 index 00000000000..abb08060bda --- /dev/null +++ b/test/e2e/framework/pod/wait.go @@ -0,0 +1,651 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "bytes" + "errors" + "fmt" + "strings" + "sync" + "text/tabwriter" + "time" + + "github.com/onsi/ginkgo" + + v1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/kubelet/util/format" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" + testutils "k8s.io/kubernetes/test/utils" +) + +const ( + // defaultPodDeletionTimeout is the default timeout for deleting pod. + defaultPodDeletionTimeout = 3 * time.Minute + + // podListTimeout is how long to wait for the pod to be listable. + podListTimeout = time.Minute + + podRespondingTimeout = 15 * time.Minute + + // How long pods have to become scheduled onto nodes + podScheduledBeforeTimeout = podListTimeout + (20 * time.Second) + + // podStartTimeout is how long to wait for the pod to be started. + // Initial pod start can be delayed O(minutes) by slow docker pulls. + // TODO: Make this 30 seconds once #4566 is resolved. + podStartTimeout = 5 * time.Minute + + // poll is how often to poll pods, nodes and claims. + poll = 2 * time.Second + pollShortTimeout = 1 * time.Minute + pollLongTimeout = 5 * time.Minute + + // singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent + // transient failures from failing tests. + // TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed. + singleCallTimeout = 5 * time.Minute + + // Some pods can take much longer to get ready due to volume attach/detach latency. + slowPodStartTimeout = 15 * time.Minute +) + +type podCondition func(pod *v1.Pod) (bool, error) + +// errorBadPodsStates create error message of basic info of bad pods for debugging. +func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string { + errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout) + // Print bad pods info only if there are fewer than 10 bad pods + if len(badPods) > 10 { + return errStr + "There are too many bad pods. Please check log for details." + } + + buf := bytes.NewBuffer(nil) + w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0) + fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS") + for _, badPod := range badPods { + grace := "" + if badPod.DeletionGracePeriodSeconds != nil { + grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds) + } + podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v", + badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions) + fmt.Fprintln(w, podInfo) + } + w.Flush() + return errStr + buf.String() +} + +// WaitForPodsRunningReady waits up to timeout to ensure that all pods in +// namespace ns are either running and ready, or failed but controlled by a +// controller. 
Also, it ensures that at least minPods are running and +// ready. It has separate behavior from other 'wait for' pods functions in +// that it requests the list of pods on every iteration. This is useful, for +// example, in cluster startup, because the number of pods increases while +// waiting. All pods that are in SUCCESS state are not counted. +// +// If ignoreLabels is not empty, pods matching this selector are ignored. +func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error { + ignoreSelector := labels.SelectorFromSet(map[string]string{}) + start := time.Now() + e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready", + timeout, minPods, ns) + wg := sync.WaitGroup{} + wg.Add(1) + var ignoreNotReady bool + badPods := []v1.Pod{} + desiredPods := 0 + notReady := int32(0) + + if wait.PollImmediate(poll, timeout, func() (bool, error) { + // We get the new list of pods, replication controllers, and + // replica sets in every iteration because more pods come + // online during startup and we want to ensure they are also + // checked. + replicas, replicaOk := int32(0), int32(0) + + rcList, err := c.CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{}) + if err != nil { + e2elog.Logf("Error getting replication controllers in namespace '%s': %v", ns, err) + if testutils.IsRetryableAPIError(err) { + return false, nil + } + return false, err + } + for _, rc := range rcList.Items { + replicas += *rc.Spec.Replicas + replicaOk += rc.Status.ReadyReplicas + } + + rsList, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{}) + if err != nil { + e2elog.Logf("Error getting replication sets in namespace %q: %v", ns, err) + if testutils.IsRetryableAPIError(err) { + return false, nil + } + return false, err + } + for _, rs := range rsList.Items { + replicas += *rs.Spec.Replicas + replicaOk += rs.Status.ReadyReplicas + } + + podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) + if err != nil { + e2elog.Logf("Error getting pods in namespace '%s': %v", ns, err) + if testutils.IsRetryableAPIError(err) { + return false, nil + } + return false, err + } + nOk := int32(0) + notReady = int32(0) + badPods = []v1.Pod{} + desiredPods = len(podList.Items) + for _, pod := range podList.Items { + if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) { + continue + } + res, err := testutils.PodRunningReady(&pod) + switch { + case res && err == nil: + nOk++ + case pod.Status.Phase == v1.PodSucceeded: + e2elog.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name) + // it doesn't make sense to wait for this pod + continue + case pod.Status.Phase != v1.PodFailed: + e2elog.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase) + notReady++ + badPods = append(badPods, pod) + default: + if metav1.GetControllerOf(&pod) == nil { + e2elog.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name) + badPods = append(badPods, pod) + } + //ignore failed pods that are controlled by some controller + } + } + + e2elog.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)", + nOk, len(podList.Items), ns, int(time.Since(start).Seconds())) + e2elog.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk) + + if 
replicaOk == replicas && nOk >= minPods && len(badPods) == 0 { + return true, nil + } + ignoreNotReady = (notReady <= allowedNotReadyPods) + LogPodStates(badPods) + return false, nil + }) != nil { + if !ignoreNotReady { + return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout)) + } + e2elog.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods) + } + return nil +} + +// WaitForPodCondition waits for a pod to match the given condition. +func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error { + e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) + if err != nil { + if apierrs.IsNotFound(err) { + e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err) + return err + } + e2elog.Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, poll, err) + continue + } + // log now so that current pod info is reported before calling `condition()` + e2elog.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v", + podName, pod.Status.Phase, pod.Status.Reason, podutil.IsPodReady(pod), time.Since(start)) + if done, err := condition(pod); done { + if err == nil { + e2elog.Logf("Pod %q satisfied condition %q", podName, desc) + } + return err + } + } + return fmt.Errorf("Gave up after waiting %v for pod %q to be %q", timeout, podName, desc) +} + +// WaitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate, +// if the pod Get api returns an error (IsNotFound or other), or if the pod failed (and thus did not +// terminate) with an unexpected reason. Typically called to test that the passed-in pod is fully +// terminated (reason==""), but may be called to detect if a pod did *not* terminate according to +// the supplied reason. +func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error { + return WaitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", podStartTimeout, func(pod *v1.Pod) (bool, error) { + // Only consider Failed pods. Successful pods will be deleted and detected in + // waitForPodCondition's Get call returning `IsNotFound` + if pod.Status.Phase == v1.PodFailed { + if pod.Status.Reason == reason { // short-circuit waitForPodCondition's loop + return true, nil + } + return true, fmt.Errorf("Expected pod %q in namespace %q to be terminated with reason %q, got reason: %q", podName, namespace, reason, pod.Status.Reason) + } + return false, nil + }) +} + +// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
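WaitForPodCondition is the generic building block behind most of the wrappers that follow; a condition is simply a func(*v1.Pod) (bool, error), as the PodClient changes later in this diff also show. A hypothetical example that waits until a pod has been assigned a pod IP (the wrapper name, description string, and timeout are illustrative):

package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForPodIP blocks until the named pod reports a pod IP in its status.
func waitForPodIP(c clientset.Interface, ns, podName string) error {
	return e2epod.WaitForPodCondition(c, ns, podName, "pod IP assigned", 5*time.Minute,
		func(pod *v1.Pod) (bool, error) {
			return pod.Status.PodIP != "", nil
		})
}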
+func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error { + return WaitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *v1.Pod) (bool, error) { + if pod.Spec.RestartPolicy == v1.RestartPolicyAlways { + return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName) + } + switch pod.Status.Phase { + case v1.PodSucceeded: + ginkgo.By("Saw pod success") + return true, nil + case v1.PodFailed: + return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status) + default: + return false, nil + } + }) +} + +// WaitForPodNameUnschedulableInNamespace returns an error if it takes too long for the pod to become Pending +// and have condition Status equal to Unschedulable, +// if the pod Get api returns an error (IsNotFound or other), or if the pod failed with an unexpected reason. +// Typically called to test that the passed-in pod is Pending and Unschedulable. +func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, namespace string) error { + return WaitForPodCondition(c, namespace, podName, "Unschedulable", podStartTimeout, func(pod *v1.Pod) (bool, error) { + // Only consider Failed pods. Successful pods will be deleted and detected in + // waitForPodCondition's Get call returning `IsNotFound` + if pod.Status.Phase == v1.PodPending { + for _, cond := range pod.Status.Conditions { + if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" { + return true, nil + } + } + } + if pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed { + return true, fmt.Errorf("Expected pod %q in namespace %q to be in phase Pending, but got phase: %v", podName, namespace, pod.Status.Phase) + } + return false, nil + }) +} + +// WaitForMatchPodsCondition finds match pods based on the input ListOptions. +// waits and checks if all match pods are in the given podCondition +func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error { + e2elog.Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(opts) + if err != nil { + return err + } + conditionNotMatch := []string{} + for _, pod := range pods.Items { + done, err := condition(&pod) + if done && err != nil { + return fmt.Errorf("Unexpected error: %v", err) + } + if !done { + conditionNotMatch = append(conditionNotMatch, format.Pod(&pod)) + } + } + if len(conditionNotMatch) <= 0 { + return err + } + e2elog.Logf("%d pods are not %s: %v", len(conditionNotMatch), desc, conditionNotMatch) + } + return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout) +} + +// WaitForPodNameRunningInNamespace waits default amount of time (PodStartTimeout) for the specified pod to become running. +// Returns an error if timeout occurs first, or pod goes in to failed state. +func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error { + return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, podStartTimeout) +} + +// WaitForPodRunningInNamespaceSlow waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running. 
+// The resourceVersion is used when Watching object changes, it tells since when we care +// about changes to the pod. Returns an error if timeout occurs first, or pod goes in to failed state. +func WaitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace string) error { + return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout) +} + +// WaitTimeoutForPodRunningInNamespace waits the given timeout duration for the specified pod to become running. +func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error { + return wait.PollImmediate(poll, timeout, podRunning(c, podName, namespace)) +} + +// WaitForPodRunningInNamespace waits default amount of time (podStartTimeout) for the specified pod to become running. +// Returns an error if timeout occurs first, or pod goes in to failed state. +func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error { + if pod.Status.Phase == v1.PodRunning { + return nil + } + return WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, podStartTimeout) +} + +// WaitTimeoutForPodEvent waits the given timeout duration for a pod event to occur. +func WaitTimeoutForPodEvent(c clientset.Interface, podName, namespace, eventSelector, msg string, timeout time.Duration) error { + return wait.PollImmediate(poll, timeout, eventOccurred(c, podName, namespace, eventSelector, msg)) +} + +func eventOccurred(c clientset.Interface, podName, namespace, eventSelector, msg string) wait.ConditionFunc { + options := metav1.ListOptions{FieldSelector: eventSelector} + return func() (bool, error) { + events, err := c.CoreV1().Events(namespace).List(options) + if err != nil { + return false, fmt.Errorf("got error while getting pod events: %s", err) + } + for _, event := range events.Items { + if strings.Contains(event.Message, msg) { + return true, nil + } + } + return false, nil + } +} + +// WaitTimeoutForPodNoLongerRunningInNamespace waits the given timeout duration for the specified pod to stop. +func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error { + return wait.PollImmediate(poll, timeout, podCompleted(c, podName, namespace)) +} + +// WaitForPodNoLongerRunningInNamespace waits default amount of time (defaultPodDeletionTimeout) for the specified pod to stop running. +// Returns an error if timeout occurs first. +func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error { + return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, defaultPodDeletionTimeout) +} + +// WaitTimeoutForPodReadyInNamespace waits the given timeout duration for the +// specified pod to be ready and running. +func WaitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error { + return wait.PollImmediate(poll, timeout, podRunningAndReady(c, podName, namespace)) +} + +// WaitForPodNotPending returns an error if it took too long for the pod to go out of pending state. +// The resourceVersion is used when Watching object changes, it tells since when we care +// about changes to the pod. +func WaitForPodNotPending(c clientset.Interface, ns, podName string) error { + return wait.PollImmediate(poll, podStartTimeout, podNotPending(c, podName, ns)) +} + +// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or did not succeed within podStartTimeout.
+func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error { + return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, podStartTimeout) +} + +// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or did not succeed within slowPodStartTimeout. +func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error { + return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout) +} + +// WaitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate. +// Unlike `waitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get +// api returns IsNotFound then the wait stops and nil is returned. If the Get api returns an error other +// than "not found" then that error is returned and the wait stops. +func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error { + return wait.PollImmediate(poll, timeout, func() (bool, error) { + _, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) + if apierrs.IsNotFound(err) { + return true, nil // done + } + if err != nil { + return true, err // stop wait with error + } + return false, nil + }) +} + +// WaitForPodToDisappear waits the given timeout duration for the specified pod to disappear. +func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error { + return wait.PollImmediate(interval, timeout, func() (bool, error) { + e2elog.Logf("Waiting for pod %s to disappear", podName) + options := metav1.ListOptions{LabelSelector: label.String()} + pods, err := c.CoreV1().Pods(ns).List(options) + if err != nil { + if testutils.IsRetryableAPIError(err) { + return false, nil + } + return false, err + } + found := false + for _, pod := range pods.Items { + if pod.Name == podName { + e2elog.Logf("Pod %s still exists", podName) + found = true + break + } + } + if !found { + e2elog.Logf("Pod %s no longer exists", podName) + return true, nil + } + return false, nil + }) +} + +// PodsResponding waits for the pods to respond. +func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error { + ginkgo.By("trying to dial each unique pod") + label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) + return wait.PollImmediate(poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses) +} + +// WaitForControlledPodsRunning waits up to 10 minutes for pods to become Running. +func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error { + rtObject, err := getRuntimeObjectForKind(c, kind, ns, name) + if err != nil { + return err + } + selector, err := getSelectorFromRuntimeObject(rtObject) + if err != nil { + return err + } + replicas, err := getReplicasFromRuntimeObject(rtObject) + if err != nil { + return err + } + err = testutils.WaitForEnoughPodsWithLabelRunning(c, ns, selector, int(replicas)) + if err != nil { + return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err) + } + return nil +} + +// WaitForControlledPods waits up to podListTimeout for getting pods of the specified controller name and returns them.
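WaitForControlledPodsRunning resolves the controller by GroupKind, so callers pass a kind rather than a selector. A hypothetical call for a ReplicaSet named "frontend" (the wrapper and controller names are illustrative):

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	appsinternal "k8s.io/kubernetes/pkg/apis/apps"

	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForFrontendPods waits until the pods controlled by the "frontend" ReplicaSet are running.
func waitForFrontendPods(c clientset.Interface, ns string) error {
	return e2epod.WaitForControlledPodsRunning(c, ns, "frontend", appsinternal.Kind("ReplicaSet"))
}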
+func WaitForControlledPods(c clientset.Interface, ns, name string, kind schema.GroupKind) (pods *v1.PodList, err error) { + rtObject, err := getRuntimeObjectForKind(c, kind, ns, name) + if err != nil { + return nil, err + } + selector, err := getSelectorFromRuntimeObject(rtObject) + if err != nil { + return nil, err + } + return WaitForPodsWithLabel(c, ns, selector) +} + +// WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and at least one +// matching pod exists. Return the list of matching pods. +func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) { + err = wait.PollImmediate(poll, podScheduledBeforeTimeout, + func() (bool, error) { + pods, err = WaitForPodsWithLabel(c, ns, label) + if err != nil { + return false, err + } + for _, pod := range pods.Items { + if pod.Spec.NodeName == "" { + return false, nil + } + } + return true, nil + }) + return pods, err +} + +// WaitForPodsWithLabel waits up to podListTimeout for getting pods with certain label +func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) { + for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) { + options := metav1.ListOptions{LabelSelector: label.String()} + pods, err = c.CoreV1().Pods(ns).List(options) + if err != nil { + if testutils.IsRetryableAPIError(err) { + continue + } + return + } + if len(pods.Items) > 0 { + break + } + } + if pods == nil || len(pods.Items) == 0 { + err = fmt.Errorf("Timeout while waiting for pods with label %v", label) + } + return +} + +// WaitForPodsWithLabelRunningReady waits for exact amount of matching pods to become running and ready. +// Return the list of matching pods. +func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) { + var current int + err = wait.Poll(poll, timeout, + func() (bool, error) { + pods, err := WaitForPodsWithLabel(c, ns, label) + if err != nil { + e2elog.Logf("Failed to list pods: %v", err) + if testutils.IsRetryableAPIError(err) { + return false, nil + } + return false, err + } + current = 0 + for _, pod := range pods.Items { + if flag, err := testutils.PodRunningReady(&pod); err == nil && flag == true { + current++ + } + } + if current != num { + e2elog.Logf("Got %v pods running and ready, expect: %v", current, num) + return false, nil + } + return true, nil + }) + return pods, err +} + +// WaitForPodsInactive waits until there are no active pods left in the PodStore. +// This is to make a fair comparison of deletion time between DeleteRCAndPods +// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas +// when the pod is inactvie. +func WaitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error { + var activePods []*v1.Pod + err := wait.PollImmediate(interval, timeout, func() (bool, error) { + pods := ps.List() + activePods = nil + for _, pod := range pods { + if controller.IsPodActive(pod) { + activePods = append(activePods, pod) + } + } + + if len(activePods) != 0 { + return false, nil + } + return true, nil + }) + + if err == wait.ErrWaitTimeout { + for _, pod := range activePods { + e2elog.Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName) + } + return fmt.Errorf("there are %d active pods. E.g. 
%q on node %q", len(activePods), activePods[0].Name, activePods[0].Spec.NodeName) + } + return err +} + +// WaitForPodsGone waits until there are no pods left in the PodStore. +func WaitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error { + var pods []*v1.Pod + err := wait.PollImmediate(interval, timeout, func() (bool, error) { + if pods = ps.List(); len(pods) == 0 { + return true, nil + } + return false, nil + }) + + if err == wait.ErrWaitTimeout { + for _, pod := range pods { + e2elog.Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName) + } + return fmt.Errorf("there are %d pods left. E.g. %q on node %q", len(pods), pods[0].Name, pods[0].Spec.NodeName) + } + return err +} + +// WaitForPodsReady waits for the pods to become ready. +func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error { + label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) + options := metav1.ListOptions{LabelSelector: label.String()} + return wait.Poll(poll, 5*time.Minute, func() (bool, error) { + pods, err := c.CoreV1().Pods(ns).List(options) + if err != nil { + return false, nil + } + for _, pod := range pods.Items { + if !podutil.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) { + return false, nil + } + } + return true, nil + }) +} + +// WaitForNRestartablePods tries to list restarting pods using ps until it finds expect of them, +// returning their names if it can do so before timeout. +func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) { + var pods []*v1.Pod + var errLast error + found := wait.Poll(poll, timeout, func() (bool, error) { + allPods := ps.List() + pods = FilterNonRestartablePods(allPods) + if len(pods) != expect { + errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods)) + e2elog.Logf("Error getting pods: %v", errLast) + return false, nil + } + return true, nil + }) == nil + podNames := make([]string, len(pods)) + for i, p := range pods { + podNames[i] = p.ObjectMeta.Name + } + if !found { + return podNames, fmt.Errorf("couldn't find %d pods within %v; last error: %v", + expect, timeout, errLast) + } + return podNames, nil +} diff --git a/test/e2e/framework/pods.go b/test/e2e/framework/pods.go index f70043fba8e..4d0cb600155 100644 --- a/test/e2e/framework/pods.go +++ b/test/e2e/framework/pods.go @@ -34,6 +34,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/sysctl" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -110,7 +111,7 @@ func (c *PodClient) CreateEventually(pod *v1.Pod, opts ...interface{}) *v1.Pod { // CreateSyncInNamespace creates a new pod according to the framework specifications in the given namespace, and waits for it to start. func (c *PodClient) CreateSyncInNamespace(pod *v1.Pod, namespace string) *v1.Pod { p := c.Create(pod) - ExpectNoError(WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace)) + ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace)) // Get the newest pod after it becomes running, some status may change after pod created, such as pod ip. 
p, err := c.Get(p.Name, metav1.GetOptions{}) ExpectNoError(err) @@ -174,7 +175,7 @@ func (c *PodClient) DeleteSyncInNamespace(name string, namespace string, options if err != nil && !errors.IsNotFound(err) { Failf("Failed to delete pod %q: %v", name, err) } - gomega.Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(), + gomega.Expect(e2epod.WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(), 2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name) } @@ -218,7 +219,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) { // TODO(random-liu): Move pod wait function into this file func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) { f := c.f - gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout, + gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout, func(pod *v1.Pod) (bool, error) { switch pod.Status.Phase { case v1.PodFailed: @@ -235,7 +236,7 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) { // WaitForFailure waits for pod to fail. func (c *PodClient) WaitForFailure(name string, timeout time.Duration) { f := c.f - gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout, + gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout, func(pod *v1.Pod) (bool, error) { switch pod.Status.Phase { case v1.PodFailed: @@ -252,7 +253,7 @@ func (c *PodClient) WaitForFailure(name string, timeout time.Duration) { // WaitForFinish waits for pod to finish running, regardless of success or failure. func (c *PodClient) WaitForFinish(name string, timeout time.Duration) { f := c.f - gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout, + gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout, func(pod *v1.Pod) (bool, error) { switch pod.Status.Phase { case v1.PodFailed: @@ -293,7 +294,7 @@ func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) { // MatchContainerOutput gets output of a container and match expected regexp in the output. 
func (c *PodClient) MatchContainerOutput(name string, containerName string, expectedRegexp string) error { f := c.f - output, err := GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName) + output, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName) if err != nil { return fmt.Errorf("failed to get output for container %q of pod %q", containerName, name) } diff --git a/test/e2e/framework/providers/gce/BUILD b/test/e2e/framework/providers/gce/BUILD index 67b54f3b775..31ff12e9941 100644 --- a/test/e2e/framework/providers/gce/BUILD +++ b/test/e2e/framework/providers/gce/BUILD @@ -24,6 +24,7 @@ go_library( "//staging/src/k8s.io/legacy-cloud-providers/gce:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/utils:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/google.golang.org/api/compute/v1:go_default_library", diff --git a/test/e2e/framework/providers/gce/recreate_node.go b/test/e2e/framework/providers/gce/recreate_node.go index 9414d02afcd..ac2161a2794 100644 --- a/test/e2e/framework/providers/gce/recreate_node.go +++ b/test/e2e/framework/providers/gce/recreate_node.go @@ -28,6 +28,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" ) @@ -57,13 +58,13 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() { ps, err = testutils.NewPodStore(f.ClientSet, systemNamespace, labels.Everything(), fields.Everything()) allPods := ps.List() - originalPods := framework.FilterNonRestartablePods(allPods) + originalPods := e2epod.FilterNonRestartablePods(allPods) originalPodNames = make([]string, len(originalPods)) for i, p := range originalPods { originalPodNames[i] = p.ObjectMeta.Name } - if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) { + if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) { framework.Failf("At least one pod wasn't running and ready or succeeded at test start.") } @@ -114,10 +115,10 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace // Make sure the pods from before node recreation are running/completed podCheckStart := time.Now() - podNamesAfter, err := framework.WaitForNRestartablePods(ps, len(podNames), framework.RestartPodReadyAgainTimeout) + podNamesAfter, err := e2epod.WaitForNRestartablePods(ps, len(podNames), framework.RestartPodReadyAgainTimeout) framework.ExpectNoError(err) remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart) - if !framework.CheckPodsRunningReadyOrSucceeded(c, systemNamespace, podNamesAfter, remaining) { + if !e2epod.CheckPodsRunningReadyOrSucceeded(c, systemNamespace, podNamesAfter, remaining) { framework.Failf("At least one pod wasn't running and ready after the restart.") } } diff --git a/test/e2e/framework/pv_util.go b/test/e2e/framework/pv_util.go index e72900fa9ea..619cbab0f7e 100644 --- a/test/e2e/framework/pv_util.go +++ b/test/e2e/framework/pv_util.go @@ -32,6 +32,7 @@ import ( storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util" "k8s.io/kubernetes/pkg/volume/util" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" 
imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -501,7 +502,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV // Test the pod's exit code to be zero. func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error { ginkgo.By("Pod should terminate with exitcode 0 (success)") - if err := WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil { + if err := e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil { return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err) } e2elog.Logf("Pod %v succeeded ", pod.Name) @@ -856,7 +857,7 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st return nil, fmt.Errorf("pod Create API error: %v", err) } // Waiting for pod to be running - err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace) + err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace) if err != nil { return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err) } @@ -876,7 +877,7 @@ func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector m return nil, fmt.Errorf("pod Create API error: %v", err) } // Waiting for pod to be running - err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace) + err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace) if err != nil { return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err) } @@ -907,7 +908,7 @@ func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, } // Waiting for pod to be running - err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout) + err = e2epod.WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout) if err != nil { return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err) } @@ -962,7 +963,7 @@ func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSe return nil, fmt.Errorf("pod Create API error: %v", err) } // Waiting for pod to become Unschedulable - err = WaitForPodNameUnschedulableInNamespace(client, pod.Name, namespace) + err = e2epod.WaitForPodNameUnschedulableInNamespace(client, pod.Name, namespace) if err != nil { return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err) } diff --git a/test/e2e/framework/rc_util.go b/test/e2e/framework/rc_util.go index 65c800fb0c0..5a8073ce433 100644 --- a/test/e2e/framework/rc_util.go +++ b/test/e2e/framework/rc_util.go @@ -31,6 +31,7 @@ import ( scaleclient "k8s.io/client-go/scale" api "k8s.io/kubernetes/pkg/apis/core" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" ) @@ -137,7 +138,7 @@ func WaitForRCPodToDisappear(c clientset.Interface, ns, rcName, podName string) // NodeController evicts pod after 5 minutes, so we need timeout greater than that to observe effects. // The grace period must be set to 0 on the pod for it to be deleted during the partition. // Otherwise, it goes to the 'Terminating' state till the kubelet confirms deletion. 
- return WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute) + return e2epod.WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute) } // WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false) diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 4208c6b4995..a6578b167bd 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -40,6 +40,7 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/registry/core/service/portallocator" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -818,7 +819,7 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error { timeout := 2 * time.Minute - if !CheckPodsRunningReady(j.Client, namespace, pods, timeout) { + if !e2epod.CheckPodsRunningReady(j.Client, namespace, pods, timeout) { return fmt.Errorf("timeout waiting for %d pods to be ready", len(pods)) } return nil @@ -1303,9 +1304,9 @@ func StopServeHostnameService(clientset clientset.Interface, ns, name string) er // in the cluster. Each pod in the service is expected to echo its name. These // names are compared with the given expectedPods list after a sort | uniq. func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expectedPods []string, serviceIP string, servicePort int) error { - execPodName := CreateExecPodOrFail(c, ns, "execpod-", nil) + execPodName := e2epod.CreateExecPodOrFail(c, ns, "execpod-", nil) defer func() { - DeletePodOrFail(c, ns, execPodName) + e2epod.DeletePodOrFail(c, ns, execPodName) }() // Loop a bunch of times - the proxy is randomized, so we want a good diff --git a/test/e2e/framework/statefulset_utils.go b/test/e2e/framework/statefulset_utils.go index c712e68b34f..3a210bffb8e 100644 --- a/test/e2e/framework/statefulset_utils.go +++ b/test/e2e/framework/statefulset_utils.go @@ -39,6 +39,7 @@ import ( clientset "k8s.io/client-go/kubernetes" podutil "k8s.io/kubernetes/pkg/api/v1/pod" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/manifest" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -279,7 +280,7 @@ func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *apps.Stateful podList := s.GetPodList(ss) statefulPodCount := len(podList.Items) if statefulPodCount != count { - logPodStates(podList.Items) + e2epod.LogPodStates(podList.Items) if hard { Failf("StatefulSet %v scaled unexpectedly scaled to %d -> %d replicas", ss.Name, count, len(podList.Items)) } else { diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index ca58c0001c1..d2b5a6e7422 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -20,7 +20,6 @@ import ( "bytes" "context" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -58,7 +57,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" utilversion "k8s.io/apimachinery/pkg/util/version" @@ -85,8 +83,6 @@ import ( nodectlr 
"k8s.io/kubernetes/pkg/controller/nodelifecycle" "k8s.io/kubernetes/pkg/controller/service" "k8s.io/kubernetes/pkg/features" - kubepod "k8s.io/kubernetes/pkg/kubelet/pod" - "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" @@ -94,6 +90,7 @@ import ( taintutils "k8s.io/kubernetes/pkg/util/taints" "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -500,59 +497,6 @@ var ProvidersWithSSH = []string{"gce", "gke", "aws", "local"} type podCondition func(pod *v1.Pod) (bool, error) -// logPodStates logs basic info of provided pods for debugging. -func logPodStates(pods []v1.Pod) { - // Find maximum widths for pod, node, and phase strings for column printing. - maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE") - for i := range pods { - pod := &pods[i] - if len(pod.ObjectMeta.Name) > maxPodW { - maxPodW = len(pod.ObjectMeta.Name) - } - if len(pod.Spec.NodeName) > maxNodeW { - maxNodeW = len(pod.Spec.NodeName) - } - if len(pod.Status.Phase) > maxPhaseW { - maxPhaseW = len(pod.Status.Phase) - } - } - // Increase widths by one to separate by a single space. - maxPodW++ - maxNodeW++ - maxPhaseW++ - maxGraceW++ - - // Log pod info. * does space padding, - makes them left-aligned. - e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s", - maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS") - for _, pod := range pods { - grace := "" - if pod.DeletionGracePeriodSeconds != nil { - grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds) - } - e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s", - maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions) - } - e2elog.Logf("") // Final empty line helps for readability. -} - -// logPodTerminationMessages logs termination messages for failing pods. It's a short snippet (much smaller than full logs), but it often shows -// why pods crashed and since it is in the API, it's fast to retrieve. -func logPodTerminationMessages(pods []v1.Pod) { - for _, pod := range pods { - for _, status := range pod.Status.InitContainerStatuses { - if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 { - e2elog.Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message) - } - } - for _, status := range pod.Status.ContainerStatuses { - if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 { - e2elog.Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message) - } - } - } -} - // errorBadPodsStates create error message of basic info of bad pods for debugging. 
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string { errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout) @@ -577,116 +521,6 @@ func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState stri return errStr + buf.String() } -// WaitForPodsRunningReady waits up to timeout to ensure that all pods in -// namespace ns are either running and ready, or failed but controlled by a -// controller. Also, it ensures that at least minPods are running and -// ready. It has separate behavior from other 'wait for' pods functions in -// that it requests the list of pods on every iteration. This is useful, for -// example, in cluster startup, because the number of pods increases while -// waiting. All pods that are in SUCCESS state are not counted. -// -// If ignoreLabels is not empty, pods matching this selector are ignored. -func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error { - ignoreSelector := labels.SelectorFromSet(map[string]string{}) - start := time.Now() - e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready", - timeout, minPods, ns) - wg := sync.WaitGroup{} - wg.Add(1) - var ignoreNotReady bool - badPods := []v1.Pod{} - desiredPods := 0 - notReady := int32(0) - - if wait.PollImmediate(Poll, timeout, func() (bool, error) { - // We get the new list of pods, replication controllers, and - // replica sets in every iteration because more pods come - // online during startup and we want to ensure they are also - // checked. - replicas, replicaOk := int32(0), int32(0) - - rcList, err := c.CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{}) - if err != nil { - e2elog.Logf("Error getting replication controllers in namespace '%s': %v", ns, err) - if testutils.IsRetryableAPIError(err) { - return false, nil - } - return false, err - } - for _, rc := range rcList.Items { - replicas += *rc.Spec.Replicas - replicaOk += rc.Status.ReadyReplicas - } - - rsList, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{}) - if err != nil { - e2elog.Logf("Error getting replication sets in namespace %q: %v", ns, err) - if testutils.IsRetryableAPIError(err) { - return false, nil - } - return false, err - } - for _, rs := range rsList.Items { - replicas += *rs.Spec.Replicas - replicaOk += rs.Status.ReadyReplicas - } - - podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) - if err != nil { - e2elog.Logf("Error getting pods in namespace '%s': %v", ns, err) - if testutils.IsRetryableAPIError(err) { - return false, nil - } - return false, err - } - nOk := int32(0) - notReady = int32(0) - badPods = []v1.Pod{} - desiredPods = len(podList.Items) - for _, pod := range podList.Items { - if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) { - continue - } - res, err := testutils.PodRunningReady(&pod) - switch { - case res && err == nil: - nOk++ - case pod.Status.Phase == v1.PodSucceeded: - e2elog.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name) - // it doesn't make sense to wait for this pod - continue - case pod.Status.Phase != v1.PodFailed: - e2elog.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase) - notReady++ - badPods = 
append(badPods, pod) - default: - if metav1.GetControllerOf(&pod) == nil { - e2elog.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name) - badPods = append(badPods, pod) - } - //ignore failed pods that are controlled by some controller - } - } - - e2elog.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)", - nOk, len(podList.Items), ns, int(time.Since(start).Seconds())) - e2elog.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk) - - if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 { - return true, nil - } - ignoreNotReady = (notReady <= allowedNotReadyPods) - logPodStates(badPods) - return false, nil - }) != nil { - if !ignoreNotReady { - return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout)) - } - e2elog.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods) - } - return nil -} - // WaitForDaemonSets for all daemonsets in the given namespace to be ready // (defined as all but 'allowedNotReadyNodes' pods associated with that // daemonset are ready). @@ -725,9 +559,9 @@ func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string for _, container := range pod.Spec.Containers { if strings.Contains(container.Name, containerNameSubstr) { // Contains() matches all strings if substr is empty - logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name) + logs, err := e2epod.GetPodLogs(c, pod.Namespace, pod.Name, container.Name) if err != nil { - logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name) + logs, err = e2epod.GetPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name) if err != nil { logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err) } @@ -830,59 +664,6 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN return err } -// WaitForPodCondition waits a pods to be matched to the given condition. -func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error { - e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) - if err != nil { - if apierrs.IsNotFound(err) { - e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err) - return err - } - e2elog.Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, Poll, err) - continue - } - // log now so that current pod info is reported before calling `condition()` - e2elog.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v", - podName, pod.Status.Phase, pod.Status.Reason, podutil.IsPodReady(pod), time.Since(start)) - if done, err := condition(pod); done { - if err == nil { - e2elog.Logf("Pod %q satisfied condition %q", podName, desc) - } - return err - } - } - return fmt.Errorf("Gave up after waiting %v for pod %q to be %q", timeout, podName, desc) -} - -// WaitForMatchPodsCondition finds match pods based on the input ListOptions. 
-// waits and checks if all match pods are in the given podCondition -func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error { - e2elog.Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) { - pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(opts) - if err != nil { - return err - } - conditionNotMatch := []string{} - for _, pod := range pods.Items { - done, err := condition(&pod) - if done && err != nil { - return fmt.Errorf("Unexpected error: %v", err) - } - if !done { - conditionNotMatch = append(conditionNotMatch, format.Pod(&pod)) - } - } - if len(conditionNotMatch) <= 0 { - return err - } - e2elog.Logf("%d pods are not %s: %v", len(conditionNotMatch), desc, conditionNotMatch) - } - return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout) -} - // WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned // the default service account is what is associated with pods when they do not specify a service account // as a result, pods are not able to be provisioned in a namespace until the service account is provisioned @@ -1123,7 +904,7 @@ func deleteNS(c clientset.Interface, dynamicClient dynamic.Interface, namespace logNamespaces(c, namespace) // if we can, check if there were pods remaining with no timestamp. - remainingPods, missingTimestamp, _ = countRemainingPods(c, namespace) + remainingPods, missingTimestamp, _ = e2epod.CountRemainingPods(c, namespace) } // a timeout waiting for namespace deletion happened! @@ -1184,33 +965,6 @@ func logNamespace(c clientset.Interface, namespace string) { e2elog.Logf("namespace: %v, DeletionTimetamp: %v, Finalizers: %v, Phase: %v", ns.Name, ns.DeletionTimestamp, ns.Spec.Finalizers, ns.Status.Phase) } -// countRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp. -func countRemainingPods(c clientset.Interface, namespace string) (int, int, error) { - // check for remaining pods - pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{}) - if err != nil { - return 0, 0, err - } - - // nothing remains! 
- if len(pods.Items) == 0 { - return 0, 0, nil - } - - // stuff remains, log about it - logPodStates(pods.Items) - - // check if there were any pods with missing deletion timestamp - numPods := len(pods.Items) - missingTimestamp := 0 - for _, pod := range pods.Items { - if pod.DeletionTimestamp == nil { - missingTimestamp++ - } - } - return numPods, missingTimestamp, nil -} - // isDynamicDiscoveryError returns true if the error is a group discovery error // only for groups expected to be created/deleted dynamically during e2e tests func isDynamicDiscoveryError(err error) bool { @@ -1315,39 +1069,18 @@ func ContainerInitInvariant(older, newer runtime.Object) error { if err := initContainersInvariants(newPod); err != nil { return err } - oldInit, _, _ := podInitialized(oldPod) - newInit, _, _ := podInitialized(newPod) + oldInit, _, _ := e2epod.Initialized(oldPod) + newInit, _, _ := e2epod.Initialized(newPod) if oldInit && !newInit { - // TODO: we may in the future enable resetting PodInitialized = false if the kubelet needs to restart it + // TODO: we may in the future enable resetting Initialized = false if the kubelet needs to restart it // from scratch return fmt.Errorf("pod cannot be initialized and then regress to not being initialized") } return nil } -func podInitialized(pod *v1.Pod) (ok bool, failed bool, err error) { - allInit := true - initFailed := false - for _, s := range pod.Status.InitContainerStatuses { - switch { - case initFailed && s.State.Waiting == nil: - return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name) - case allInit && s.State.Waiting == nil: - return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name) - case s.State.Terminated == nil: - allInit = false - case s.State.Terminated.ExitCode != 0: - allInit = false - initFailed = true - case !s.Ready: - return allInit, initFailed, fmt.Errorf("container %s initialized but isn't marked as ready", s.Name) - } - } - return allInit, initFailed, nil -} - func initContainersInvariants(pod *v1.Pod) error { - allInit, initFailed, err := podInitialized(pod) + allInit, initFailed, err := e2epod.Initialized(pod) if err != nil { return err } @@ -1402,201 +1135,6 @@ func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error { return nil } -// WaitForPodRunningInNamespace waits default amount of time (PodStartTimeout) for the specified pod to become running. -// Returns an error if timeout occurs first, or pod goes in to failed state. -func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error { - if pod.Status.Phase == v1.PodRunning { - return nil - } - return WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, PodStartTimeout) -} - -// WaitForPodNameRunningInNamespace waits default amount of time (PodStartTimeout) for the specified pod to become running. -// Returns an error if timeout occurs first, or pod goes in to failed state. -func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error { - return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, PodStartTimeout) -} - -// waitForPodRunningInNamespaceSlow waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running. -// The resourceVersion is used when Watching object changes, it tells since when we care -// about changes to the pod. Returns an error if timeout occurs first, or pod goes in to failed state. 
-func waitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace string) error { - return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout) -} - -// WaitTimeoutForPodRunningInNamespace waits the given timeout duration for the specified pod to become running. -func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error { - return wait.PollImmediate(Poll, timeout, podRunning(c, podName, namespace)) -} - -func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc { - return func() (bool, error) { - pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) - if err != nil { - return false, err - } - switch pod.Status.Phase { - case v1.PodRunning: - return true, nil - case v1.PodFailed, v1.PodSucceeded: - return false, conditions.ErrPodCompleted - } - return false, nil - } -} - -// WaitTimeoutForPodEvent waits the given timeout duration for a pod event to occur. -func WaitTimeoutForPodEvent(c clientset.Interface, podName, namespace, eventSelector, msg string, timeout time.Duration) error { - return wait.PollImmediate(Poll, timeout, eventOccurred(c, podName, namespace, eventSelector, msg)) -} - -func eventOccurred(c clientset.Interface, podName, namespace, eventSelector, msg string) wait.ConditionFunc { - options := metav1.ListOptions{FieldSelector: eventSelector} - return func() (bool, error) { - events, err := c.CoreV1().Events(namespace).List(options) - if err != nil { - return false, fmt.Errorf("got error while getting pod events: %s", err) - } - for _, event := range events.Items { - if strings.Contains(event.Message, msg) { - return true, nil - } - } - return false, nil - } -} - -// WaitForPodNoLongerRunningInNamespace waits default amount of time (DefaultPodDeletionTimeout) for the specified pod to stop running. -// Returns an error if timeout occurs first. -func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error { - return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, DefaultPodDeletionTimeout) -} - -// WaitTimeoutForPodNoLongerRunningInNamespace waits the given timeout duration for the specified pod to stop. 
-func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error { - return wait.PollImmediate(Poll, timeout, podCompleted(c, podName, namespace)) -} - -func podCompleted(c clientset.Interface, podName, namespace string) wait.ConditionFunc { - return func() (bool, error) { - pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) - if err != nil { - return false, err - } - switch pod.Status.Phase { - case v1.PodFailed, v1.PodSucceeded: - return true, nil - } - return false, nil - } -} - -func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error { - return wait.PollImmediate(Poll, timeout, podRunningAndReady(c, podName, namespace)) -} - -func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc { - return func() (bool, error) { - pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) - if err != nil { - return false, err - } - switch pod.Status.Phase { - case v1.PodFailed, v1.PodSucceeded: - return false, conditions.ErrPodCompleted - case v1.PodRunning: - return podutil.IsPodReady(pod), nil - } - return false, nil - } -} - -// WaitForPodNotPending returns an error if it took too long for the pod to go out of pending state. -// The resourceVersion is used when Watching object changes, it tells since when we care -// about changes to the pod. -func WaitForPodNotPending(c clientset.Interface, ns, podName string) error { - return wait.PollImmediate(Poll, PodStartTimeout, podNotPending(c, podName, ns)) -} - -func podNotPending(c clientset.Interface, podName, namespace string) wait.ConditionFunc { - return func() (bool, error) { - pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) - if err != nil { - return false, err - } - switch pod.Status.Phase { - case v1.PodPending: - return false, nil - default: - return true, nil - } - } -} - -// waitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate, -// if the pod Get api returns an error (IsNotFound or other), or if the pod failed (and thus did not -// terminate) with an unexpected reason. Typically called to test that the passed-in pod is fully -// terminated (reason==""), but may be called to detect if a pod did *not* terminate according to -// the supplied reason. -func waitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error { - return WaitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *v1.Pod) (bool, error) { - // Only consider Failed pods. Successful pods will be deleted and detected in - // waitForPodCondition's Get call returning `IsNotFound` - if pod.Status.Phase == v1.PodFailed { - if pod.Status.Reason == reason { // short-circuit waitForPodCondition's loop - return true, nil - } - return true, fmt.Errorf("Expected pod %q in namespace %q to be terminated with reason %q, got reason: %q", podName, namespace, reason, pod.Status.Reason) - } - return false, nil - }) -} - -// waitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate. -// Unlike `waitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get -// api returns IsNotFound then the wait stops and nil is returned. If the Get api returns an error other -// than "not found" then that error is returned and the wait stops. 
-func waitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error { - return wait.PollImmediate(Poll, timeout, func() (bool, error) { - _, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) - if apierrs.IsNotFound(err) { - return true, nil // done - } - if err != nil { - return true, err // stop wait with error - } - return false, nil - }) -} - -// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long. -func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error { - return WaitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *v1.Pod) (bool, error) { - if pod.Spec.RestartPolicy == v1.RestartPolicyAlways { - return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName) - } - switch pod.Status.Phase { - case v1.PodSucceeded: - ginkgo.By("Saw pod success") - return true, nil - case v1.PodFailed: - return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status) - default: - return false, nil - } - }) -} - -// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout. -func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error { - return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, PodStartTimeout) -} - -// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout. -func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error { - return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout) -} - // WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status. func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error { options := metav1.ListOptions{FieldSelector: fields.Set{ @@ -1629,56 +1167,6 @@ func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D return err } -// WaitForPodToDisappear waits the given timeout duration for the specified pod to disappear. -func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error { - return wait.PollImmediate(interval, timeout, func() (bool, error) { - e2elog.Logf("Waiting for pod %s to disappear", podName) - options := metav1.ListOptions{LabelSelector: label.String()} - pods, err := c.CoreV1().Pods(ns).List(options) - if err != nil { - if testutils.IsRetryableAPIError(err) { - return false, nil - } - return false, err - } - found := false - for _, pod := range pods.Items { - if pod.Name == podName { - e2elog.Logf("Pod %s still exists", podName) - found = true - break - } - } - if !found { - e2elog.Logf("Pod %s no longer exists", podName) - return true, nil - } - return false, nil - }) -} - -// WaitForPodNameUnschedulableInNamespace returns an error if it takes too long for the pod to become Pending -// and have condition Status equal to Unschedulable, -// if the pod Get api returns an error (IsNotFound or other), or if the pod failed with an unexpected reason. -// Typically called to test that the passed-in pod is Pending and Unschedulable. 
-func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, namespace string) error { - return WaitForPodCondition(c, namespace, podName, "Unschedulable", PodStartTimeout, func(pod *v1.Pod) (bool, error) { - // Only consider Failed pods. Successful pods will be deleted and detected in - // waitForPodCondition's Get call returning `IsNotFound` - if pod.Status.Phase == v1.PodPending { - for _, cond := range pod.Status.Conditions { - if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" { - return true, nil - } - } - } - if pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed { - return true, fmt.Errorf("Expected pod %q in namespace %q to be in phase Pending, but got phase: %v", podName, namespace, pod.Status.Phase) - } - return false, nil - }) -} - // WaitForService waits until the service appears (exist == true), or disappears (exist == false) func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { @@ -1758,87 +1246,6 @@ func countEndpointsNum(e *v1.Endpoints) int { return num } -// PodProxyResponseChecker is a context for checking pods responses by issuing GETs to them (via the API -// proxy) and verifying that they answer with their own pod name. -type PodProxyResponseChecker struct { - c clientset.Interface - ns string - label labels.Selector - controllerName string - respondName bool // Whether the pod should respond with its own name. - pods *v1.PodList -} - -// NewPodProxyResponseChecker returns a context for checking pods responses. -func NewPodProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) PodProxyResponseChecker { - return PodProxyResponseChecker{c, ns, label, controllerName, respondName, pods} -} - -// CheckAllResponses issues GETs to all pods in the context and verify they -// reply with their own pod name. -func (r PodProxyResponseChecker) CheckAllResponses() (done bool, err error) { - successes := 0 - options := metav1.ListOptions{LabelSelector: r.label.String()} - currentPods, err := r.c.CoreV1().Pods(r.ns).List(options) - ExpectNoError(err, "Failed to get list of currentPods in namespace: %s", r.ns) - for i, pod := range r.pods.Items { - // Check that the replica list remains unchanged, otherwise we have problems. - if !isElementOf(pod.UID, currentPods) { - return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods) - } - - ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout) - defer cancel() - - body, err := r.c.CoreV1().RESTClient().Get(). - Context(ctx). - Namespace(r.ns). - Resource("pods"). - SubResource("proxy"). - Name(string(pod.Name)). - Do(). - Raw() - - if err != nil { - if ctx.Err() != nil { - // We may encounter errors here because of a race between the pod readiness and apiserver - // proxy. So, we log the error and retry if this occurs. 
- e2elog.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status) - return false, nil - } - e2elog.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status) - continue - } - // The response checker expects the pod's name unless !respondName, in - // which case it just checks for a non-empty response. - got := string(body) - what := "" - if r.respondName { - what = "expected" - want := pod.Name - if got != want { - e2elog.Logf("Controller %s: Replica %d [%s] expected response %q but got %q", - r.controllerName, i+1, pod.Name, want, got) - continue - } - } else { - what = "non-empty" - if len(got) == 0 { - e2elog.Logf("Controller %s: Replica %d [%s] expected non-empty response", - r.controllerName, i+1, pod.Name) - continue - } - } - successes++ - e2elog.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far", - r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items)) - } - if successes < len(r.pods.Items) { - return false, nil - } - return true, nil -} - // ServerVersionGTE returns true if v is greater than or equal to the server // version. // @@ -1868,99 +1275,6 @@ func KubectlVersion() (*utilversion.Version, error) { return utilversion.ParseSemantic(matches[1]) } -// PodsResponding waits for the pods to response. -func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error { - ginkgo.By("trying to dial each unique pod") - label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - return wait.PollImmediate(Poll, podRespondingTimeout, NewPodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses) -} - -// PodsCreated returns a pod list matched by the given name. -func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) { - label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - return PodsCreatedByLabel(c, ns, name, replicas, label) -} - -// PodsCreatedByLabel returns a created pod list matched by the given label. -func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) { - timeout := 2 * time.Minute - for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { - options := metav1.ListOptions{LabelSelector: label.String()} - - // List the pods, making sure we observe all the replicas. - pods, err := c.CoreV1().Pods(ns).List(options) - if err != nil { - return nil, err - } - - created := []v1.Pod{} - for _, pod := range pods.Items { - if pod.DeletionTimestamp != nil { - continue - } - created = append(created, pod) - } - e2elog.Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas) - - if int32(len(created)) == replicas { - pods.Items = created - return pods, nil - } - } - return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas) -} - -func podsRunning(c clientset.Interface, pods *v1.PodList) []error { - // Wait for the pods to enter the running state. Waiting loops until the pods - // are running so non-running pods cause a timeout for this test. 
- ginkgo.By("ensuring each pod is running") - e := []error{} - errorChan := make(chan error) - - for _, pod := range pods.Items { - go func(p v1.Pod) { - errorChan <- WaitForPodRunningInNamespace(c, &p) - }(pod) - } - - for range pods.Items { - err := <-errorChan - if err != nil { - e = append(e, err) - } - } - - return e -} - -// VerifyPods checks if the specified pod is responding. -func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error { - return podRunningMaybeResponding(c, ns, name, wantName, replicas, true) -} - -// VerifyPodsRunning checks if the specified pod is running. -func VerifyPodsRunning(c clientset.Interface, ns, name string, wantName bool, replicas int32) error { - return podRunningMaybeResponding(c, ns, name, wantName, replicas, false) -} - -func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error { - pods, err := PodsCreated(c, ns, name, replicas) - if err != nil { - return err - } - e := podsRunning(c, pods) - if len(e) > 0 { - return fmt.Errorf("failed to wait for pods running: %v", e) - } - if checkResponding { - err = PodsResponding(c, ns, name, wantName, pods) - if err != nil { - return fmt.Errorf("failed to wait for pods responding: %v", err) - } - } - return nil -} - // ServiceResponding waits for the service to be responding. func ServiceResponding(c clientset.Interface, ns, name string) error { ginkgo.By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name)) @@ -2161,6 +1475,33 @@ func KubectlCmd(args ...string) *exec.Cmd { return cmd } +// LookForStringInPodExec looks for the given string in the output of a command +// executed in a specific pod container. +// TODO(alejandrox1): move to pod/ subpkg once kubectl methods are refactored. +func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) { + return LookForString(expectedString, timeout, func() string { + // use the first container + args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"} + args = append(args, command...) + return RunKubectlOrDie(args...) + }) +} + +// LookForString looks for the given string in the output of fn, repeatedly calling fn until +// the timeout is reached or the string is found. Returns last log and possibly +// error if the string was not found. +// TODO(alejandrox1): move to pod/ subpkg once kubectl methods are refactored. +func LookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) { + for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) { + result = fn() + if strings.Contains(result, expectedString) { + return + } + } + err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result) + return +} + // KubectlBuilder is used to build, customize and execute a kubectl Command. // Add more functions to customize the builder as needed. type KubectlBuilder struct { @@ -2362,7 +1703,7 @@ func (f *Framework) MatchContainerOutput( }() // Wait for client pod to complete. - podErr := WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns) + podErr := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns) // Grab its logs. Get host first. podStatus, err := podClient.Get(createdPod.Name, metav1.GetOptions{}) @@ -2373,7 +1714,7 @@ func (f *Framework) MatchContainerOutput( if podErr != nil { // Pod failed. 
Dump all logs from all containers to see what's wrong for _, container := range podStatus.Spec.Containers { - logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, container.Name) + logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, container.Name) if err != nil { e2elog.Logf("Failed to get logs from node %q pod %q container %q: %v", podStatus.Spec.NodeName, podStatus.Name, container.Name, err) @@ -2388,7 +1729,7 @@ func (f *Framework) MatchContainerOutput( podStatus.Spec.NodeName, podStatus.Name, containerName, err) // Sometimes the actual containers take a second to get started, try to get logs for 60s - logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName) + logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName) if err != nil { e2elog.Logf("Failed to get logs from node %q pod %q container %q. %v", podStatus.Spec.NodeName, podStatus.Name, containerName, err) @@ -2437,7 +1778,7 @@ func DumpAllNamespaceInfo(c clientset.Interface, namespace string) { return c.CoreV1().Events(ns).List(opts) }, namespace) - dumpAllPodInfoForNamespace(c, namespace) + e2epod.DumpAllPodInfoForNamespace(c, namespace) // If cluster is large, then the following logs are basically useless, because: // 1. it takes tens of minutes or hours to grab all of them @@ -2468,15 +1809,6 @@ func (o byFirstTimestamp) Less(i, j int) bool { return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp) } -func dumpAllPodInfoForNamespace(c clientset.Interface, namespace string) { - pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{}) - if err != nil { - e2elog.Logf("unable to fetch pod debug info: %v", err) - } - logPodStates(pods.Items) - logPodTerminationMessages(pods.Items) -} - func dumpAllNodeInfo(c clientset.Interface) { // It should be OK to list unschedulable Nodes here. nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) @@ -2893,110 +2225,7 @@ func ScaleResource( if !wait { return nil } - return WaitForControlledPodsRunning(clientset, ns, name, kind) -} - -// WaitForControlledPodsRunning waits up to 10 minutes for pods to become Running. -func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error { - rtObject, err := getRuntimeObjectForKind(c, kind, ns, name) - if err != nil { - return err - } - selector, err := getSelectorFromRuntimeObject(rtObject) - if err != nil { - return err - } - replicas, err := getReplicasFromRuntimeObject(rtObject) - if err != nil { - return err - } - err = testutils.WaitForEnoughPodsWithLabelRunning(c, ns, selector, int(replicas)) - if err != nil { - return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err) - } - return nil -} - -// WaitForControlledPods waits up to PodListTimeout for getting pods of the specified controller name and return them. -func WaitForControlledPods(c clientset.Interface, ns, name string, kind schema.GroupKind) (pods *v1.PodList, err error) { - rtObject, err := getRuntimeObjectForKind(c, kind, ns, name) - if err != nil { - return nil, err - } - selector, err := getSelectorFromRuntimeObject(rtObject) - if err != nil { - return nil, err - } - return WaitForPodsWithLabel(c, ns, selector) -} - -// WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and at least one -// matching pod exists. Return the list of matching pods. 
-func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) { - err = wait.PollImmediate(Poll, podScheduledBeforeTimeout, - func() (bool, error) { - pods, err = WaitForPodsWithLabel(c, ns, label) - if err != nil { - return false, err - } - for _, pod := range pods.Items { - if pod.Spec.NodeName == "" { - return false, nil - } - } - return true, nil - }) - return pods, err -} - -// WaitForPodsWithLabel waits up to PodListTimeout for getting pods with certain label -func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) { - for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) { - options := metav1.ListOptions{LabelSelector: label.String()} - pods, err = c.CoreV1().Pods(ns).List(options) - if err != nil { - if testutils.IsRetryableAPIError(err) { - continue - } - return - } - if len(pods.Items) > 0 { - break - } - } - if pods == nil || len(pods.Items) == 0 { - err = fmt.Errorf("Timeout while waiting for pods with label %v", label) - } - return -} - -// WaitForPodsWithLabelRunningReady waits for exact amount of matching pods to become running and ready. -// Return the list of matching pods. -func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) { - var current int - err = wait.Poll(Poll, timeout, - func() (bool, error) { - pods, err := WaitForPodsWithLabel(c, ns, label) - if err != nil { - e2elog.Logf("Failed to list pods: %v", err) - if testutils.IsRetryableAPIError(err) { - return false, nil - } - return false, err - } - current = 0 - for _, pod := range pods.Items { - if flag, err := testutils.PodRunningReady(&pod); err == nil && flag == true { - current++ - } - } - if current != num { - e2elog.Logf("Got %v pods running and ready, expect: %v", current, num) - return false, nil - } - return true, nil - }) - return pods, err + return e2epod.WaitForControlledPodsRunning(clientset, ns, name, kind) } func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) { @@ -3135,7 +2364,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns timeout = timeout + 3*time.Minute } - err = waitForPodsInactive(ps, interval, timeout) + err = e2epod.WaitForPodsInactive(ps, interval, timeout) if err != nil { return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err) } @@ -3145,129 +2374,13 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns // In gce, at any point, small percentage of nodes can disappear for // ~10 minutes due to hostError. 20 minutes should be long enough to // restart VM in that case and delete the pod. - err = waitForPodsGone(ps, interval, 20*time.Minute) + err = e2epod.WaitForPodsGone(ps, interval, 20*time.Minute) if err != nil { return fmt.Errorf("error while waiting for pods gone %s: %v", name, err) } return nil } -// waitForPodsInactive waits until there are no active pods left in the PodStore. -// This is to make a fair comparison of deletion time between DeleteRCAndPods -// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas -// when the pod is inactvie. 
-func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error { - var activePods []*v1.Pod - err := wait.PollImmediate(interval, timeout, func() (bool, error) { - pods := ps.List() - activePods = nil - for _, pod := range pods { - if controller.IsPodActive(pod) { - activePods = append(activePods, pod) - } - } - - if len(activePods) != 0 { - return false, nil - } - return true, nil - }) - - if err == wait.ErrWaitTimeout { - for _, pod := range activePods { - e2elog.Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName) - } - return fmt.Errorf("there are %d active pods. E.g. %q on node %q", len(activePods), activePods[0].Name, activePods[0].Spec.NodeName) - } - return err -} - -// waitForPodsGone waits until there are no pods left in the PodStore. -func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error { - var pods []*v1.Pod - err := wait.PollImmediate(interval, timeout, func() (bool, error) { - if pods = ps.List(); len(pods) == 0 { - return true, nil - } - return false, nil - }) - - if err == wait.ErrWaitTimeout { - for _, pod := range pods { - e2elog.Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName) - } - return fmt.Errorf("there are %d pods left. E.g. %q on node %q", len(pods), pods[0].Name, pods[0].Spec.NodeName) - } - return err -} - -// WaitForPodsReady waits for the pods to become ready. -func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error { - label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - options := metav1.ListOptions{LabelSelector: label.String()} - return wait.Poll(Poll, 5*time.Minute, func() (bool, error) { - pods, err := c.CoreV1().Pods(ns).List(options) - if err != nil { - return false, nil - } - for _, pod := range pods.Items { - if !podutil.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) { - return false, nil - } - } - return true, nil - }) -} - -// WaitForNRestartablePods tries to list restarting pods using ps until it finds expect of them, -// returning their names if it can do so before timeout. -func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) { - var pods []*v1.Pod - var errLast error - found := wait.Poll(Poll, timeout, func() (bool, error) { - allPods := ps.List() - pods = FilterNonRestartablePods(allPods) - if len(pods) != expect { - errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods)) - e2elog.Logf("Error getting pods: %v", errLast) - return false, nil - } - return true, nil - }) == nil - podNames := make([]string, len(pods)) - for i, p := range pods { - podNames[i] = p.ObjectMeta.Name - } - if !found { - return podNames, fmt.Errorf("couldn't find %d pods within %v; last error: %v", - expect, timeout, errLast) - } - return podNames, nil -} - -// FilterNonRestartablePods filters out pods that will never get recreated if deleted after termination. -func FilterNonRestartablePods(pods []*v1.Pod) []*v1.Pod { - var results []*v1.Pod - for _, p := range pods { - if isNotRestartAlwaysMirrorPod(p) { - // Mirror pods with restart policy == Never will not get - // recreated if they are deleted after the pods have - // terminated. For now, we discount such pods. 
- // https://github.com/kubernetes/kubernetes/issues/34003 - continue - } - results = append(results, p) - } - return results -} - -func isNotRestartAlwaysMirrorPod(p *v1.Pod) bool { - if !kubepod.IsMirrorPod(p) { - return false - } - return p.Spec.RestartPolicy != v1.RestartPolicyAlways -} - type updateDSFunc func(*apps.DaemonSet) // UpdateDaemonSetWithRetries updates daemonsets with the given applyUpdate func @@ -3311,30 +2424,6 @@ func NodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string return hosts } -// NewExecPodSpec returns the pod spec of hostexec pod -func NewExecPodSpec(ns, name string, hostNetwork bool) *v1.Pod { - immediate := int64(0) - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "hostexec", - Image: imageutils.GetE2EImage(imageutils.Hostexec), - ImagePullPolicy: v1.PullIfNotPresent, - }, - }, - HostNetwork: hostNetwork, - SecurityContext: &v1.PodSecurityContext{}, - TerminationGracePeriodSeconds: &immediate, - }, - } - return pod -} - // RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec` // inside of a shell. func RunHostCmd(ns, name, cmd string) (string, error) { @@ -3367,140 +2456,6 @@ func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration } } -// LaunchHostExecPod launches a hostexec pod in the given namespace and waits -// until it's Running -func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod { - hostExecPod := NewExecPodSpec(ns, name, true) - pod, err := client.CoreV1().Pods(ns).Create(hostExecPod) - ExpectNoError(err) - err = WaitForPodRunningInNamespace(client, pod) - ExpectNoError(err) - return pod -} - -// newExecPodSpec returns the pod spec of exec pod -func newExecPodSpec(ns, generateName string) *v1.Pod { - immediate := int64(0) - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: generateName, - Namespace: ns, - }, - Spec: v1.PodSpec{ - TerminationGracePeriodSeconds: &immediate, - Containers: []v1.Container{ - { - Name: "exec", - Image: BusyBoxImage, - Command: []string{"sh", "-c", "trap exit TERM; while true; do sleep 5; done"}, - }, - }, - }, - } - return pod -} - -// CreateExecPodOrFail creates a simple busybox pod in a sleep loop used as a -// vessel for kubectl exec commands. -// Returns the name of the created pod. -func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) string { - e2elog.Logf("Creating new exec pod") - execPod := newExecPodSpec(ns, generateName) - if tweak != nil { - tweak(execPod) - } - created, err := client.CoreV1().Pods(ns).Create(execPod) - ExpectNoError(err, "failed to create new exec pod in namespace: %s", ns) - err = wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) { - retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{}) - if err != nil { - if testutils.IsRetryableAPIError(err) { - return false, nil - } - return false, err - } - return retrievedPod.Status.Phase == v1.PodRunning, nil - }) - ExpectNoError(err) - return created.Name -} - -// CreatePodOrFail creates a pod with the specified containerPorts. 
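The exec/hostexec fixtures removed above now come from e2epod. A hedged sketch of how a test might combine them after the move; the pod names, node pin, and shell command are placeholders, and RunHostCmd is assumed to remain in the framework package, as the unchanged context lines suggest.

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// checkNodeSockets launches a host-network hostexec pod plus an exec pod
// pinned to a specific node, then runs an illustrative command on the host.
func checkNodeSockets(c clientset.Interface, ns, nodeName string) string {
	// Host-network pod for inspecting node-level state (e.g. listening sockets).
	hostExec := e2epod.LaunchHostExecPod(c, ns, "hostexec")

	// Plain exec pod, scheduled onto a specific node via the tweak hook.
	_ = e2epod.CreateExecPodOrFail(c, ns, "execpod-", func(pod *v1.Pod) {
		pod.Spec.NodeName = nodeName
	})

	// RunHostCmd stays in framework; only the pod fixtures moved.
	out, err := framework.RunHostCmd(ns, hostExec.Name, "ss -ant46 'sport = :80' || true")
	framework.ExpectNoError(err, "failed to run command in hostexec pod")
	return out
}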
-func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort) { - ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns)) - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "pause", - Image: imageutils.GetPauseImageName(), - Ports: containerPorts, - // Add a dummy environment variable to work around a docker issue. - // https://github.com/docker/docker/issues/14203 - Env: []v1.EnvVar{{Name: "FOO", Value: " "}}, - }, - }, - }, - } - _, err := c.CoreV1().Pods(ns).Create(pod) - ExpectNoError(err, "failed to create pod %s in namespace %s", name, ns) -} - -// DeletePodOrFail deletes the pod of the specified namespace and name. -func DeletePodOrFail(c clientset.Interface, ns, name string) { - ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns)) - err := c.CoreV1().Pods(ns).Delete(name, nil) - ExpectNoError(err, "failed to delete pod %s in namespace %s", name, ns) -} - -// CheckPodsRunningReady returns whether all pods whose names are listed in -// podNames in namespace ns are running and ready, using c and waiting at most -// timeout. -func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool { - return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready") -} - -// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are -// listed in podNames in namespace ns are running and ready, or succeeded; use -// c and waiting at most timeout. -func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool { - return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded") -} - -// CheckPodsCondition returns whether all pods whose names are listed in podNames -// in namespace ns are in the condition, using c and waiting at most timeout. -func CheckPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool { - np := len(podNames) - e2elog.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames) - type waitPodResult struct { - success bool - podName string - } - result := make(chan waitPodResult, len(podNames)) - for _, podName := range podNames { - // Launch off pod readiness checkers. - go func(name string) { - err := WaitForPodCondition(c, ns, name, desc, timeout, condition) - result <- waitPodResult{err == nil, name} - }(podName) - } - // Wait for them all to finish. - success := true - for range podNames { - res := <-result - if !res.success { - e2elog.Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc) - success = false - } - } - e2elog.Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames) - return success -} - // WaitForNodeToBeReady returns whether node name is ready within timeout. 
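An illustrative create/check/delete round trip with the relocated fixtures, mirroring the Services endpoint tests later in this diff; the pod name and labels are invented for the example.

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// createCheckDelete creates a pause pod exposing port 80, confirms it becomes
// running and ready, then deletes it, all through the e2epod package.
func createCheckDelete(c clientset.Interface, ns string) {
	podLabels := map[string]string{"app": "endpoint-test"}
	e2epod.CreatePodOrFail(c, ns, "pod1", podLabels, []v1.ContainerPort{{ContainerPort: 80}})
	if !e2epod.CheckPodsRunningReady(c, ns, []string{"pod1"}, framework.PodStartTimeout) {
		framework.Failf("pod1 never became running and ready")
	}
	e2epod.DeletePodOrFail(c, ns, "pod1")
}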
func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool { return WaitForNodeToBe(c, name, v1.NodeReady, true, timeout) @@ -4197,30 +3152,6 @@ func LookForStringInFile(ns, podName, container, file, expectedString string, ti }) } -// LookForStringInPodExec looks for the given string in the output of a command executed in a specific pod container -func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) { - return LookForString(expectedString, timeout, func() string { - // use the first container - args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"} - args = append(args, command...) - return RunKubectlOrDie(args...) - }) -} - -// LookForString looks for the given string in the output of fn, repeatedly calling fn until -// the timeout is reached or the string is found. Returns last log and possibly -// error if the string was not found. -func LookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) { - for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) { - result = fn() - if strings.Contains(result, expectedString) { - return - } - } - err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result) - return -} - // getSvcNodePort returns the node port for the given service:port. func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) { svc, err := client.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) @@ -4277,35 +3208,6 @@ func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (s return "", fmt.Errorf("Failed to find external address for service %v", name) } -// GetPodLogs returns the logs of the specified container (namespace/pod/container). -// TODO(random-liu): Change this to be a member function of the framework. -func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { - return getPodLogsInternal(c, namespace, podName, containerName, false) -} - -func getPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) { - return getPodLogsInternal(c, namespace, podName, containerName, true) -} - -// utility function for gomega Eventually -func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) { - logs, err := c.CoreV1().RESTClient().Get(). - Resource("pods"). - Namespace(namespace). - Name(podName).SubResource("log"). - Param("container", containerName). - Param("previous", strconv.FormatBool(previous)). - Do(). - Raw() - if err != nil { - return "", err - } - if err == nil && strings.Contains(string(logs), "Internal Error") { - return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q", string(logs)) - } - return string(logs), err -} - // EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created // are actually cleaned up. Currently only implemented for GCE/GKE. func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error { @@ -4368,15 +3270,6 @@ func UnblockNetwork(from string, to string) { } } -func isElementOf(podUID types.UID, pods *v1.PodList) bool { - for _, pod := range pods.Items { - if pod.UID == podUID { - return true - } - } - return false -} - // timeout for proxy requests. 
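GetPodLogs moves out of the framework package here. A small sketch of the Eventually-based log assertion pattern the port-forward tests below use with the new import path; the timeouts are arbitrary placeholders.

package example

import (
	"time"

	"github.com/onsi/gomega"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// expectLogLine polls one container's logs until the expected substring
// appears or the (placeholder) timeout elapses.
func expectLogLine(c clientset.Interface, ns, podName, containerName, expected string) {
	gomega.Eventually(func() (string, error) {
		return e2epod.GetPodLogs(c, ns, podName, containerName)
	}, 2*time.Minute, 5*time.Second).Should(gomega.ContainSubstring(expected))
}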
const proxyTimeout = 2 * time.Minute @@ -4404,7 +3297,8 @@ func NodeProxyRequest(c clientset.Interface, node, endpoint string, port int) (r } } -// GetKubeletPods retrieves the list of pods on the kubelet +// GetKubeletPods retrieves the list of pods on the kubelet. +// TODO(alejandrox1): move to pod subpkg once node methods have been refactored. func GetKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) { return getKubeletPods(c, node, "pods") } @@ -4412,10 +3306,13 @@ func GetKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) { // GetKubeletRunningPods retrieves the list of running pods on the kubelet. The pods // includes necessary information (e.g., UID, name, namespace for // pods/containers), but do not contain the full spec. +// TODO(alejandrox1): move to pod subpkg once node methods have been refactored. func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, error) { return getKubeletPods(c, node, "runningpods") } +// TODO(alejandrox1): move to pod subpkg once node methods have been +// refactored. func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) { result := &v1.PodList{} client, err := NodeProxyRequest(c, node, resource, ports.KubeletPort) @@ -4474,10 +3371,10 @@ func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, pingC if err != nil { return err } - err = WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name) + err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name) if err != nil { - logs, logErr := GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, contName) + logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, contName) if logErr != nil { e2elog.Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr) } else { @@ -4518,23 +3415,6 @@ func parseSystemdServices(services string) string { return strings.TrimSpace(strings.Replace(services, ",", " ", -1)) } -// GetPodsInNamespace returns the pods in the given namespace. -func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) { - pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) - if err != nil { - return []*v1.Pod{}, err - } - ignoreSelector := labels.SelectorFromSet(ignoreLabels) - filtered := []*v1.Pod{} - for _, p := range pods.Items { - if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) { - continue - } - filtered = append(filtered, &p) - } - return filtered, nil -} - // RunCmd runs cmd using args and returns its stdout and stderr. It also outputs // cmd's stdout and stderr to their respective OS streams. func RunCmd(command string, args ...string) (string, string, error) { @@ -4581,29 +3461,6 @@ func retryCmd(command string, args ...string) (string, string, error) { return stdout, stderr, err } -// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods. 
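GetPodsInNamespace is removed above in favor of the e2epod copy. A sketch of the count-then-wait pattern the disruptive lifecycle suites below follow, assuming e2epod.WaitForPodsRunningReady takes the argument order shown at their call sites.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForSystemPods counts the pods currently in kube-system, then waits for
// that many pods to be running and ready again after a disruption.
func waitForSystemPods(c clientset.Interface) {
	systemPods, err := e2epod.GetPodsInNamespace(c, metav1.NamespaceSystem, map[string]string{})
	framework.ExpectNoError(err)
	err = e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(len(systemPods)), 0,
		framework.PodReadyBeforeTimeout, map[string]string{})
	framework.ExpectNoError(err)
}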
-func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) { - for _, pod := range pods.Items { - if !masterNodes.Has(pod.Spec.NodeName) { - if pod.Spec.NodeName != "" { - _, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) - gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true)) - gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionTrue)) - scheduledPods = append(scheduledPods, pod) - } else { - _, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled) - gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true)) - gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionFalse)) - if scheduledCondition.Reason == "Unschedulable" { - - notScheduledPods = append(notScheduledPods, pod) - } - } - } - } - return -} - // WaitForStableCluster waits until all existing pods are scheduled and returns their amount. func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int { timeout := 10 * time.Minute @@ -4620,13 +3477,13 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int { } allPods.Items = currentPods - scheduledPods, currentlyNotScheduledPods := GetPodsScheduled(masterNodes, allPods) + scheduledPods, currentlyNotScheduledPods := e2epod.GetPodsScheduled(masterNodes, allPods) for len(currentlyNotScheduledPods) != 0 { time.Sleep(2 * time.Second) allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) ExpectNoError(err) - scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods) + scheduledPods, currentlyNotScheduledPods = e2epod.GetPodsScheduled(masterNodes, allPods) if startTime.Add(timeout).Before(time.Now()) { Failf("Timed out after %v waiting for stable cluster.", timeout) @@ -4908,6 +3765,7 @@ func (f *Framework) NewAgnhostPod(name string, args ...string) *v1.Pod { } // CreateEmptyFileOnPod creates empty file at given path on the pod. +// TODO(alejandrox1): move to subpkg pod once kubectl methods have been refactored. 
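A short sketch of the relocated GetPodsScheduled helper as WaitForStableCluster now consumes it; the cluster-wide pod listing and the master-node set mirror the surrounding code, while the wrapper function itself is invented.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// countUnscheduled lists every pod in the cluster, splits the list into
// scheduled and not-yet-scheduled pods (ignoring master nodes), and returns
// how many are still waiting for a node.
func countUnscheduled(c clientset.Interface, masterNodes sets.String) int {
	allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
	framework.ExpectNoError(err)
	_, notScheduled := e2epod.GetPodsScheduled(masterNodes, allPods)
	return len(notScheduled)
}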
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error { _, err := RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath)) return err diff --git a/test/e2e/framework/volume/BUILD b/test/e2e/framework/volume/BUILD index 9fab5c1a9ca..8ea32bfa063 100644 --- a/test/e2e/framework/volume/BUILD +++ b/test/e2e/framework/volume/BUILD @@ -14,6 +14,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go index 93205e65c79..14109726645 100644 --- a/test/e2e/framework/volume/fixtures.go +++ b/test/e2e/framework/volume/fixtures.go @@ -53,6 +53,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -364,10 +365,10 @@ func StartVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod { } } if config.WaitForCompletion { - framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace)) framework.ExpectNoError(podClient.Delete(serverPod.Name, nil)) } else { - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, serverPod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, serverPod)) if pod == nil { ginkgo.By(fmt.Sprintf("locating the %q server pod", serverPodName)) pod, err = podClient.Get(serverPodName, metav1.GetOptions{}) @@ -487,7 +488,7 @@ func TestVolumeClient(client clientset.Interface, config TestConfig, fsGroup *in framework.Failf("Failed to create %s pod: %v", clientPod.Name, err) } - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, clientPod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, clientPod)) ginkgo.By("Checking that text file contents are perfect.") for i, test := range tests { @@ -566,7 +567,7 @@ func InjectHTML(client clientset.Interface, config TestConfig, fsGroup *int64, v injectPod, err := podClient.Create(injectPod) framework.ExpectNoError(err, "Failed to create injector pod: %v", err) - err = framework.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace) + err = e2epod.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace) framework.ExpectNoError(err) } diff --git a/test/e2e/instrumentation/logging/elasticsearch/BUILD b/test/e2e/instrumentation/logging/elasticsearch/BUILD index c18b3639fd1..65e7c7cd73a 100644 --- a/test/e2e/instrumentation/logging/elasticsearch/BUILD +++ b/test/e2e/instrumentation/logging/elasticsearch/BUILD @@ -21,6 +21,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/instrumentation/common:go_default_library", "//test/e2e/instrumentation/logging/utils:go_default_library", 
"//vendor/github.com/onsi/ginkgo:go_default_library", diff --git a/test/e2e/instrumentation/logging/elasticsearch/kibana.go b/test/e2e/instrumentation/logging/elasticsearch/kibana.go index 135b02782a6..c050e40d55d 100644 --- a/test/e2e/instrumentation/logging/elasticsearch/kibana.go +++ b/test/e2e/instrumentation/logging/elasticsearch/kibana.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" "github.com/onsi/ginkgo" @@ -76,7 +77,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) { pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).List(options) framework.ExpectNoError(err) for _, pod := range pods.Items { - err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod) + err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, &pod) framework.ExpectNoError(err) } diff --git a/test/e2e/instrumentation/logging/elasticsearch/utils.go b/test/e2e/instrumentation/logging/elasticsearch/utils.go index 107369e112a..be5934f7ed9 100644 --- a/test/e2e/instrumentation/logging/elasticsearch/utils.go +++ b/test/e2e/instrumentation/logging/elasticsearch/utils.go @@ -27,6 +27,7 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/instrumentation/logging/utils" ) @@ -79,7 +80,7 @@ func (p *esLogProvider) Init() error { return err } for _, pod := range pods.Items { - err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod) + err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, &pod) if err != nil { return err } diff --git a/test/e2e/instrumentation/logging/utils/BUILD b/test/e2e/instrumentation/logging/utils/BUILD index 8f81561673b..e17a07ce52c 100644 --- a/test/e2e/instrumentation/logging/utils/BUILD +++ b/test/e2e/instrumentation/logging/utils/BUILD @@ -26,6 +26,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/utils/image:go_default_library", "//vendor/k8s.io/utils/integer:go_default_library", ], diff --git a/test/e2e/instrumentation/logging/utils/logging_pod.go b/test/e2e/instrumentation/logging/utils/logging_pod.go index 92d7638b909..52bc598554a 100644 --- a/test/e2e/instrumentation/logging/utils/logging_pod.go +++ b/test/e2e/instrumentation/logging/utils/logging_pod.go @@ -27,6 +27,7 @@ import ( meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -130,7 +131,7 @@ func (p *loadLoggingPod) Start(f *framework.Framework) error { NodeName: p.nodeName, }, }) - return framework.WaitForPodNameRunningInNamespace(f.ClientSet, p.name, f.Namespace.Name) + return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, p.name, f.Namespace.Name) } func (p *loadLoggingPod) ExpectedLineCount() int { @@ -194,5 +195,5 @@ func (p *execLoggingPod) Start(f *framework.Framework) error { }, }, }) - return framework.WaitForPodNameRunningInNamespace(f.ClientSet, p.name, f.Namespace.Name) + return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, p.name, 
f.Namespace.Name) } diff --git a/test/e2e/instrumentation/monitoring/BUILD b/test/e2e/instrumentation/monitoring/BUILD index de6c7be921f..47606661224 100644 --- a/test/e2e/instrumentation/monitoring/BUILD +++ b/test/e2e/instrumentation/monitoring/BUILD @@ -40,6 +40,7 @@ go_library( "//test/e2e/framework/gpu:go_default_library", "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/metrics:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/instrumentation/common:go_default_library", "//test/e2e/scheduling:go_default_library", "//test/utils/image:go_default_library", diff --git a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go index 0bb55f8d018..8d8e197454e 100644 --- a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go +++ b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go @@ -30,6 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" ) @@ -71,7 +72,7 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) { } // Create test pod with unique name. - framework.CreateExecPodOrFail(kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) { + e2epod.CreateExecPodOrFail(kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) { pod.Spec.Containers[0].Name = uniqueContainerName }) defer kubeClient.CoreV1().Pods(f.Namespace.Name).Delete(uniqueContainerName, &metav1.DeleteOptions{}) diff --git a/test/e2e/kubectl/BUILD b/test/e2e/kubectl/BUILD index 54221e5ffa8..42be70acf06 100644 --- a/test/e2e/kubectl/BUILD +++ b/test/e2e/kubectl/BUILD @@ -36,6 +36,7 @@ go_library( "//test/e2e/framework/endpoints:go_default_library", "//test/e2e/framework/job:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/scheduling:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index 041a0c40586..b0f583c8778 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -63,6 +63,7 @@ import ( e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints" jobutil "k8s.io/kubernetes/test/e2e/framework/job" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/testfiles" "k8s.io/kubernetes/test/e2e/scheduling" testutils "k8s.io/kubernetes/test/utils" @@ -368,7 +369,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml)) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in"))) framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns)) - gomega.Expect(framework.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) + gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) }) ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, simplePodSelector) @@ -516,7 +517,7 @@ var _ = SIGDescribe("Kubectl client", func() { WithStdinData("abcd1234"). 
Exec() framework.ExpectNoError(err) - framework.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout) + e2epod.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout) ginkgo.By("running a failing command with --leave-stdin-open") _, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42"). @@ -539,7 +540,7 @@ var _ = SIGDescribe("Kubectl client", func() { // NOTE: we cannot guarantee our output showed up in the container logs before stdin was closed, so we have // to loop test. err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { - if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) { + if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) { framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test") } logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name) @@ -567,14 +568,14 @@ var _ = SIGDescribe("Kubectl client", func() { g = func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } runTestPod, _, err = polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g) gomega.Expect(err).To(gomega.BeNil()) - if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) { + if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) { framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") } // NOTE: we cannot guarantee our output showed up in the container logs before stdin was closed, so we have // to loop test. 
err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { - if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) { + if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) { framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") } logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name) @@ -593,7 +594,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.By("executing a command with run") framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+busyboxImage, "--restart=OnFailure", nsFlag, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF") - if !framework.CheckPodsRunningReady(c, ns, []string{podName}, framework.PodStartTimeout) { + if !e2epod.CheckPodsRunningReady(c, ns, []string{podName}, framework.PodStartTimeout) { framework.Failf("Pod for run-log-test was not ready") } @@ -1213,7 +1214,7 @@ metadata: podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in"))) nsFlag = fmt.Sprintf("--namespace=%v", ns) framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) - gomega.Expect(framework.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) + gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) }) ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, pausePodSelector) @@ -1254,7 +1255,7 @@ metadata: nsFlag = fmt.Sprintf("--namespace=%v", ns) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml"))) framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag) - gomega.Expect(framework.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) + gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout)).To(gomega.BeTrue()) }) ginkgo.AfterEach(func() { cleanupKubectlInputs(podYaml, ns, busyboxPodSelector) @@ -1438,7 +1439,7 @@ metadata: framework.RunKubectlOrDie("run", name, "--image="+nginxImage, nsFlag) ginkgo.By("verifying the pod controlled by " + name + " gets created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name})) - podlist, err := framework.WaitForPodsWithLabel(c, ns, label) + podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label) if err != nil { framework.Failf("Failed getting pod controlled by %s: %v", name, err) } @@ -1483,7 +1484,7 @@ metadata: ginkgo.By("verifying the pod controlled by rc " + rcName + " was created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName})) - podlist, err := framework.WaitForPodsWithLabel(c, ns, label) + podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label) if err != nil { framework.Failf("Failed getting pod controlled by rc %s: %v", rcName, err) } @@ -1498,7 +1499,7 @@ metadata: for _, pod := range pods { podNames = append(podNames, pod.Name) } - if !framework.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) { + if !e2epod.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) { framework.Failf("Pods for rc %s were not ready", rcName) } _, err = framework.RunKubectl("logs", "rc/"+rcName, nsFlag) @@ -1594,7 +1595,7 @@ metadata: ginkgo.By("verifying the pod controlled by deployment " + dName + " was created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": dName})) - 
podlist, err := framework.WaitForPodsWithLabel(c, ns, label) + podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label) if err != nil { framework.Failf("Failed getting pod controlled by deployment %s: %v", dName, err) } diff --git a/test/e2e/kubectl/portforward.go b/test/e2e/kubectl/portforward.go index 55cdc1202fa..cf326a4a81a 100644 --- a/test/e2e/kubectl/portforward.go +++ b/test/e2e/kubectl/portforward.go @@ -37,6 +37,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -119,7 +120,7 @@ func pfPod(expectedClientData, chunks, chunkSize, chunkIntervalMillis string, bi // WaitForTerminatedContainer wait till a given container be terminated for a given pod. func WaitForTerminatedContainer(f *framework.Framework, pod *v1.Pod, containerName string) error { - return framework.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "container terminated", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) { + return e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "container terminated", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) { if len(testutils.TerminatedContainers(pod)[containerName]) > 0 { return true, nil } @@ -241,7 +242,7 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { ginkgo.By("Verifying logs") gomega.Eventually(func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( gomega.ContainSubstring("Accepted client connection"), gomega.ContainSubstring("Done"), @@ -278,7 +279,7 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { ginkgo.By("Verifying logs") gomega.Eventually(func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( gomega.ContainSubstring("Accepted client connection"), gomega.ContainSubstring("Expected to read 3 bytes from client, but got 0 instead"), @@ -336,7 +337,7 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) ginkgo.By("Verifying logs") gomega.Eventually(func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( gomega.ContainSubstring("Accepted client connection"), gomega.ContainSubstring("Received expected client data"), @@ -425,7 +426,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { ginkgo.By("Verifying logs") gomega.Eventually(func() (string, error) { - return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") + return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester") }, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll( gomega.ContainSubstring("Accepted client connection"), gomega.ContainSubstring("Received expected client data"), diff --git 
a/test/e2e/lifecycle/BUILD b/test/e2e/lifecycle/BUILD index d910838f814..4e07224462e 100644 --- a/test/e2e/lifecycle/BUILD +++ b/test/e2e/lifecycle/BUILD @@ -36,6 +36,7 @@ go_library( "//test/e2e/framework/ginkgowrapper:go_default_library", "//test/e2e/framework/lifecycle:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/e2e/upgrades:go_default_library", "//test/e2e/upgrades/apps:go_default_library", diff --git a/test/e2e/lifecycle/ha_master.go b/test/e2e/lifecycle/ha_master.go index 2789917e498..8e7f5ca1680 100644 --- a/test/e2e/lifecycle/ha_master.go +++ b/test/e2e/lifecycle/ha_master.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" ) func addMasterReplica(zone string) error { @@ -69,7 +70,7 @@ func removeWorkerNodes(zone string) error { func verifyRCs(c clientset.Interface, ns string, names []string) { for _, name := range names { - framework.ExpectNoError(framework.VerifyPods(c, ns, name, true, 1)) + framework.ExpectNoError(e2epod.VerifyPods(c, ns, name, true, 1)) } } diff --git a/test/e2e/lifecycle/node_lease.go b/test/e2e/lifecycle/node_lease.go index a5ae9d5cf6a..50593ac0f5b 100644 --- a/test/e2e/lifecycle/node_lease.go +++ b/test/e2e/lifecycle/node_lease.go @@ -26,6 +26,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -41,7 +42,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() { ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name - systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{}) + systemPods, err := e2epod.GetPodsInNamespace(c, ns, map[string]string{}) gomega.Expect(err).To(gomega.BeNil()) systemPodsNo = int32(len(systemPods)) if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 { @@ -92,7 +93,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() { // Many e2e tests assume that the cluster is fully healthy before they start. Wait until // the cluster is restored to health. ginkgo.By("waiting for system pods to successfully restart") - err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{}) + err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{}) gomega.Expect(err).To(gomega.BeNil()) }) diff --git a/test/e2e/lifecycle/reboot.go b/test/e2e/lifecycle/reboot.go index 76789fa6571..e84e6bc3603 100644 --- a/test/e2e/lifecycle/reboot.go +++ b/test/e2e/lifecycle/reboot.go @@ -31,6 +31,7 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" testutils "k8s.io/kubernetes/test/utils" @@ -197,7 +198,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName // Print the log of the containers if pod is not running and ready. 
for _, container := range p.Status.ContainerStatuses { cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name) - log, err := framework.GetPodLogs(c, p.Namespace, p.Name, container.Name) + log, err := e2epod.GetPodLogs(c, p.Namespace, p.Name, container.Name) printFn(cIdentifer, log, err, false) // Get log from the previous container. if container.RestartCount > 0 { @@ -260,7 +261,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool { // For each pod, we do a sanity check to ensure it's running / healthy // or succeeded now, as that's what we'll be checking later. - if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, framework.PodReadyBeforeTimeout) { + if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, framework.PodReadyBeforeTimeout) { printStatusAndLogsForNotReadyPods(c, ns, podNames, pods) return false } @@ -283,7 +284,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool { // Ensure all of the pods that we found on this node before the reboot are // running / healthy, or succeeded. - if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, rebootPodReadyAgainTimeout) { + if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, rebootPodReadyAgainTimeout) { newPods := ps.List() printStatusAndLogsForNotReadyPods(c, ns, podNames, newPods) return false diff --git a/test/e2e/lifecycle/resize_nodes.go b/test/e2e/lifecycle/resize_nodes.go index 68712f07d0f..ade2ac32e9a 100644 --- a/test/e2e/lifecycle/resize_nodes.go +++ b/test/e2e/lifecycle/resize_nodes.go @@ -25,6 +25,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "github.com/onsi/ginkgo" ) @@ -49,7 +50,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name - systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{}) + systemPods, err := e2epod.GetPodsInNamespace(c, ns, map[string]string{}) framework.ExpectNoError(err) systemPodsNo = int32(len(systemPods)) if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 { @@ -102,7 +103,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { // Many e2e tests assume that the cluster is fully healthy before they start. Wait until // the cluster is restored to health. 
ginkgo.By("waiting for system pods to successfully restart") - err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{}) + err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{}) framework.ExpectNoError(err) }) @@ -114,7 +115,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { framework.ExpectNoError(err) originalNodeCount = int32(numNodes) common.NewRCByName(c, ns, name, originalNodeCount, nil) - err = framework.VerifyPods(c, ns, name, true, originalNodeCount) + err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount) framework.ExpectNoError(err) targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1) @@ -131,7 +132,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { time.Sleep(time.Minute) ginkgo.By("verifying whether the pods from the removed node are recreated") - err = framework.VerifyPods(c, ns, name, true, originalNodeCount) + err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount) framework.ExpectNoError(err) }) @@ -145,7 +146,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { framework.ExpectNoError(err) originalNodeCount = int32(numNodes) common.NewRCByName(c, ns, name, originalNodeCount, nil) - err = framework.VerifyPods(c, ns, name, true, originalNodeCount) + err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount) framework.ExpectNoError(err) targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes + 1) @@ -160,7 +161,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { ginkgo.By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", originalNodeCount+1)) err = resizeRC(c, ns, name, originalNodeCount+1) framework.ExpectNoError(err) - err = framework.VerifyPods(c, ns, name, true, originalNodeCount+1) + err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount+1) framework.ExpectNoError(err) }) }) diff --git a/test/e2e/lifecycle/restart.go b/test/e2e/lifecycle/restart.go index bdaea382a06..2ead26f6edd 100644 --- a/test/e2e/lifecycle/restart.go +++ b/test/e2e/lifecycle/restart.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" "github.com/onsi/ginkgo" @@ -65,13 +66,13 @@ var _ = SIGDescribe("Restart [Disruptive]", func() { ginkgo.By("ensuring all pods are running and ready") allPods := ps.List() - pods := framework.FilterNonRestartablePods(allPods) + pods := e2epod.FilterNonRestartablePods(allPods) originalPodNames = make([]string, len(pods)) for i, p := range pods { originalPodNames[i] = p.ObjectMeta.Name } - if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) { + if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) { printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, originalPodNames, pods) framework.Failf("At least one pod wasn't running and ready or succeeded at test start.") } @@ -106,10 +107,10 @@ var _ = SIGDescribe("Restart [Disruptive]", func() { // across node restarts. 
ginkgo.By("ensuring the same number of pods are running and ready after restart") podCheckStart := time.Now() - podNamesAfter, err := framework.WaitForNRestartablePods(ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout) + podNamesAfter, err := e2epod.WaitForNRestartablePods(ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout) framework.ExpectNoError(err) remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart) - if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) { + if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) { pods := ps.List() printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, podNamesAfter, pods) framework.Failf("At least one pod wasn't running and ready after the restart.") diff --git a/test/e2e/network/BUILD b/test/e2e/network/BUILD index 3bce08ba9e0..ff0110ae7fa 100644 --- a/test/e2e/network/BUILD +++ b/test/e2e/network/BUILD @@ -62,6 +62,7 @@ go_library( "//test/e2e/framework/endpoints:go_default_library", "//test/e2e/framework/ingress:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/providers/gce:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/e2e/network/scale:go_default_library", diff --git a/test/e2e/network/example_cluster_dns.go b/test/e2e/network/example_cluster_dns.go index 1f82363c726..f6c27f6f03a 100644 --- a/test/e2e/network/example_cluster_dns.go +++ b/test/e2e/network/example_cluster_dns.go @@ -31,6 +31,7 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "github.com/onsi/ginkgo" ) @@ -96,7 +97,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { // wait for objects for _, ns := range namespaces { - framework.WaitForControlledPodsRunning(c, ns.Name, backendRcName, api.Kind("ReplicationController")) + e2epod.WaitForControlledPodsRunning(c, ns.Name, backendRcName, api.Kind("ReplicationController")) framework.WaitForService(c, ns.Name, backendSvcName, true, framework.Poll, framework.ServiceStartTimeout) } // it is not enough that pods are running because they may be set to running, but @@ -106,7 +107,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.CoreV1().Pods(ns.Name).List(options) framework.ExpectNoError(err, "failed to list pods in namespace: %s", ns.Name) - err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods) + err = e2epod.PodsResponding(c, ns.Name, backendPodName, false, pods) framework.ExpectNoError(err, "waiting for all pods to respond") e2elog.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name) @@ -145,7 +146,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { // wait until the pods have been scheduler, i.e. are not Pending anymore. Remember // that we cannot wait for the pods to be running because our pods terminate by themselves. 
for _, ns := range namespaces { - err := framework.WaitForPodNotPending(c, ns.Name, frontendPodName) + err := e2epod.WaitForPodNotPending(c, ns.Name, frontendPodName) framework.ExpectNoError(err) } diff --git a/test/e2e/network/network_policy.go b/test/e2e/network/network_policy.go index e39d0b59217..b50b7479b4e 100644 --- a/test/e2e/network/network_policy.go +++ b/test/e2e/network/network_policy.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" "fmt" @@ -135,7 +136,7 @@ var _ = SIGDescribe("NetworkPolicy", func() { // Create Server with Service in NS-B e2elog.Logf("Waiting for server to come up.") - err = framework.WaitForPodRunningInNamespace(f.ClientSet, podServer) + err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServer) framework.ExpectNoError(err) // Create Policy for that service that allows traffic only via namespace B @@ -545,14 +546,14 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se }() e2elog.Logf("Waiting for %s to complete.", podClient.Name) - err := framework.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podClient.Name, ns.Name) + err := e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podClient.Name, ns.Name) framework.ExpectNoError(err, "Pod did not finish as expected.") e2elog.Logf("Waiting for %s to complete.", podClient.Name) - err = framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name) + err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name) if err != nil { // Collect pod logs when we see a failure. - logs, logErr := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName)) + logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName)) if logErr != nil { framework.Failf("Error getting container logs: %s", logErr) } @@ -564,7 +565,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se } // Collect the list of pods running in the test namespace. - podsInNS, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{}) + podsInNS, err := e2epod.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{}) if err != nil { e2elog.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err) } @@ -592,13 +593,13 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, }() e2elog.Logf("Waiting for %s to complete.", podClient.Name) - err := framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name) + err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name) // We expect an error here since it's a cannot connect test. // Dump debug information if the error was nil. if err == nil { // Collect pod logs when we see a failure. - logs, logErr := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName)) + logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName)) if logErr != nil { framework.Failf("Error getting container logs: %s", logErr) } @@ -610,7 +611,7 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, } // Collect the list of pods running in the test namespace. 
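The network policy tests above wrap pod completion and log capture. A hedged sketch of that sequence with the new imports; the "<name>-container" naming convention is copied from those tests, everything else is illustrative.

package example

import (
	"fmt"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForClientPod waits for a short-lived client pod to finish and succeed,
// dumping its container logs if it did not.
func waitForClientPod(c clientset.Interface, ns, podName string) {
	err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
	framework.ExpectNoError(err, "pod %s did not finish", podName)

	if err := e2epod.WaitForPodSuccessInNamespace(c, podName, ns); err != nil {
		logs, logErr := e2epod.GetPodLogs(c, ns, podName, fmt.Sprintf("%s-container", podName))
		if logErr != nil {
			framework.Failf("error getting container logs: %s", logErr)
		}
		e2elog.Logf("pod %s logs:\n%s", podName, logs)
		framework.Failf("pod %s did not succeed", podName)
	}
}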
- podsInNS, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{}) + podsInNS, err := e2epod.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{}) if err != nil { e2elog.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err) } diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 93a4ce8e627..344b843ecdf 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -40,6 +40,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/providers/gce" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" imageutils "k8s.io/kubernetes/test/utils/image" @@ -154,22 +155,22 @@ var _ = SIGDescribe("Services", func() { name1 := "pod1" name2 := "pod2" - framework.CreatePodOrFail(cs, ns, name1, labels, []v1.ContainerPort{{ContainerPort: 80}}) + e2epod.CreatePodOrFail(cs, ns, name1, labels, []v1.ContainerPort{{ContainerPort: 80}}) names[name1] = true err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{name1: {80}}) framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns) - framework.CreatePodOrFail(cs, ns, name2, labels, []v1.ContainerPort{{ContainerPort: 80}}) + e2epod.CreatePodOrFail(cs, ns, name2, labels, []v1.ContainerPort{{ContainerPort: 80}}) names[name2] = true err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{name1: {80}, name2: {80}}) framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns) - framework.DeletePodOrFail(cs, ns, name1) + e2epod.DeletePodOrFail(cs, ns, name1) delete(names, name1) err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{name2: {80}}) framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns) - framework.DeletePodOrFail(cs, ns, name2) + e2epod.DeletePodOrFail(cs, ns, name2) delete(names, name2) err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{}) framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns) @@ -240,22 +241,22 @@ var _ = SIGDescribe("Services", func() { podname1 := "pod1" podname2 := "pod2" - framework.CreatePodOrFail(cs, ns, podname1, labels, containerPorts1) + e2epod.CreatePodOrFail(cs, ns, podname1, labels, containerPorts1) names[podname1] = true err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{podname1: {port1}}) framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns) - framework.CreatePodOrFail(cs, ns, podname2, labels, containerPorts2) + e2epod.CreatePodOrFail(cs, ns, podname2, labels, containerPorts2) names[podname2] = true err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{podname1: {port1}, podname2: {port2}}) framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns) - framework.DeletePodOrFail(cs, ns, podname1) + e2epod.DeletePodOrFail(cs, ns, podname1) delete(names, podname1) err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{podname2: {port2}}) 
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns) - framework.DeletePodOrFail(cs, ns, podname2) + e2epod.DeletePodOrFail(cs, ns, podname2) delete(names, podname2) err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{}) framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns) @@ -505,7 +506,7 @@ var _ = SIGDescribe("Services", func() { jig.TestReachableHTTP(nodeIP, nodePort, framework.KubeProxyLagTimeout) ginkgo.By("verifying the node port is locked") - hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") + hostExec := e2epod.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") // Even if the node-ip:node-port check above passed, this hostexec pod // might fall on a node with a laggy kube-proxy. cmd := fmt.Sprintf(`for i in $(seq 1 300); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 1; done; exit 1`, nodePort) @@ -1234,7 +1235,7 @@ var _ = SIGDescribe("Services", func() { err = t.DeleteService(serviceName) framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns) - hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") + hostExec := e2epod.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort) var stdout string if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { @@ -1321,12 +1322,12 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err) ginkgo.By("Verifying pods for RC " + t.Name) - framework.ExpectNoError(framework.VerifyPods(t.Client, t.Namespace, t.Name, false, 1)) + framework.ExpectNoError(e2epod.VerifyPods(t.Client, t.Namespace, t.Name, false, 1)) svcName := fmt.Sprintf("%v.%v.svc.%v", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) ginkgo.By("Waiting for endpoints of Service with DNS name " + svcName) - execPodName := framework.CreateExecPodOrFail(f.ClientSet, f.Namespace.Name, "execpod-", nil) + execPodName := e2epod.CreateExecPodOrFail(f.ClientSet, f.Namespace.Name, "execpod-", nil) cmd := fmt.Sprintf("wget -qO- http://%s:%d/", svcName, port) var stdout string if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { @@ -1422,8 +1423,8 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("Prepare allow source ips") // prepare the exec pods // acceptPod are allowed to access the loadbalancer - acceptPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-accept", nil) - dropPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-drop", nil) + acceptPodName := e2epod.CreateExecPodOrFail(cs, namespace, "execpod-accept", nil) + dropPodName := e2epod.CreateExecPodOrFail(cs, namespace, "execpod-drop", nil) acceptPod, err := cs.CoreV1().Pods(namespace).Get(acceptPodName, metav1.GetOptions{}) framework.ExpectNoError(err, "failed to fetch pod: %s in namespace: %s", acceptPodName, namespace) @@ -1521,7 +1522,7 @@ var _ = SIGDescribe("Services", func() { // a pod to test the service. 
ginkgo.By("hitting the internal load balancer from pod") e2elog.Logf("creating pod with host network") - hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec") + hostExec := e2epod.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec") e2elog.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName) tcpIngressIP := framework.GetIngressPoint(lbIngress) @@ -1824,7 +1825,7 @@ var _ = SIGDescribe("Services", func() { podName := "execpod-noendpoints" ginkgo.By(fmt.Sprintf("creating %v on node %v", podName, nodeName)) - execPodName := framework.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) { + execPodName := e2epod.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) { pod.Spec.NodeName = nodeName }) execPod, err := f.ClientSet.CoreV1().Pods(namespace).Get(execPodName, metav1.GetOptions{}) @@ -2142,7 +2143,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { podName := "execpod-sourceip" ginkgo.By(fmt.Sprintf("Creating %v on node %v", podName, nodeName)) - execPodName := framework.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) { + execPodName := e2epod.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) { pod.Spec.NodeName = nodeName }) defer func() { @@ -2281,7 +2282,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeName, serviceIP string, servicePort int) (string, string) { e2elog.Logf("Creating an exec pod on node %v", nodeName) - execPodName := framework.CreateExecPodOrFail(f.ClientSet, ns, fmt.Sprintf("execpod-sourceip-%s", nodeName), func(pod *v1.Pod) { + execPodName := e2epod.CreateExecPodOrFail(f.ClientSet, ns, fmt.Sprintf("execpod-sourceip-%s", nodeName), func(pod *v1.Pod) { pod.Spec.NodeName = nodeName }) defer func() { @@ -2361,7 +2362,7 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor svcIP = svc.Spec.ClusterIP } - execPodName := framework.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil) + execPodName := e2epod.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil) defer func() { e2elog.Logf("Cleaning up the exec pod") err := cs.CoreV1().Pods(ns).Delete(execPodName, nil) diff --git a/test/e2e/node/BUILD b/test/e2e/node/BUILD index 4e73f0ecac6..68877329bd5 100644 --- a/test/e2e/node/BUILD +++ b/test/e2e/node/BUILD @@ -38,6 +38,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/job:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/e2e/framework/volume:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/node/pre_stop.go b/test/e2e/node/pre_stop.go index ce08a64e178..2f26f7ffd95 100644 --- a/test/e2e/node/pre_stop.go +++ b/test/e2e/node/pre_stop.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -67,7 +68,7 @@ func testPreStop(c clientset.Interface, ns string) { }() ginkgo.By("Waiting for pods to come up.") - err = framework.WaitForPodRunningInNamespace(c, podDescr) + err = e2epod.WaitForPodRunningInNamespace(c, podDescr) 
framework.ExpectNoError(err, "waiting for server pod to start") val := "{\"Source\": \"prestop\"}" @@ -112,7 +113,7 @@ func testPreStop(c clientset.Interface, ns string) { } }() - err = framework.WaitForPodRunningInNamespace(c, preStopDescr) + err = e2epod.WaitForPodRunningInNamespace(c, preStopDescr) framework.ExpectNoError(err, "waiting for tester pod to start") // Delete the pod with the preStop handler. diff --git a/test/e2e/node/security_context.go b/test/e2e/node/security_context.go index 652f397d841..8959eb49b7d 100644 --- a/test/e2e/node/security_context.go +++ b/test/e2e/node/security_context.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -207,7 +208,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool) pod, err := client.Create(pod) framework.ExpectNoError(err, "Error creating pod %v", pod) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) testContent := "hello" testFilePath := mountPath + "/TEST" diff --git a/test/e2e/scalability/BUILD b/test/e2e/scalability/BUILD index 83a4d76727d..63797e3c12c 100644 --- a/test/e2e/scalability/BUILD +++ b/test/e2e/scalability/BUILD @@ -39,6 +39,7 @@ go_library( "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/timer:go_default_library", "//test/utils:go_default_library", "//test/utils/image:go_default_library", diff --git a/test/e2e/scalability/density.go b/test/e2e/scalability/density.go index ac36e1fdc61..d0a027bdc67 100644 --- a/test/e2e/scalability/density.go +++ b/test/e2e/scalability/density.go @@ -44,6 +44,7 @@ import ( "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/timer" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -1035,6 +1036,6 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns, }, } framework.ExpectNoError(testutils.CreateRCWithRetries(c, ns, rc)) - framework.ExpectNoError(framework.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController"))) + framework.ExpectNoError(e2epod.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController"))) e2elog.Logf("Found pod '%s' running", name) } diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD index 475bfa6906b..56c49d42edb 100644 --- a/test/e2e/scheduling/BUILD +++ b/test/e2e/scheduling/BUILD @@ -47,6 +47,7 @@ go_library( "//test/e2e/framework/gpu:go_default_library", "//test/e2e/framework/job:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/providers/gce:go_default_library", "//test/e2e/framework/replicaset:go_default_library", "//test/utils:go_default_library", diff --git a/test/e2e/scheduling/equivalence_cache_predicates.go b/test/e2e/scheduling/equivalence_cache_predicates.go index b5d5642deff..4d3a86ee202 100644 --- 
a/test/e2e/scheduling/equivalence_cache_predicates.go +++ b/test/e2e/scheduling/equivalence_cache_predicates.go @@ -28,6 +28,7 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -61,7 +62,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { // Every test case in this suite assumes that cluster add-on pods stay stable and // cannot be run in parallel with any other test that touches Nodes or Pods. // It is so because we need to have precise control on what's running in the cluster. - systemPods, err := framework.GetPodsInNamespace(cs, ns, map[string]string{}) + systemPods, err := e2epod.GetPodsInNamespace(cs, ns, map[string]string{}) framework.ExpectNoError(err) systemPodsNo = 0 for _, pod := range systemPods { @@ -70,7 +71,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { } } - err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{}) + err = e2epod.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{}) framework.ExpectNoError(err) for _, node := range nodeList.Items { @@ -153,7 +154,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() { // not for successful RC, since no specific pod name can be provided. _, err := cs.CoreV1().ReplicationControllers(ns).Create(rc) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForControlledPodsRunning(cs, ns, affinityRCName, api.Kind("ReplicationController"))) + framework.ExpectNoError(e2epod.WaitForControlledPodsRunning(cs, ns, affinityRCName, api.Kind("ReplicationController"))) ginkgo.By("Remove node failure domain label") framework.RemoveLabelOffNode(cs, nodeName, k) diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index 3f351525c1e..b1ba8e7c4eb 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework/gpu" jobutil "k8s.io/kubernetes/test/e2e/framework/job" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/providers/gce" imageutils "k8s.io/kubernetes/test/utils/image" @@ -140,10 +141,10 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset") e2elog.Logf("Successfully created daemonset to install Nvidia drivers.") - pods, err := framework.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet")) + pods, err := e2epod.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet")) framework.ExpectNoError(err, "failed to get pods controlled by the nvidia-driver-installer daemonset") - devicepluginPods, err := framework.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet")) + devicepluginPods, err := e2epod.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet")) if err == nil { e2elog.Logf("Adding deviceplugin addon pod.") pods.Items = 
append(pods.Items, devicepluginPods.Items...) @@ -265,7 +266,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) { successes := int32(0) for _, podName := range createdPodNames { f.PodClient().WaitForFinish(podName, 5*time.Minute) - logs, err := framework.GetPodLogs(f.ClientSet, ns, podName, "vector-addition") + logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podName, "vector-addition") framework.ExpectNoError(err, "Should be able to get logs for pod %v", podName) regex := regexp.MustCompile("PASSED") if regex.MatchString(logs) { diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index b7b5a5a7883..30d0fa06853 100644 --- a/test/e2e/scheduling/predicates.go +++ b/test/e2e/scheduling/predicates.go @@ -31,6 +31,7 @@ import ( "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -305,7 +306,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { } // Wait for filler pods to schedule. for _, pod := range fillerPods { - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod)) } ginkgo.By("Creating another pod that requires unavailable amount of CPU.") // Create another pod that requires 50% of the largest node CPU resources. @@ -379,7 +380,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName)) + framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, labelPodName)) labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{}) framework.ExpectNoError(err) gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName)) @@ -466,7 +467,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new label yet. The // kubelet will then refuse to launch the pod. - framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName)) + framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, labelPodName)) labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{}) framework.ExpectNoError(err) gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName)) @@ -509,7 +510,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // kubelet and the scheduler: the scheduler might have scheduled a pod // already when the kubelet does not know about its new taint yet. The // kubelet will then refuse to launch the pod. 
- framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, tolerationPodName)) + framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, tolerationPodName)) deployedPod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{}) framework.ExpectNoError(err) gomega.Expect(deployedPod.Spec.NodeName).To(gomega.Equal(nodeName)) @@ -652,7 +653,7 @@ func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod { func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod { pod := createPausePod(f, conf) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)) pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(conf.Name, metav1.GetOptions{}) framework.ExpectNoError(err) return pod @@ -721,7 +722,7 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, n func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) { allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}) framework.ExpectNoError(err) - scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods) + scheduledPods, notScheduledPods := e2epod.GetPodsScheduled(masterNodes, allPods) printed := false printOnce := func(msg string) string { @@ -739,7 +740,7 @@ func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotSched // verifyReplicasResult is wrapper of verifyResult for a group pods with same "name: labelName" label, which means they belong to same RC func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string, labelName string) { allPods := getPodsByLabels(c, ns, map[string]string{"name": labelName}) - scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods) + scheduledPods, notScheduledPods := e2epod.GetPodsScheduled(masterNodes, allPods) printed := false printOnce := func(msg string) string { @@ -818,7 +819,7 @@ func createHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, NodeSelector: nodeSelector, }) - err := framework.WaitForPodNotPending(f.ClientSet, ns, podName) + err := e2epod.WaitForPodNotPending(f.ClientSet, ns, podName) if expectScheduled { framework.ExpectNoError(err) } diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index 8c75df34293..1f4d5a78250 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -35,6 +35,7 @@ import ( "k8s.io/kubernetes/pkg/apis/scheduling" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/replicaset" "github.com/onsi/ginkgo" @@ -121,7 +122,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { } ginkgo.By("Wait for pods to be scheduled.") for _, pod := range pods { - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod)) } ginkgo.By("Run a high priority pod that use 60% of a node resources.") @@ -181,7 +182,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { } ginkgo.By("Wait for pods to be scheduled.") for _, pod := range pods { - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod)) } ginkgo.By("Run a critical pod that use 60% of a node 
resources.") @@ -297,7 +298,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { ginkgo.By("Wait for pods to be scheduled.") for _, pod := range pods { - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod)) } ginkgo.By("Run a high priority pod with node affinity to the first node.") diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index 18d37cd08ca..a81257ffadf 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -81,7 +82,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { err := framework.CheckTestingNSDeletedExcept(cs, ns) framework.ExpectNoError(err) - err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{}) + err = e2epod.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{}) framework.ExpectNoError(err) }) diff --git a/test/e2e/scheduling/taint_based_evictions.go b/test/e2e/scheduling/taint_based_evictions.go index 556b9c45284..555118fd822 100644 --- a/test/e2e/scheduling/taint_based_evictions.go +++ b/test/e2e/scheduling/taint_based_evictions.go @@ -27,6 +27,7 @@ import ( clientset "k8s.io/client-go/kubernetes" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "github.com/onsi/ginkgo" ) @@ -110,7 +111,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { ginkgo.By("Verifying all pods are running properly") for _, pod := range pods { - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod)) } // get the node API object @@ -162,7 +163,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Expecting pod0 to be evicted immediately") - err = framework.WaitForPodCondition(cs, ns, pods[0].Name, "pod0 terminating", time.Second*15, func(pod *v1.Pod) (bool, error) { + err = e2epod.WaitForPodCondition(cs, ns, pods[0].Name, "pod0 terminating", time.Second*15, func(pod *v1.Pod) (bool, error) { // as node is unreachable, pod0 is expected to be in Terminating status // rather than getting deleted if pod.DeletionTimestamp != nil { @@ -173,7 +174,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { framework.ExpectNoError(err) ginkgo.By("Expecting pod2 to be updated with a toleration with tolerationSeconds=300") - err = framework.WaitForPodCondition(cs, ns, pods[2].Name, "pod2 updated with tolerationSeconds=300", time.Second*15, func(pod *v1.Pod) (bool, error) { + err = e2epod.WaitForPodCondition(cs, ns, pods[2].Name, "pod2 updated with tolerationSeconds=300", time.Second*15, func(pod *v1.Pod) (bool, error) { if seconds, err := getTolerationSeconds(pod.Spec.Tolerations); err == nil { return seconds == 300, nil } diff --git a/test/e2e/scheduling/ubernetes_lite.go b/test/e2e/scheduling/ubernetes_lite.go index 7cf64901cad..ad73951665e 100644 --- a/test/e2e/scheduling/ubernetes_lite.go +++ 
b/test/e2e/scheduling/ubernetes_lite.go @@ -30,6 +30,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -106,7 +107,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) // Wait for all of them to be scheduled selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName})) - pods, err := framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector) + pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector) framework.ExpectNoError(err) // Now make sure they're spread across zones @@ -215,12 +216,12 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) { }() // List the pods, making sure we observe all the replicas. selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount) + pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount) framework.ExpectNoError(err) // Wait for all of them to be scheduled ginkgo.By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector)) - pods, err = framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector) + pods, err = e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector) framework.ExpectNoError(err) // Now make sure they're spread across zones diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go index f685fdd1f1d..3655a161c31 100644 --- a/test/e2e/scheduling/ubernetes_lite_volumes.go +++ b/test/e2e/scheduling/ubernetes_lite_volumes.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/providers/gce" ) @@ -199,10 +200,10 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) defer func() { ginkgo.By("Cleaning up pods and PVs") for _, config := range configs { - framework.DeletePodOrFail(c, ns, config.pod.Name) + e2epod.DeletePodOrFail(c, ns, config.pod.Name) } for _, config := range configs { - framework.WaitForPodNoLongerRunningInNamespace(c, config.pod.Name, ns) + e2epod.WaitForPodNoLongerRunningInNamespace(c, config.pod.Name, ns) framework.PVPVCCleanup(c, ns, config.pv, config.pvc) err = framework.DeletePVSource(config.pvSource) framework.ExpectNoError(err) @@ -240,7 +241,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string) ginkgo.By("Waiting for all pods to be running") for _, config := range configs { - err = framework.WaitForPodRunningInNamespace(c, config.pod) + err = e2epod.WaitForPodRunningInNamespace(c, config.pod) framework.ExpectNoError(err) } } diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index 5db5b9dd909..018621ca4d7 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -68,6 +68,7 @@ go_library( "//test/e2e/framework/deployment:go_default_library", "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/metrics:go_default_library", + "//test/e2e/framework/pod:go_default_library", 
"//test/e2e/framework/providers/gce:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/e2e/framework/testfiles:go_default_library", diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index 47229a796ee..b340ded27da 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ -33,10 +33,10 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/drivers" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -261,7 +261,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { if pod == nil { return } - err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod: %v", err) ginkgo.By("Checking if VolumeAttachment was created for the pod") @@ -336,7 +336,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { if pod == nil { return } - err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod: %v", err) ginkgo.By("Checking CSI driver logs") @@ -367,13 +367,13 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { _, _, pod1 := createPod() gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod") - err = framework.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace) framework.ExpectNoError(err, "Failed to start pod1: %v", err) _, _, pod2 := createPod() gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating second pod") - err = framework.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace) framework.ExpectNoError(err, "Failed to start pod2: %v", err) _, _, pod3 := createPod() @@ -434,7 +434,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion") - err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod1: %v", err) ginkgo.By("Expanding current pvc") @@ -525,7 +525,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion") - err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace) framework.ExpectNoError(err, "Failed to start pod1: %v", err) ginkgo.By("Expanding current pvc") @@ -711,7 +711,7 @@ func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContai "csi.storage.k8s.io/serviceAccount.name": "default", } // Load logs of driver pod - log, err := framework.GetPodLogs(cs, namespace, driverPodName, driverContainerName) + log, err := e2epod.GetPodLogs(cs, namespace, driverPodName, driverContainerName) 
if err != nil { return fmt.Errorf("could not load CSI driver logs: %s", err) } diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go index 08fc7b87fef..e54d3a2e80b 100644 --- a/test/e2e/storage/csi_volumes.go +++ b/test/e2e/storage/csi_volumes.go @@ -21,6 +21,7 @@ import ( storagev1 "k8s.io/api/storage/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/drivers" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testsuites" @@ -159,7 +160,7 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela pod := testsuites.StartInPodWithVolume(cs, namespace, claim.Name, "pvc-tester-unschedulable", "sleep 100000", framework.NodeSelection{Selector: nodeSelector}) defer testsuites.StopPod(cs, pod) - framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable") + framework.ExpectNoError(e2epod.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable") } test.TestDynamicProvisioning() } diff --git a/test/e2e/storage/drivers/BUILD b/test/e2e/storage/drivers/BUILD index 8c1cefbd564..7aa0fb123d5 100644 --- a/test/e2e/storage/drivers/BUILD +++ b/test/e2e/storage/drivers/BUILD @@ -24,6 +24,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/auth:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/volume:go_default_library", "//test/e2e/storage/testpatterns:go_default_library", "//test/e2e/storage/testsuites:go_default_library", diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index d07290fb50a..d63cd512485 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -56,6 +56,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/auth" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testsuites" @@ -861,7 +862,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(prepPod) framework.ExpectNoError(err, "while creating hostPath init pod") - err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) + err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) framework.ExpectNoError(err, "while waiting for hostPath init pod to succeed") err = framework.DeletePodWithWait(f, f.ClientSet, pod) @@ -883,7 +884,7 @@ func (v *hostPathSymlinkVolume) DeleteVolume() { pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(v.prepPod) framework.ExpectNoError(err, "while creating hostPath teardown pod") - err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) + err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) framework.ExpectNoError(err, "while waiting for hostPath teardown pod to succeed") err = framework.DeletePodWithWait(f, f.ClientSet, pod) diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go index 56b949752b6..8a34bb6d261 100644 --- 
a/test/e2e/storage/empty_dir_wrapper.go +++ b/test/e2e/storage/empty_dir_wrapper.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -410,7 +411,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume framework.ExpectNoError(err) }() - pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount) + pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount) ginkgo.By("Ensuring each pod is running") diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go index 1973436540f..72b43a2712a 100644 --- a/test/e2e/storage/nfs_persistent_volume-disruptive.go +++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go @@ -22,6 +22,7 @@ import ( "github.com/onsi/ginkgo" "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -29,6 +30,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -262,7 +264,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framew framework.DeletePodWithWait(f, c, pod) } }() - err = framework.WaitForPodRunningInNamespace(c, pod) + err = e2epod.WaitForPodRunningInNamespace(c, pod) framework.ExpectNoError(err, fmt.Sprintf("Pod %q timed out waiting for phase: Running", pod.Name)) // Return created api objects pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{}) diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 4094beaced9..eb5743399db 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -40,6 +40,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -227,7 +228,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ginkgo.AfterEach(func() { ginkgo.By("Deleting pod1") - framework.DeletePodOrFail(config.client, config.ns, pod1.Name) + e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name) }) ginkgo.It("should be able to mount volume and read from pod1", func() { @@ -269,7 +270,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ginkgo.By("Checking fsGroup is set") pod := createPodWithFsGroupTest(config, testVol, 1234, 1234) ginkgo.By("Deleting pod") - framework.DeletePodOrFail(config.client, config.ns, pod.Name) + e2epod.DeletePodOrFail(config.client, config.ns, pod.Name) }) ginkgo.It("should set same fsGroup for two pods simultaneously [Slow]", func() { @@ -279,9 +280,9 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ginkgo.By("Create second pod with same fsGroup and check fsGroup is correct") pod2 := createPodWithFsGroupTest(config, testVol, fsGroup, fsGroup) ginkgo.By("Deleting first pod") - framework.DeletePodOrFail(config.client, config.ns, pod1.Name) + 
e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name) ginkgo.By("Deleting second pod") - framework.DeletePodOrFail(config.client, config.ns, pod2.Name) + e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name) }) ginkgo.It("should set different fsGroup for second pod if first pod is deleted", func() { @@ -295,7 +296,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { ginkgo.By("Create second pod and check fsGroup is the new one") pod2 := createPodWithFsGroupTest(config, testVol, fsGroup2, fsGroup2) ginkgo.By("Deleting second pod") - framework.DeletePodOrFail(config.client, config.ns, pod2.Name) + e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name) }) }) @@ -317,7 +318,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { createLocalPVCsPVs(config, []*localTestVolume{testVol}, immediateMode) pod, err := createLocalPod(config, testVol, nil) framework.ExpectError(err) - err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout) + err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout) framework.ExpectError(err) cleanupLocalPVCsPVs(config, []*localTestVolume{testVol}) }) @@ -334,7 +335,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() { pod, err := config.client.CoreV1().Pods(config.ns).Create(pod) framework.ExpectNoError(err) - err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout) + err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout) framework.ExpectError(err) cleanupLocalVolumes(config, []*localTestVolume{testVol}) @@ -703,7 +704,7 @@ func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeTyp pod, err := config.client.CoreV1().Pods(config.ns).Create(pod) framework.ExpectNoError(err) - err = framework.WaitForPodNameUnschedulableInNamespace(config.client, pod.Name, pod.Namespace) + err = e2epod.WaitForPodNameUnschedulableInNamespace(config.client, pod.Name, pod.Namespace) framework.ExpectNoError(err) } @@ -741,9 +742,9 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) { testReadFileContent(volumeDir, testFile, testVol.ltr.Path, pod1, testVol.localVolumeType) ginkgo.By("Deleting pod1") - framework.DeletePodOrFail(config.client, config.ns, pod1.Name) + e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name) ginkgo.By("Deleting pod2") - framework.DeletePodOrFail(config.client, config.ns, pod2.Name) + e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name) } // Test two pods one after other, write from pod1, and read from pod2 @@ -762,7 +763,7 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType) ginkgo.By("Deleting pod1") - framework.DeletePodOrFail(config.client, config.ns, pod1.Name) + e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name) ginkgo.By("Creating pod2") pod2, pod2Err := createLocalPod(config, testVol, nil) @@ -773,7 +774,7 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum testReadFileContent(volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType) ginkgo.By("Deleting pod2") - framework.DeletePodOrFail(config.client, config.ns, pod2.Name) + e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name) } // Test creating pod with 
fsGroup, and check fsGroup is expected fsGroup. diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index 194d8b899df..92009a7fe08 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -30,6 +30,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -281,7 +282,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { pod := framework.MakeWritePod(ns, pvc) pod, err = c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns)) ginkgo.By("Deleting the claim") framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) @@ -299,7 +300,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { pod = framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount)) pod, err = c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns)) framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) e2elog.Logf("Pod exited without failure; the volume has been recycled.") }) diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index 63731530279..19897bc88c2 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -40,6 +40,7 @@ import ( podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -272,7 +273,7 @@ func testZonalFailover(c clientset.Interface, ns string) { "The same PVC should be used after failover.") ginkgo.By("verifying the container output has 2 lines, indicating the pod has been created twice using the same regional PD.") - logs, err := framework.GetPodLogs(c, ns, pod.Name, "") + logs, err := e2epod.GetPodLogs(c, ns, pod.Name, "") framework.ExpectNoError(err, "Error getting logs from pod %s in namespace %s", pod.Name, ns) lineCount := len(strings.Split(strings.TrimSpace(logs), "\n")) diff --git a/test/e2e/storage/testsuites/BUILD b/test/e2e/storage/testsuites/BUILD index 80f09dfd4f7..782146b02b6 100644 --- a/test/e2e/storage/testsuites/BUILD +++ b/test/e2e/storage/testsuites/BUILD @@ -36,6 +36,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/metrics:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/podlogs:go_default_library", "//test/e2e/framework/volume:go_default_library", "//test/e2e/storage/testpatterns:go_default_library", diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 2ea8ee109ff..c545c85f7c1 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -34,6 +34,7 @@ 
import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" ) @@ -358,7 +359,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent // pod might be nil now. StopPod(client, pod) }() - framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") actualNodeName := runningPod.Spec.NodeName @@ -414,7 +415,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node)) command := "echo 'hello world' > /mnt/test/data" pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node) - framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") actualNodeName := runningPod.Spec.NodeName @@ -430,7 +431,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai command = "select-string 'hello world' /mnt/test/data" } pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode) - framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace)) runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") gomega.Expect(runningPod.Spec.NodeName).NotTo(gomega.Equal(actualNodeName), "second pod should have run on a different node") @@ -496,8 +497,8 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P } framework.ExpectNoError(err) defer func() { - framework.DeletePodOrFail(t.Client, pod.Namespace, pod.Name) - framework.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout) + e2epod.DeletePodOrFail(t.Client, pod.Namespace, pod.Name) + e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout) }() if expectUnschedulable { // Verify that no claims are provisioned. 
@@ -532,7 +533,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node framework.NodeSelection) { pod := StartInPodWithVolume(c, ns, claimName, podName, command, node) defer StopPod(c, pod) - framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace)) } // StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory @@ -597,7 +598,7 @@ func StopPod(c clientset.Interface, pod *v1.Pod) { } else { e2elog.Logf("Pod %s has the following logs: %s", pod.Name, body) } - framework.DeletePodOrFail(c, pod.Namespace, pod.Name) + e2epod.DeletePodOrFail(c, pod.Namespace, pod.Name) } func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) { diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index d1808e922ce..981c9bebf22 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -433,7 +434,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T }() // Wait for pod to be running - err = framework.WaitForPodRunningInNamespace(f.ClientSet, l.pod) + err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, l.pod) framework.ExpectNoError(err, "while waiting for pod to be running") // Exec into container that mounted the volume, delete subpath directory @@ -795,7 +796,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) { defer func() { framework.DeletePodWithWait(f, f.ClientSet, pod) }() - err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod) + err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod) framework.ExpectNoError(err, "while waiting for pod to be running") ginkgo.By("Failing liveness probe") @@ -886,7 +887,7 @@ func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) framework.ExpectNoError(err, "while creating pod") - err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod) + err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod) framework.ExpectNoError(err, "while waiting for pod to be running") pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) @@ -900,7 +901,7 @@ func formatVolume(f *framework.Framework, pod *v1.Pod) { pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) framework.ExpectNoError(err, "while creating volume init pod") - err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) + err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) framework.ExpectNoError(err, "while waiting for volume init pod to succeed") err = framework.DeletePodWithWait(f, f.ClientSet, pod) diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go index 6bfe5332f71..3b4bfc25932 100644 --- a/test/e2e/storage/testsuites/volume_io.go +++ b/test/e2e/storage/testsuites/volume_io.go @@ -30,11 
+30,13 @@ import ( "time" "github.com/onsi/ginkgo" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -320,7 +322,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume. } }() - err = framework.WaitForPodRunningInNamespace(cs, clientPod) + err = e2epod.WaitForPodRunningInNamespace(cs, clientPod) if err != nil { return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err) } diff --git a/test/e2e/storage/utils/BUILD b/test/e2e/storage/utils/BUILD index 72ec9a6419d..92581616751 100644 --- a/test/e2e/storage/utils/BUILD +++ b/test/e2e/storage/utils/BUILD @@ -27,6 +27,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", diff --git a/test/e2e/storage/utils/host_exec.go b/test/e2e/storage/utils/host_exec.go index 48c1e71f900..d0611690ad4 100644 --- a/test/e2e/storage/utils/host_exec.go +++ b/test/e2e/storage/utils/host_exec.go @@ -21,6 +21,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" ) // HostExec represents interface we require to execute commands on remote host. @@ -50,7 +51,7 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod { f := h.Framework cs := f.ClientSet ns := f.Namespace - hostExecPod := framework.NewExecPodSpec(ns.Name, fmt.Sprintf("hostexec-%s", node), true) + hostExecPod := e2epod.NewExecPodSpec(ns.Name, fmt.Sprintf("hostexec-%s", node), true) hostExecPod.Spec.NodeName = node hostExecPod.Spec.Volumes = []v1.Volume{ { @@ -77,7 +78,7 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod { } pod, err := cs.CoreV1().Pods(ns.Name).Create(hostExecPod) framework.ExpectNoError(err) - err = framework.WaitForPodRunningInNamespace(cs, pod) + err = e2epod.WaitForPodRunningInNamespace(cs, pod) framework.ExpectNoError(err) return pod } @@ -118,7 +119,7 @@ func (h *hostExecutor) IssueCommand(cmd string, node *v1.Node) error { // pods under test namespace which will be destroyed in teardown phase. 
func (h *hostExecutor) Cleanup() { for _, pod := range h.nodeExecPods { - framework.DeletePodOrFail(h.Framework.ClientSet, pod.Namespace, pod.Name) + e2epod.DeletePodOrFail(h.Framework.ClientSet, pod.Namespace, pod.Name) } h.nodeExecPods = make(map[string]*v1.Pod) } diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go index 7332224e0f4..80a341e7aaf 100644 --- a/test/e2e/storage/utils/utils.go +++ b/test/e2e/storage/utils/utils.go @@ -27,6 +27,7 @@ import ( "github.com/onsi/ginkgo" "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -35,6 +36,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" imageutils "k8s.io/kubernetes/test/utils/image" uexec "k8s.io/utils/exec" @@ -323,9 +325,9 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) { pod, err := c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err, "Failed to create pod: %v", err) defer func() { - framework.DeletePodOrFail(c, ns, pod.Name) + e2epod.DeletePodOrFail(c, ns, pod.Name) }() - framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace)) + framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace)) } func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginName string) *v1.Pod { @@ -392,7 +394,7 @@ func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginNa provisionerPod, err := podClient.Create(provisionerPod) framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod)) ginkgo.By("locating the provisioner pod") pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{}) diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go index d4be2056022..ab1be5e8ff3 100644 --- a/test/e2e/storage/volume_metrics.go +++ b/test/e2e/storage/volume_metrics.go @@ -33,6 +33,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/metrics" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -115,8 +116,8 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { pod, err = c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err) - err = framework.WaitForPodRunningInNamespace(c, pod) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name) + err = e2epod.WaitForPodRunningInNamespace(c, pod) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod)) @@ -174,7 +175,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { pod, err = c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name) - err = framework.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, framework.PodStartShortTimeout) + err = 
e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, framework.PodStartShortTimeout) framework.ExpectError(err) e2elog.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) @@ -200,8 +201,8 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { pod, err = c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err) - err = framework.WaitForPodRunningInNamespace(c, pod) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name) + err = e2epod.WaitForPodRunningInNamespace(c, pod) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name) pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -257,8 +258,8 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { pod, err = c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err) - err = framework.WaitForPodRunningInNamespace(c, pod) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name) + err = e2epod.WaitForPodRunningInNamespace(c, pod) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name) pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -288,8 +289,8 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { pod, err = c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err) - err = framework.WaitForPodRunningInNamespace(c, pod) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name) + err = e2epod.WaitForPodRunningInNamespace(c, pod) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name) pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -325,8 +326,8 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { // Create pod pod, err = c.CoreV1().Pods(ns).Create(pod) framework.ExpectNoError(err) - err = framework.WaitForPodRunningInNamespace(c, pod) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name) + err = e2epod.WaitForPodRunningInNamespace(c, pod) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, pod), "Error starting pod ", pod.Name) pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 420a5790e7b..00c63a89d21 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -28,18 +28,17 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" - apierrs "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - v1 "k8s.io/api/core/v1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" storage "k8s.io/api/storage/v1" storagebeta "k8s.io/api/storage/v1beta1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/serviceaccount" clientset "k8s.io/client-go/kubernetes" @@ -48,6 +47,7 @@ import ( 
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework/auth" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -738,7 +738,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ginkgo.By("creating an external dynamic provisioner pod") pod := utils.StartExternalProvisioner(c, ns, externalPluginName) - defer framework.DeletePodOrFail(c, ns, pod.Name) + defer e2epod.DeletePodOrFail(c, ns, pod.Name) ginkgo.By("creating a StorageClass") test := testsuites.StorageClassTest{ @@ -1179,7 +1179,7 @@ func startGlusterDpServerPod(c clientset.Interface, ns string) *v1.Pod { provisionerPod, err := podClient.Create(provisionerPod) framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err) - framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod)) + framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod)) ginkgo.By("locating the provisioner pod") pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{}) diff --git a/test/e2e/storage/vsphere/BUILD b/test/e2e/storage/vsphere/BUILD index 0bd4fb3d829..c38f5ad2662 100644 --- a/test/e2e/storage/vsphere/BUILD +++ b/test/e2e/storage/vsphere/BUILD @@ -54,6 +54,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/deployment:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/ssh:go_default_library", "//test/e2e/storage/utils:go_default_library", "//test/utils/image:go_default_library", diff --git a/test/e2e/storage/vsphere/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go index ae47c1be067..cedd5d5c1bb 100644 --- a/test/e2e/storage/vsphere/vsphere_statefulsets.go +++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go @@ -26,6 +26,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -135,7 +136,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { // After scale up, verify all vsphere volumes are attached to node VMs. 
ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up") for _, sspod := range ssPodsAfterScaleUp.Items { - err := framework.WaitForPodsReady(client, statefulset.Namespace, sspod.Name, 0) + err := e2epod.WaitForPodsReady(client, statefulset.Namespace, sspod.Name, 0) framework.ExpectNoError(err) pod, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) diff --git a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go index f6cdfa55885..bcaeeb133a1 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go +++ b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go @@ -22,6 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -89,7 +90,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v pod, err := client.CoreV1().Pods(namespace).Create(podspec) framework.ExpectNoError(err) ginkgo.By("Waiting for pod to be ready") - gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) + gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) // get fresh pod info pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) diff --git a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go index e5ba9cc23d7..3263d1ee57e 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -31,6 +31,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -145,7 +146,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st framework.ExpectNoError(err) ginkgo.By("Waiting for pod to be running") - gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) + gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName) gomega.Expect(isAttached).To(gomega.BeTrue()) diff --git a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go index a4f71d39334..34ac8164593 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -94,7 +95,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup defer framework.DeletePodWithWait(f, client, pod) ginkgo.By("Waiting for pod to be ready") - gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) + gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) pod, err = 
client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) diff --git a/test/e2e/storage/vsphere/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go index 4b61542259f..e5fbc7a4c0b 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_placement.go +++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -360,7 +361,7 @@ func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace st pod, err = client.CoreV1().Pods(namespace).Create(podspec) framework.ExpectNoError(err) ginkgo.By("Waiting for pod to be ready") - gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) + gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) ginkgo.By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName)) for _, volumePath := range volumePaths { diff --git a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go index 08900a90a4c..6d570cee79e 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go @@ -30,6 +30,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -120,7 +121,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("Waiting for pod %d to be ready", i)) - gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) + gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) diff --git a/test/e2e/windows/BUILD b/test/e2e/windows/BUILD index a0166c0e750..5bfcb5a8d41 100644 --- a/test/e2e/windows/BUILD +++ b/test/e2e/windows/BUILD @@ -32,6 +32,7 @@ go_library( "//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/log:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", diff --git a/test/e2e/windows/density.go b/test/e2e/windows/density.go index 4e344e18106..466d448007b 100644 --- a/test/e2e/windows/density.go +++ b/test/e2e/windows/density.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -269,7 +270,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) { err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30)) framework.ExpectNoError(err) - gomega.Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(), + 
gomega.Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(), 30*time.Second, 10*time.Minute)).NotTo(gomega.HaveOccurred()) }(pod) } diff --git a/test/e2e_node/BUILD b/test/e2e_node/BUILD index 384714fc4e6..8b480d713ab 100644 --- a/test/e2e_node/BUILD +++ b/test/e2e_node/BUILD @@ -54,6 +54,7 @@ go_library( "//test/e2e/framework/gpu:go_default_library", "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/metrics:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/blang/semver:go_default_library", "//vendor/github.com/coreos/go-systemd/util:go_default_library", @@ -168,6 +169,7 @@ go_test( "//test/e2e/framework:go_default_library", "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/metrics:go_default_library", + "//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/volume:go_default_library", "//test/e2e_node/perf/workloads:go_default_library", "//test/e2e_node/services:go_default_library", diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go index bda5fe1b73f..0e82787240a 100644 --- a/test/e2e_node/apparmor_test.go +++ b/test/e2e_node/apparmor_test.go @@ -35,6 +35,7 @@ import ( watchtools "k8s.io/client-go/tools/watch" "k8s.io/kubernetes/pkg/security/apparmor" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "github.com/davecgh/go-spew/spew" . "github.com/onsi/ginkgo" @@ -147,7 +148,7 @@ func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1. pod := createPodWithAppArmor(f, profile) if shouldRun { // The pod needs to start before it stops, so wait for the longer start timeout. - framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace( + framework.ExpectNoError(e2epod.WaitTimeoutForPodNoLongerRunningInNamespace( f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout)) } else { // Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor". diff --git a/test/e2e_node/device_plugin.go b/test/e2e_node/device_plugin.go index afbf2a4f2ab..185e1ff9dbb 100644 --- a/test/e2e_node/device_plugin.go +++ b/test/e2e_node/device_plugin.go @@ -32,6 +32,7 @@ import ( kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" pluginapi "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1" dm "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager" @@ -265,7 +266,7 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName // parseLog returns the matching string for the specified regular expression parsed from the container logs. 
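parseLog, defined just below, is one of several call sites in this patch that now read container logs through e2epod.GetPodLogs(client, namespace, podName, containerName). A minimal sketch of that usage, assuming the signature shown in these hunks; firstLogMatch is a hypothetical wrapper added only for illustration:

package example

import (
	"regexp"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// firstLogMatch fetches a container's logs via the relocated helper and
// returns the first substring matching the given regular expression, failing
// the test if the logs cannot be retrieved.
func firstLogMatch(f *framework.Framework, podName, containerName, expr string) string {
	logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
	if err != nil {
		framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
	}
	return regexp.MustCompile(expr).FindString(logs)
}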
func parseLog(f *framework.Framework, podName string, contName string, re string) string { - logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName) + logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) } diff --git a/test/e2e_node/hugepages_test.go b/test/e2e_node/hugepages_test.go index b82dd6ba8e2..bdee8ff4f0a 100644 --- a/test/e2e_node/hugepages_test.go +++ b/test/e2e_node/hugepages_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" . "github.com/onsi/ginkgo" @@ -187,7 +188,7 @@ func runHugePagesTests(f *framework.Framework) { By("checking if the expected hugetlb settings were applied") verifyPod := makePodToVerifyHugePages("pod"+podUID, resource.MustParse("50Mi")) f.PodClient().Create(verifyPod) - err := framework.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name) + err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) } diff --git a/test/e2e_node/image_id_test.go b/test/e2e_node/image_id_test.go index 198d204c44a..f1bd0c2aaa0 100644 --- a/test/e2e_node/image_id_test.go +++ b/test/e2e_node/image_id_test.go @@ -20,6 +20,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "github.com/davecgh/go-spew/spew" . "github.com/onsi/ginkgo" @@ -49,7 +50,7 @@ var _ = framework.KubeDescribe("ImageID [NodeFeature: ImageID]", func() { pod := f.PodClient().Create(podDesc) - framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace( + framework.ExpectNoError(e2epod.WaitTimeoutForPodNoLongerRunningInNamespace( f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout)) runningPod, err := f.PodClient().Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) diff --git a/test/e2e_node/log_path_test.go b/test/e2e_node/log_path_test.go index fff10b49ee1..87919f700ec 100644 --- a/test/e2e_node/log_path_test.go +++ b/test/e2e_node/log_path_test.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" ) const ( @@ -101,7 +102,7 @@ var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() { createAndWaitPod := func(pod *v1.Pod) error { podClient.Create(pod) - return framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + return e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) } var logPodName string diff --git a/test/e2e_node/node_perf_test.go b/test/e2e_node/node_perf_test.go index 46018df2909..05404fd14cf 100644 --- a/test/e2e_node/node_perf_test.go +++ b/test/e2e_node/node_perf_test.go @@ -25,6 +25,7 @@ import ( kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e_node/perf/workloads" . 
"github.com/onsi/ginkgo" @@ -93,7 +94,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow] [Flaky]", func() { pod = f.PodClient().CreateSync(pod) // Wait for pod success. f.PodClient().WaitForSuccess(pod.Name, wl.Timeout()) - podLogs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) + podLogs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) framework.ExpectNoError(err) perf, err := wl.ExtractPerformanceFromLogs(podLogs) framework.ExpectNoError(err) diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index 80be087abba..ca464fc6f04 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -35,6 +35,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" testutils "k8s.io/kubernetes/test/utils" . "github.com/onsi/ginkgo" @@ -369,14 +370,14 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete AfterEach(func() { if CurrentGinkgoTestDescription().Failed && framework.TestContext.DumpLogsOnFailure { By("Get node problem detector log") - log, err := framework.GetPodLogs(c, ns, name, name) + log, err := e2epod.GetPodLogs(c, ns, name, name) Expect(err).ShouldNot(HaveOccurred()) e2elog.Logf("Node Problem Detector logs:\n %s", log) } By("Delete the node problem detector") f.PodClient().Delete(name, metav1.NewDeleteOptions(0)) By("Wait for the node problem detector to disappear") - Expect(framework.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed()) + Expect(e2epod.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed()) By("Delete the config map") c.CoreV1().ConfigMaps(ns).Delete(configName, nil) By("Clean up the events") diff --git a/test/e2e_node/pids_test.go b/test/e2e_node/pids_test.go index 2475beea1f0..45ca56c3165 100644 --- a/test/e2e_node/pids_test.go +++ b/test/e2e_node/pids_test.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" . "github.com/onsi/ginkgo" @@ -130,7 +131,7 @@ func runPodPidsLimitTests(f *framework.Framework) { By("checking if the expected pids settings were applied") verifyPod := makePodToVerifyPids("pod"+podUID, resource.MustParse("1024")) f.PodClient().Create(verifyPod) - err := framework.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name) + err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) } diff --git a/test/e2e_node/pods_container_manager_test.go b/test/e2e_node/pods_container_manager_test.go index f7ba0048a3a..b25b34f9885 100644 --- a/test/e2e_node/pods_container_manager_test.go +++ b/test/e2e_node/pods_container_manager_test.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" . 
"github.com/onsi/ginkgo" @@ -159,7 +160,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { cgroupsToVerify := []string{burstableCgroup, bestEffortCgroup} pod := makePodToVerifyCgroups(cgroupsToVerify) f.PodClient().Create(pod) - err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) }) @@ -197,7 +198,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { cgroupsToVerify := []string{"pod" + podUID} pod := makePodToVerifyCgroups(cgroupsToVerify) f.PodClient().Create(pod) - err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) By("Checking if the pod cgroup was deleted", func() { @@ -205,7 +206,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { Expect(f.PodClient().Delete(guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred()) pod := makePodToVerifyCgroupRemoved("pod" + podUID) f.PodClient().Create(pod) - err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) }) @@ -241,7 +242,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { cgroupsToVerify := []string{"besteffort/pod" + podUID} pod := makePodToVerifyCgroups(cgroupsToVerify) f.PodClient().Create(pod) - err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) By("Checking if the pod cgroup was deleted", func() { @@ -249,7 +250,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { Expect(f.PodClient().Delete(bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred()) pod := makePodToVerifyCgroupRemoved("besteffort/pod" + podUID) f.PodClient().Create(pod) - err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) }) @@ -285,7 +286,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { cgroupsToVerify := []string{"burstable/pod" + podUID} pod := makePodToVerifyCgroups(cgroupsToVerify) f.PodClient().Create(pod) - err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) By("Checking if the pod cgroup was deleted", func() { @@ -293,7 +294,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() { Expect(f.PodClient().Delete(burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred()) pod := makePodToVerifyCgroupRemoved("burstable/pod" + podUID) f.PodClient().Create(pod) - err := framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) }) diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go index 63b159e46bd..f404d1d6e63 100644 --- 
a/test/e2e_node/resource_collector.go +++ b/test/e2e_node/resource_collector.go @@ -44,6 +44,7 @@ import ( "k8s.io/kubernetes/pkg/util/procfs" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e_node/perftype" . "github.com/onsi/ginkgo" @@ -375,7 +376,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) { err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30)) Expect(err).NotTo(HaveOccurred()) - Expect(framework.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(), + Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(), 30*time.Second, 10*time.Minute)).NotTo(HaveOccurred()) }(pod) } diff --git a/test/e2e_node/security_context_test.go b/test/e2e_node/security_context_test.go index a283ec51a28..5fdf648e04f 100644 --- a/test/e2e_node/security_context_test.go +++ b/test/e2e_node/security_context_test.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" . "github.com/onsi/ginkgo" imageutils "k8s.io/kubernetes/test/utils/image" @@ -160,7 +161,7 @@ var _ = framework.KubeDescribe("Security Context", func() { It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func() { busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID()) createAndWaitHostPidPod(busyboxPodName, true) - logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) + logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) } @@ -180,7 +181,7 @@ var _ = framework.KubeDescribe("Security Context", func() { It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func() { busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID()) createAndWaitHostPidPod(busyboxPodName, false) - logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) + logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) } @@ -236,7 +237,7 @@ var _ = framework.KubeDescribe("Security Context", func() { It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func() { ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID()) createAndWaitHostIPCPod(ipcutilsPodName, true) - logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName) + logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", ipcutilsPodName, err) } @@ -251,7 +252,7 @@ var _ = framework.KubeDescribe("Security Context", func() { It("should not show the shared memory ID in the non-hostIPC containers [NodeFeature:HostAccess]", func() { ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID()) createAndWaitHostIPCPod(ipcutilsPodName, false) - logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName) + logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName) if 
err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", ipcutilsPodName, err) } @@ -319,7 +320,7 @@ var _ = framework.KubeDescribe("Security Context", func() { It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func() { busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID()) createAndWaitHostNetworkPod(busyboxPodName, true) - logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) + logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) } @@ -333,7 +334,7 @@ var _ = framework.KubeDescribe("Security Context", func() { It("shouldn't show the same port in the non-hostnetwork containers [NodeFeature:HostAccess]", func() { busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID()) createAndWaitHostNetworkPod(busyboxPodName, false) - logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) + logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) } @@ -385,7 +386,7 @@ var _ = framework.KubeDescribe("Security Context", func() { It("should run the container as privileged when true [NodeFeature:HostAccess]", func() { podName := createAndWaitUserPod(true) - logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) + logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) } diff --git a/test/e2e_node/volume_manager_test.go b/test/e2e_node/volume_manager_test.go index 39f176e5003..2cacf9fbe66 100644 --- a/test/e2e_node/volume_manager_test.go +++ b/test/e2e_node/volume_manager_test.go @@ -23,6 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "fmt" @@ -71,7 +72,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() { }, }, }) - err := framework.WaitForPodSuccessInNamespace(f.ClientSet, memoryBackedPod.Name, f.Namespace.Name) + err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, memoryBackedPod.Name, f.Namespace.Name) Expect(err).NotTo(HaveOccurred()) }) By("Verifying the memory backed volume was removed from node", func() { @@ -112,7 +113,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() { }, }, }) - err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) + err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name) gp := int64(1) f.PodClient().Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp}) if err == nil {