Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 10:51:29 +00:00
Merge pull request #77526 from alejandrox1/framework_util_pod_refactor

Refactored pod-related functions from framework/util.go

Commit b8ba75bcd9
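The change is mechanical: pod-related helpers move out of the framework package into test/e2e/framework/pod, which call sites import under the e2epod alias, while the Framework type and helpers such as ExpectNoError stay in framework. A minimal sketch of the post-refactor call-site pattern (waitAndLog is a hypothetical wrapper for illustration; the e2epod calls and their signatures are taken from the diff below):

    import (
        "k8s.io/kubernetes/test/e2e/framework"
        e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    )

    // waitAndLog waits for a pod to reach Running, then fetches one
    // container's logs, using the relocated e2epod helpers.
    func waitAndLog(f *framework.Framework, podName, containerName string) string {
        // Previously framework.WaitForPodNameRunningInNamespace.
        err := e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
        framework.ExpectNoError(err, "pod %s in namespace %s never reached Running", podName, f.Namespace.Name)

        // Previously framework.GetPodLogs.
        logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
        framework.ExpectNoError(err)
        return logs
    }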
@@ -68,6 +68,7 @@ go_library(
         "//test/e2e/framework/ginkgowrapper:go_default_library",
         "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
+        "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/providers/aws:go_default_library",
         "//test/e2e/framework/providers/azure:go_default_library",
         "//test/e2e/framework/providers/gce:go_default_library",
@@ -84,6 +84,7 @@ go_library(
         "//test/e2e/framework/deployment:go_default_library",
         "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
+        "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/crd:go_default_library",
@@ -42,6 +42,7 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"
     samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
     "k8s.io/utils/pointer"
@@ -382,7 +383,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
     if currentPods != nil {
         for _, pod := range currentPods.Items {
             for _, container := range pod.Spec.Containers {
-                logs, err := framework.GetPodLogs(client, namespace, pod.Name, container.Name)
+                logs, err := e2epod.GetPodLogs(client, namespace, pod.Name, container.Name)
                 e2elog.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs)
             }
         }
@@ -29,6 +29,7 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/framework"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"

     "github.com/onsi/ginkgo"
@@ -111,7 +112,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
     framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, namespace.Name)

     ginkgo.By("Waiting for the pod to have running status")
-    framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
+    framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod))

     ginkgo.By("Deleting the namespace")
     err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
@@ -40,6 +40,7 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     "k8s.io/kubernetes/test/utils/crd"
     imageutils "k8s.io/kubernetes/test/utils/image"
     "k8s.io/utils/pointer"
@@ -818,7 +819,7 @@ func testAttachingPodWebhook(f *framework.Framework) {
     pod := toBeAttachedPod(f)
     _, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
     framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name)
-    err = framework.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name)
+    err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name)
     framework.ExpectNoError(err, "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name)

     ginkgo.By("'kubectl attach' the pod, should be denied by the webhook")
@@ -65,6 +65,7 @@ go_library(
         "//test/e2e/framework/deployment:go_default_library",
         "//test/e2e/framework/job:go_default_library",
         "//test/e2e/framework/log:go_default_library",
+        "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/replicaset:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",
         "//test/utils:go_default_library",
@@ -41,6 +41,7 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     "k8s.io/kubernetes/test/e2e/framework/replicaset"
     testutil "k8s.io/kubernetes/test/utils"
     utilpointer "k8s.io/utils/pointer"
@@ -274,7 +275,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
     _, err := c.AppsV1().ReplicaSets(ns).Create(rs)
     framework.ExpectNoError(err)
     // Verify that the required pods have come up.
-    err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
+    err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
     framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)

     // Create a deployment to delete nginx pods and instead bring up redis pods.
@@ -352,7 +353,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
     framework.ExpectNoError(err)

     // Verify that the required pods have come up.
-    err = framework.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas)
+    err = e2epod.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas)
     framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

     // Create a deployment to delete nginx pods and instead bring up redis pods.
@@ -422,7 +423,7 @@ func testRolloverDeployment(f *framework.Framework) {
     _, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
     framework.ExpectNoError(err)
     // Verify that the required pods have come up.
-    err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
+    err = e2epod.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
     framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

     // Wait for replica set to become ready before adopting it.
@@ -859,7 +860,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {

     // Verify that the required pods have come up.
     e2elog.Logf("Waiting for all required pods to come up")
-    err = framework.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas))
+    err = e2epod.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas))
     framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

     e2elog.Logf("Waiting for deployment %q to complete", deployment.Name)
@@ -26,6 +26,7 @@ import (
     batchinternal "k8s.io/kubernetes/pkg/apis/batch"
     "k8s.io/kubernetes/test/e2e/framework"
     jobutil "k8s.io/kubernetes/test/e2e/framework/job"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

     "github.com/onsi/ginkgo"
     "github.com/onsi/gomega"
@@ -156,7 +157,7 @@ var _ = SIGDescribe("Job", func() {
     })

     ginkgo.By("Checking that the Job readopts the Pod")
-    gomega.Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", jobutil.JobTimeout,
+    gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", jobutil.JobTimeout,
         func(pod *v1.Pod) (bool, error) {
             controllerRef := metav1.GetControllerOf(pod)
             if controllerRef == nil {
@@ -175,7 +176,7 @@ var _ = SIGDescribe("Job", func() {
     })

     ginkgo.By("Checking that the Job releases the Pod")
-    gomega.Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", jobutil.JobTimeout,
+    gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", jobutil.JobTimeout,
         func(pod *v1.Pod) (bool, error) {
             controllerRef := metav1.GetControllerOf(pod)
             if controllerRef != nil {
@@ -38,6 +38,7 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     jobutil "k8s.io/kubernetes/test/e2e/framework/job"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     testutils "k8s.io/kubernetes/test/utils"

     "github.com/onsi/ginkgo"
@@ -112,7 +113,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
     ginkgo.BeforeEach(func() {
         c = f.ClientSet
         ns = f.Namespace.Name
-        _, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
+        _, err := e2epod.GetPodsInNamespace(c, ns, map[string]string{})
         framework.ExpectNoError(err)

         // TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
@@ -157,7 +158,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
         }
         node := nodes.Items[0]
         podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
-        if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
+        if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
             framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
         }

@@ -213,7 +214,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {

         ginkgo.By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers")
         expectNodeReadiness(true, newNode)
-        if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
+        if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
             framework.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err)
         }
     }()
@@ -224,7 +225,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {

         ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition")
         expectNodeReadiness(false, newNode)
-        if err = framework.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
+        if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
             framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
         }
     })
@@ -243,7 +244,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
         framework.ExpectNoError(err)
         replicas := int32(numNodes)
         common.NewRCByName(c, ns, name, replicas, nil)
-        err = framework.VerifyPods(c, ns, name, true, replicas)
+        err = e2epod.VerifyPods(c, ns, name, true, replicas)
         framework.ExpectNoError(err, "Each pod should start running and responding")

         ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
@@ -268,7 +269,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
         framework.ExpectNoError(err)

         ginkgo.By("verifying whether the pod from the unreachable node is recreated")
-        err = framework.VerifyPods(c, ns, name, true, replicas)
+        err = e2epod.VerifyPods(c, ns, name, true, replicas)
         framework.ExpectNoError(err)
     })

@@ -286,7 +287,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
         additionalPod := "additionalpod"
         err = newPodOnNode(c, ns, additionalPod, node.Name)
         framework.ExpectNoError(err)
-        err = framework.VerifyPods(c, ns, additionalPod, true, 1)
+        err = e2epod.VerifyPods(c, ns, additionalPod, true, 1)
         framework.ExpectNoError(err)

         // verify that it is really on the requested node
@@ -310,7 +311,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
         framework.ExpectNoError(err)
         replicas := int32(numNodes)
         common.NewRCByName(c, ns, name, replicas, &gracePeriod)
-        err = framework.VerifyPods(c, ns, name, true, replicas)
+        err = e2epod.VerifyPods(c, ns, name, true, replicas)
         framework.ExpectNoError(err, "Each pod should start running and responding")

         ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
@@ -335,7 +336,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
         gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")

         ginkgo.By(fmt.Sprintf("verifying that there are %v running pods during partition", replicas))
-        _, err = framework.PodsCreated(c, ns, name, replicas)
+        _, err = e2epod.PodsCreated(c, ns, name, replicas)
         framework.ExpectNoError(err)
     })

@@ -408,7 +409,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
         // The grace period on the stateful pods is set to a value > 0.
         framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
             e2elog.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name)
-            err := framework.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, 10*time.Minute)
+            err := e2epod.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, 10*time.Minute)
             gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
         })

@@ -435,7 +436,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
         label := labels.SelectorFromSet(labels.Set(map[string]string{jobutil.JobSelectorKey: job.Name}))

         ginkgo.By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
-        _, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
+        _, err = e2epod.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
         framework.ExpectNoError(err)

         ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
@@ -452,11 +453,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
         ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
         framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
             e2elog.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
-            err := framework.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute)
+            err := e2epod.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute)
             gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")

             ginkgo.By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
-            _, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
+            _, err = e2epod.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
             framework.ExpectNoError(err)
         })

@@ -500,7 +501,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
         }
         node := nodes.Items[0]
         podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
-        if err := framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil {
+        if err := e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil {
             framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
         }
         pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
@@ -606,7 +607,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
         framework.ExpectNoError(wait.Poll(1*time.Second, timeout, func() (bool, error) {
             return framework.NodeHasTaint(c, node.Name, nodepkg.UnreachableTaintTemplate)
         }))
-        if err = framework.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
+        if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
             framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
         }

@@ -30,6 +30,7 @@ import (
     "k8s.io/kubernetes/pkg/controller/replication"
     "k8s.io/kubernetes/test/e2e/framework"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"

     "github.com/onsi/ginkgo"
@@ -129,7 +130,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri

     // Check that pods for the new RC were created.
     // TODO: Maybe switch PodsCreated to just check owner references.
-    pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
+    pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
     framework.ExpectNoError(err)

     // Wait for the pods to enter the running state. Waiting loops until the pods
@@ -164,7 +165,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
     retryTimeout := 2 * time.Minute
     retryInterval := 5 * time.Second
     label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-    err = wait.Poll(retryInterval, retryTimeout, framework.NewPodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
+    err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
     if err != nil {
         framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
     }
@@ -312,7 +313,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
     framework.ExpectNoError(err)

     ginkgo.By("When the matched label of one of its pods change")
-    pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rc.Name, replicas)
+    pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rc.Name, replicas)
     framework.ExpectNoError(err)

     p := pods.Items[0]
@@ -31,6 +31,7 @@ import (
     "k8s.io/kubernetes/pkg/controller/replicaset"
     "k8s.io/kubernetes/test/e2e/framework"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset"

     "github.com/onsi/ginkgo"
@@ -131,7 +132,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s

     // Check that pods for the new RS were created.
     // TODO: Maybe switch PodsCreated to just check owner references.
-    pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
+    pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
     framework.ExpectNoError(err)

     // Wait for the pods to enter the running state. Waiting loops until the pods
@@ -166,7 +167,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
     retryTimeout := 2 * time.Minute
     retryInterval := 5 * time.Second
     label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-    err = wait.Poll(retryInterval, retryTimeout, framework.NewPodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
+    err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
     if err != nil {
         framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
     }
@@ -305,7 +306,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
     framework.ExpectNoError(err)

     ginkgo.By("When the matched label of one of its pods change")
-    pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rs.Name, replicas)
+    pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rs.Name, replicas)
     framework.ExpectNoError(err)

     p = &pods.Items[0]
@@ -35,6 +35,7 @@ import (
     watchtools "k8s.io/client-go/tools/watch"
     "k8s.io/kubernetes/test/e2e/framework"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"
 )

@@ -166,7 +167,7 @@ var _ = SIGDescribe("StatefulSet", func() {
     })

     ginkgo.By("Checking that the stateful set readopts the pod")
-    gomega.Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout,
+    gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout,
         func(pod *v1.Pod) (bool, error) {
             controllerRef := metav1.GetControllerOf(pod)
             if controllerRef == nil {
@@ -186,7 +187,7 @@ var _ = SIGDescribe("StatefulSet", func() {
     })

     ginkgo.By("Checking that the stateful set releases the pod")
-    gomega.Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "released", framework.StatefulSetTimeout,
+    gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "released", framework.StatefulSetTimeout,
         func(pod *v1.Pod) (bool, error) {
             controllerRef := metav1.GetControllerOf(pod)
             if controllerRef != nil {
@@ -203,7 +204,7 @@ var _ = SIGDescribe("StatefulSet", func() {
     })

     ginkgo.By("Checking that the stateful set readopts the pod")
-    gomega.Expect(framework.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout,
+    gomega.Expect(e2epod.WaitForPodCondition(c, pod.Namespace, pod.Name, "adopted", framework.StatefulSetTimeout,
         func(pod *v1.Pod) (bool, error) {
             controllerRef := metav1.GetControllerOf(pod)
             if controllerRef == nil {
@@ -57,6 +57,7 @@ go_library(
         "//test/e2e/framework/deployment:go_default_library",
         "//test/e2e/framework/job:go_default_library",
         "//test/e2e/framework/log:go_default_library",
+        "//test/e2e/framework/pod:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/evanphx/json-patch:go_default_library",
@@ -37,6 +37,7 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/framework/auth"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     "k8s.io/kubernetes/test/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -158,7 +159,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {

     // check that we are receiving logs in the proxy
     err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
-        logs, err := framework.GetPodLogs(f.ClientSet, namespace, "audit-proxy", "proxy")
+        logs, err := e2epod.GetPodLogs(f.ClientSet, namespace, "audit-proxy", "proxy")
         if err != nil {
             e2elog.Logf("waiting for audit-proxy pod logs to be available")
             return false, nil
@@ -363,7 +364,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
     pollingTimeout := 5 * time.Minute
     err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
         // Fetch the logs
-        logs, err := framework.GetPodLogs(f.ClientSet, namespace, "audit-proxy", "proxy")
+        logs, err := e2epod.GetPodLogs(f.ClientSet, namespace, "audit-proxy", "proxy")
         if err != nil {
             return false, err
         }
@@ -34,6 +34,7 @@ import (
     "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/framework/auth"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"
     utilpointer "k8s.io/utils/pointer"

@@ -90,7 +91,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
     ginkgo.By("Running a restricted pod")
     pod, err := c.CoreV1().Pods(ns).Create(restrictedPod("allowed"))
     framework.ExpectNoError(err)
-    framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))
+    framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))

     testPrivilegedPods(func(pod *v1.Pod) {
         _, err := c.CoreV1().Pods(ns).Create(pod)
@@ -109,7 +110,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
     testPrivilegedPods(func(pod *v1.Pod) {
         p, err := c.CoreV1().Pods(ns).Create(pod)
         framework.ExpectNoError(err)
-        framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
+        framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))

         // Verify expected PSP was used.
         p, err = c.CoreV1().Pods(ns).Get(p.Name, metav1.GetOptions{})
@@ -33,6 +33,7 @@ import (
     "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
     "k8s.io/kubernetes/test/e2e/framework"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"

     "github.com/onsi/ginkgo"
@@ -224,7 +225,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
         },
     })
     framework.ExpectNoError(err)
-    framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
+    framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod))

     mountedToken, err := f.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, path.Join(serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey))
     framework.ExpectNoError(err)
@@ -491,7 +492,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
     framework.ExpectNoError(err)

     e2elog.Logf("created pod")
-    if !framework.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) {
+    if !e2epod.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) {
         framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name)
     }

@@ -500,7 +501,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
     var logs string
     if err := wait.Poll(1*time.Minute, 20*time.Minute, func() (done bool, err error) {
         e2elog.Logf("polling logs")
-        logs, err = framework.GetPodLogs(f.ClientSet, f.Namespace.Name, "inclusterclient", "inclusterclient")
+        logs, err = e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, "inclusterclient", "inclusterclient")
         if err != nil {
             e2elog.Logf("Error pulling logs: %v", err)
             return false, nil
@@ -41,6 +41,7 @@ go_library(
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/log:go_default_library",
+        "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/instrumentation/monitoring:go_default_library",
         "//test/e2e/scheduling:go_default_library",
         "//test/utils:go_default_library",
@@ -30,6 +30,7 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

     "github.com/onsi/ginkgo"
     "github.com/onsi/gomega"
@@ -114,7 +115,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {

         ginkgo.By("Wait for number of running and ready kube-dns pods recover")
         label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
-        _, err := framework.WaitForPodsWithLabelRunningReady(c, metav1.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout)
+        _, err := e2epod.WaitForPodsWithLabelRunningReady(c, metav1.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout)
         framework.ExpectNoError(err)
     }()
     ginkgo.By("Wait for kube-dns scaled to expected number")
@@ -78,6 +78,7 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/deployment:go_default_library",
         "//test/e2e/framework/log:go_default_library",
+        "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/replicaset:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
         "//test/utils:go_default_library",
@@ -24,6 +24,7 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/kubernetes/pkg/security/apparmor"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"

     . "github.com/onsi/gomega"
@@ -126,7 +127,7 @@ done`, testCmd)

     if runOnce {
         pod = f.PodClient().Create(pod)
-        framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(
+        framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(
             f.ClientSet, pod.Name, f.Namespace.Name))
         var err error
         pod, err = f.PodClient().Get(pod.Name, metav1.GetOptions{})
@@ -242,9 +243,9 @@ func createAppArmorProfileLoader(f *framework.Framework) {

 func getRunningLoaderPod(f *framework.Framework) *api.Pod {
     label := labels.SelectorFromSet(labels.Set(map[string]string{loaderLabelKey: loaderLabelValue}))
-    pods, err := framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label)
+    pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label)
     framework.ExpectNoError(err, "Failed to schedule apparmor-loader Pod")
     pod := &pods.Items[0]
-    framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod), "Failed to run apparmor-loader Pod")
+    framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod), "Failed to run apparmor-loader Pod")
     return pod
 }

@@ -26,6 +26,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"
 )

@@ -173,7 +174,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
     f.PodClient().CreateSync(pod)

     pollLogs := func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
     }

     Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
@@ -272,10 +273,10 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
     f.PodClient().CreateSync(pod)

     pollLogs1 := func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName1)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName1)
     }
     pollLogs2 := func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName2)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName2)
     }

     By("Waiting for pod with text data")
@@ -430,17 +431,17 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
     f.PodClient().CreateSync(pod)

     pollCreateLogs := func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
     }
     Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/create/data-1"))

     pollUpdateLogs := func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
     }
     Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/update/data-3"))

     pollDeleteLogs := func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
     }
     Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))

@@ -30,6 +30,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/events"
     "k8s.io/kubernetes/test/e2e/framework"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     testutils "k8s.io/kubernetes/test/utils"

     . "github.com/onsi/ginkgo"
@@ -258,7 +259,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
         "involvedObject.namespace": f.Namespace.Name,
         "reason": events.ContainerProbeWarning,
     }.AsSelector().String()
-    framework.ExpectNoError(framework.WaitTimeoutForPodEvent(
+    framework.ExpectNoError(e2epod.WaitTimeoutForPodEvent(
         f.ClientSet, pod.Name, f.Namespace.Name, expectedEvent, "0.0.0.0", framework.PodEventTimeout))
     })
 })
@@ -419,7 +420,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
     // Wait until the pod is not pending. (Here we need to check for something other than
     // 'Pending' other than checking for 'Running', since when failures occur, we go to
     // 'Terminated' which can cause indefinite blocking.)
-    framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name),
+    framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, ns, pod.Name),
         fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
     e2elog.Logf("Started pod %s in namespace %s", pod.Name, ns)

@@ -25,6 +25,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"

     . "github.com/onsi/ginkgo"
@@ -132,7 +133,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
     podClient.CreateSync(pod)

     Eventually(func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
     },
         podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n"))

@@ -142,7 +143,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
     })

     Eventually(func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
     },
         podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n"))
 })
@@ -166,7 +167,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
     framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)

     Eventually(func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
     },
         podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n"))

@@ -176,7 +177,7 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
     })

     Eventually(func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
     },
         podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n"))
 })
@@ -26,6 +26,7 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/framework"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"

     . "github.com/onsi/ginkgo"
@@ -382,7 +383,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
     podClient = f.PodClient()
     pod = podClient.Create(pod)

-    err := framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
+    err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
     framework.ExpectError(err, "while waiting for pod to be running")

     By("updating the pod")
@@ -391,7 +392,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
     })

     By("waiting for pod running")
-    err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
+    err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
     framework.ExpectNoError(err, "while waiting for pod to be running")

     By("deleting the pod gracefully")
@@ -475,7 +476,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
     pod = podClient.Create(pod)

     By("waiting for pod running")
-    err := framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
+    err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
     framework.ExpectNoError(err, "while waiting for pod to be running")

     By("creating a file in subpath")
@@ -498,7 +499,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
     })

     By("waiting for annotated pod running")
-    err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
+    err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
     framework.ExpectNoError(err, "while waiting for annotated pod to be running")

     By("deleting the pod gracefully")
@@ -614,7 +615,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
     defer func() {
         framework.DeletePodWithWait(f, f.ClientSet, pod)
     }()
-    err := framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
+    err := e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
     Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")

     By("updating the pod")
@@ -650,7 +651,7 @@ func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
         framework.DeletePodWithWait(f, f.ClientSet, pod)
     }()

-    err := framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
+    err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
     framework.ExpectError(err, "while waiting for pod to be running")
 }

@@ -24,6 +24,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"

     "github.com/onsi/ginkgo"
@@ -179,7 +180,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
     f.PodClient().CreateSync(pod)

     pollLogs := func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
     }

     gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
@@ -358,17 +359,17 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
     f.PodClient().CreateSync(pod)

     pollCreateLogs := func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
     }
     gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/create/data-1"))

     pollUpdateLogs := func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
     }
     gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/update/data-3"))

     pollDeleteLogs := func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
     }
     gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))

|
@ -24,6 +24,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
@ -132,7 +133,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected downwardAPI", func() {
|
||||
podClient.CreateSync(pod)
|
||||
|
||||
gomega.Eventually(func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key1=\"value1\"\n"))
|
||||
|
||||
@ -142,7 +143,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected downwardAPI", func() {
|
||||
})
|
||||
|
||||
gomega.Eventually(func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n"))
|
||||
})
|
||||
@ -166,7 +167,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected downwardAPI", func() {
|
||||
framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
|
||||
|
||||
gomega.Eventually(func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"bar\"\n"))
|
||||
|
||||
@ -176,7 +177,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected downwardAPI", func() {
|
||||
})
|
||||
|
||||
gomega.Eventually(func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"foo\"\n"))
|
||||
})
|
||||
|
@ -24,6 +24,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
@ -366,17 +367,17 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
|
||||
f.PodClient().CreateSync(pod)
|
||||
|
||||
pollCreateLogs := func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
|
||||
}
|
||||
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/create/data-1"))
|
||||
|
||||
pollUpdateLogs := func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
|
||||
}
|
||||
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/update/data-3"))
|
||||
|
||||
pollDeleteLogs := func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
|
||||
}
|
||||
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
|
||||
|
||||
|
@@ -28,6 +28,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/events"
     runtimeclasstest "k8s.io/kubernetes/pkg/kubelet/runtimeclass/testing"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"
     utilpointer "k8s.io/utils/pointer"

@@ -130,7 +131,7 @@ func createRuntimeClassPod(f *framework.Framework, runtimeClassName string) *v1.

 // expectPodSuccess waits for the given pod to terminate successfully.
 func expectPodSuccess(f *framework.Framework, pod *v1.Pod) {
-    framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(
+    framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(
         f.ClientSet, pod.Name, f.Namespace.Name))
 }

@@ -143,6 +144,6 @@ func expectSandboxFailureEvent(f *framework.Framework, pod *v1.Pod, msg string)
         "involvedObject.namespace": f.Namespace.Name,
         "reason": events.FailedCreatePodSandBox,
     }.AsSelector().String()
-    framework.ExpectNoError(framework.WaitTimeoutForPodEvent(
+    framework.ExpectNoError(e2epod.WaitTimeoutForPodEvent(
         f.ClientSet, pod.Name, f.Namespace.Name, eventSelector, msg, framework.PodEventTimeout))
 }
@@ -24,6 +24,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"

     . "github.com/onsi/ginkgo"
@@ -331,17 +332,17 @@ var _ = Describe("[sig-storage] Secrets", func() {
     f.PodClient().CreateSync(pod)

     pollCreateLogs := func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
     }
     Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/create/data-1"))

     pollUpdateLogs := func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
     }
     Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/update/data-3"))

     pollDeleteLogs := func() (string, error) {
-        return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
+        return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
     }
     Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))

@@ -26,6 +26,7 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/events"
     "k8s.io/kubernetes/test/e2e/framework"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"
     "k8s.io/utils/pointer"

@@ -257,7 +258,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
     */
     framework.ConformanceIt("should run the container as unprivileged when false [LinuxOnly] [NodeConformance]", func() {
         podName := createAndWaitUserPod(false)
-        logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
+        logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
         if err != nil {
             framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
         }
@@ -22,6 +22,7 @@ import (
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/kubernetes/pkg/kubelet/sysctl"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     imageutils "k8s.io/kubernetes/test/utils/image"

     . "github.com/onsi/ginkgo"
@@ -93,7 +94,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
     Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))

     By("Getting logs from the pod")
-    log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
+    log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
     framework.ExpectNoError(err)

     By("Checking that the sysctl is actually updated")
@@ -136,7 +137,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
     Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))

     By("Getting logs from the pod")
-    log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
+    log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
     framework.ExpectNoError(err)

     By("Checking that the sysctl is actually updated")
@@ -40,6 +40,7 @@ import (
     "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     "k8s.io/kubernetes/test/e2e/framework/metrics"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     "k8s.io/kubernetes/test/e2e/manifest"
     testutils "k8s.io/kubernetes/test/utils"

@@ -118,7 +119,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
     // #41007. To avoid those pods preventing the whole test runs (and just
     // wasting the whole run), we allow for some not-ready pods (with the
     // number equal to the number of allowed not-ready nodes).
-    if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
+    if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
         framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
         framework.LogFailedContainers(c, metav1.NamespaceSystem, e2elog.Logf)
         runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
@@ -264,11 +265,11 @@ func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
         }
     }()
     timeout := 5 * time.Minute
-    if err := framework.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
+    if err := e2epod.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
         e2elog.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
         return
     }
-    logs, err := framework.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
+    logs, err := e2epod.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
     if err != nil {
         e2elog.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
     } else {
@@ -32,6 +32,7 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/framework/auth"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     "k8s.io/kubernetes/test/e2e/framework/testfiles"

     "github.com/onsi/ginkgo"
@@ -76,7 +77,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
     var wg sync.WaitGroup
     passed := true
     checkRestart := func(podName string, timeout time.Duration) {
-        err := framework.WaitForPodNameRunningInNamespace(c, podName, ns)
+        err := e2epod.WaitForPodNameRunningInNamespace(c, podName, ns)
         framework.ExpectNoError(err)
         for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
             pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
@@ -122,7 +123,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
     ginkgo.By("creating secret and pod")
     framework.RunKubectlOrDieInput(secretYaml, "create", "-f", "-", nsFlag)
    framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag)
-    err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
+    err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
     framework.ExpectNoError(err)

     ginkgo.By("checking if secret was read correctly")
@@ -140,7 +141,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {

     ginkgo.By("creating the pod")
     framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag)
-    err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
+    err := e2epod.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
     framework.ExpectNoError(err)

     ginkgo.By("checking if name and namespace were passed correctly")
@@ -49,9 +49,7 @@ go_library(
         "//pkg/kubelet/dockershim/metrics:go_default_library",
         "//pkg/kubelet/events:go_default_library",
         "//pkg/kubelet/metrics:go_default_library",
-        "//pkg/kubelet/pod:go_default_library",
         "//pkg/kubelet/sysctl:go_default_library",
-        "//pkg/kubelet/util/format:go_default_library",
         "//pkg/master/ports:go_default_library",
         "//pkg/registry/core/service/portallocator:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
@ -111,6 +109,7 @@ go_library(
|
||||
"//test/e2e/framework/ginkgowrapper:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/framework/metrics:go_default_library",
|
||||
"//test/e2e/framework/pod:go_default_library",
|
||||
"//test/e2e/framework/ssh:go_default_library",
|
||||
"//test/e2e/framework/testfiles:go_default_library",
|
||||
"//test/e2e/manifest:go_default_library",
|
||||
@ -152,6 +151,7 @@ filegroup(
|
||||
"//test/e2e/framework/lifecycle:all-srcs",
|
||||
"//test/e2e/framework/log:all-srcs",
|
||||
"//test/e2e/framework/metrics:all-srcs",
|
||||
"//test/e2e/framework/pod:all-srcs",
|
||||
"//test/e2e/framework/podlogs:all-srcs",
|
||||
"//test/e2e/framework/providers/aws:all-srcs",
|
||||
"//test/e2e/framework/providers/azure:all-srcs",
|
||||
|
@ -48,6 +48,7 @@ import (
scaleclient "k8s.io/client-go/scale"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"

"github.com/onsi/ginkgo"
@ -431,34 +432,34 @@ func (f *Framework) AddNamespacesToDelete(namespaces ...*v1.Namespace) {

// WaitForPodTerminated waits for the pod to be terminated with the given reason.
func (f *Framework) WaitForPodTerminated(podName, reason string) error {
return waitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name)
return e2epod.WaitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name)
}

// WaitForPodNotFound waits for the pod to be completely terminated (not "Get-able").
func (f *Framework) WaitForPodNotFound(podName string, timeout time.Duration) error {
return waitForPodNotFoundInNamespace(f.ClientSet, podName, f.Namespace.Name, timeout)
return e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, podName, f.Namespace.Name, timeout)
}

// WaitForPodRunning waits for the pod to run in the namespace.
func (f *Framework) WaitForPodRunning(podName string) error {
return WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
}

// WaitForPodReady waits for the pod to flip to ready in the namespace.
func (f *Framework) WaitForPodReady(podName string) error {
return waitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, PodStartTimeout)
return e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, PodStartTimeout)
}

// WaitForPodRunningSlow waits for the pod to run in the namespace.
// It has a longer timeout than WaitForPodRunning (util.slowPodStartTimeout).
func (f *Framework) WaitForPodRunningSlow(podName string) error {
return waitForPodRunningInNamespaceSlow(f.ClientSet, podName, f.Namespace.Name)
return e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, podName, f.Namespace.Name)
}

// WaitForPodNoLongerRunning waits for the pod to no longer be running in the namespace, for either
// success or failure.
func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
return WaitForPodNoLongerRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
return e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
}

// TestContainerOutput runs the given pod in the given namespace and waits
|
@ -39,6 +39,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@ -525,7 +526,7 @@ func (config *NetworkingTestConfig) DeleteNodePortService() {

func (config *NetworkingTestConfig) createTestPods() {
testContainerPod := config.createTestPodSpec()
hostTestContainerPod := NewExecPodSpec(config.Namespace, hostTestPodName, config.HostNetwork)
hostTestContainerPod := e2epod.NewExecPodSpec(config.Namespace, hostTestPodName, config.HostNetwork)

config.createPod(testContainerPod)
config.createPod(hostTestContainerPod)
@ -671,7 +672,7 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod() {
config.getPodClient().Delete(pod.Name, metav1.NewDeleteOptions(0))
config.EndpointPods = config.EndpointPods[1:]
// Wait for the pod to be deleted.
err := WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
err := e2epod.WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
if err != nil {
Failf("Failed to delete %s pod: %v", pod.Name, err)
}
|
55 test/e2e/framework/pod/BUILD Normal file
@ -0,0 +1,55 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = [
"resource.go",
"runtimeobject.go",
"wait.go",
],
importpath = "k8s.io/kubernetes/test/e2e/framework/pod",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/apps:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/kubelet/pod:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
|
689 test/e2e/framework/pod/resource.go Normal file
@ -0,0 +1,689 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
"context"
"fmt"
"strconv"
"strings"
"time"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/conditions"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)

var (
// BusyBoxImage is the image URI of BusyBox.
BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
)

// TODO: Move expectNoError and expectNoErrorWithRetries to their own subpackages within framework.
// expectNoError checks if "err" is set, and if so, fails assertion while logging the error.
func expectNoError(err error, explain ...interface{}) {
expectNoErrorWithOffset(1, err, explain...)
}

// TODO: Move to its own subpkg.
// expectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller
// (for example, for call chain f -> g -> expectNoErrorWithOffset(1, ...) error would be logged for "f").
func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
if err != nil {
e2elog.Logf("Unexpected error occurred: %v", err)
}
gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}

// TODO: Move to its own subpkg.
// expectNoErrorWithRetries checks if an error occurs with the given retry count.
func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
var err error
for i := 0; i < maxRetries; i++ {
err = fn()
if err == nil {
return
}
e2elog.Logf("(Attempt %d of %d) Unexpected error occurred: %v", i+1, maxRetries, err)
}
gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
}

func isElementOf(podUID types.UID, pods *v1.PodList) bool {
for _, pod := range pods.Items {
if pod.UID == podUID {
return true
}
}
return false
}

// ProxyResponseChecker is a context for checking pods responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
type ProxyResponseChecker struct {
c clientset.Interface
ns string
label labels.Selector
controllerName string
respondName bool // Whether the pod should respond with its own name.
pods *v1.PodList
}

// NewProxyResponseChecker returns a context for checking pods responses.
func NewProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) ProxyResponseChecker {
return ProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}

// CheckAllResponses issues GETs to all pods in the context and verifies that
// they reply with their own pod name.
func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
successes := 0
options := metav1.ListOptions{LabelSelector: r.label.String()}
currentPods, err := r.c.CoreV1().Pods(r.ns).List(options)
expectNoError(err, "Failed to get list of currentPods in namespace: %s", r.ns)
for i, pod := range r.pods.Items {
// Check that the replica list remains unchanged, otherwise we have problems.
if !isElementOf(pod.UID, currentPods) {
return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
}

ctx, cancel := context.WithTimeout(context.Background(), singleCallTimeout)
defer cancel()

body, err := r.c.CoreV1().RESTClient().Get().
Context(ctx).
Namespace(r.ns).
Resource("pods").
SubResource("proxy").
Name(string(pod.Name)).
Do().
Raw()

if err != nil {
if ctx.Err() != nil {
// We may encounter errors here because of a race between the pod readiness and apiserver
// proxy. So, we log the error and retry if this occurs.
e2elog.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
return false, nil
}
e2elog.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
continue
}
// The response checker expects the pod's name unless !respondName, in
// which case it just checks for a non-empty response.
got := string(body)
what := ""
if r.respondName {
what = "expected"
want := pod.Name
if got != want {
e2elog.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
r.controllerName, i+1, pod.Name, want, got)
continue
}
} else {
what = "non-empty"
if len(got) == 0 {
e2elog.Logf("Controller %s: Replica %d [%s] expected non-empty response",
r.controllerName, i+1, pod.Name)
continue
}
}
successes++
e2elog.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
}
if successes < len(r.pods.Items) {
return false, nil
}
return true, nil
}
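
// A usage sketch (illustrative, not part of this patch): callers typically poll
// CheckAllResponses until every replica answers, with c, ns, label, and pods
// supplied by the surrounding test:
//
//	checker := NewProxyResponseChecker(c, ns, label, "my-rc", true, pods)
//	err := wait.PollImmediate(poll, podRespondingTimeout, checker.CheckAllResponses)
//	expectNoError(err, "pods never became responsive")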

// CountRemainingPods queries the server to count the number of remaining pods
// and the number of pods that have a missing deletion timestamp.
func CountRemainingPods(c clientset.Interface, namespace string) (int, int, error) {
// check for remaining pods
pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
return 0, 0, err
}

// nothing remains!
if len(pods.Items) == 0 {
return 0, 0, nil
}

// stuff remains, log about it
LogPodStates(pods.Items)

// check if there were any pods with missing deletion timestamp
numPods := len(pods.Items)
missingTimestamp := 0
for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil {
missingTimestamp++
}
}
return numPods, missingTimestamp, nil
}

// Initialized checks the state of all init containers in the pod.
func Initialized(pod *v1.Pod) (ok bool, failed bool, err error) {
allInit := true
initFailed := false
for _, s := range pod.Status.InitContainerStatuses {
switch {
case initFailed && s.State.Waiting == nil:
return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name)
case allInit && s.State.Waiting == nil:
return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name)
case s.State.Terminated == nil:
allInit = false
case s.State.Terminated.ExitCode != 0:
allInit = false
initFailed = true
case !s.Ready:
return allInit, initFailed, fmt.Errorf("container %s initialized but isn't marked as ready", s.Name)
}
}
return allInit, initFailed, nil
}

func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodRunning:
return true, nil
case v1.PodFailed, v1.PodSucceeded:
return false, conditions.ErrPodCompleted
}
return false, nil
}
}

func podCompleted(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodFailed, v1.PodSucceeded:
return true, nil
}
return false, nil
}
}

func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodFailed, v1.PodSucceeded:
return false, conditions.ErrPodCompleted
case v1.PodRunning:
return podutil.IsPodReady(pod), nil
}
return false, nil
}
}

func podNotPending(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodPending:
return false, nil
default:
return true, nil
}
}
}

// PodsCreated returns a pod list matched by the given name.
func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return PodsCreatedByLabel(c, ns, name, replicas, label)
}

// PodsCreatedByLabel returns a created pod list matched by the given label.
func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
timeout := 2 * time.Minute
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
options := metav1.ListOptions{LabelSelector: label.String()}

// List the pods, making sure we observe all the replicas.
pods, err := c.CoreV1().Pods(ns).List(options)
if err != nil {
return nil, err
}

created := []v1.Pod{}
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
continue
}
created = append(created, pod)
}
e2elog.Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)

if int32(len(created)) == replicas {
pods.Items = created
return pods, nil
}
}
return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}

// VerifyPods checks that the pods matched by the given name are running and responding.
func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(c, ns, name, wantName, replicas, true)
}

// VerifyPodsRunning checks that the pods matched by the given name are running.
func VerifyPodsRunning(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(c, ns, name, wantName, replicas, false)
}

func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error {
pods, err := PodsCreated(c, ns, name, replicas)
if err != nil {
return err
}
e := podsRunning(c, pods)
if len(e) > 0 {
return fmt.Errorf("failed to wait for pods running: %v", e)
}
if checkResponding {
err = PodsResponding(c, ns, name, wantName, pods)
if err != nil {
return fmt.Errorf("failed to wait for pods responding: %v", err)
}
}
return nil
}

func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
ginkgo.By("ensuring each pod is running")
e := []error{}
errorChan := make(chan error)

for _, pod := range pods.Items {
go func(p v1.Pod) {
errorChan <- WaitForPodRunningInNamespace(c, &p)
}(pod)
}

for range pods.Items {
err := <-errorChan
if err != nil {
e = append(e, err)
}
}

return e
}
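
// A usage sketch (illustrative): a test that created a controller named "baz"
// with 3 replicas in namespace ns can assert on its pods with:
//
//	expectNoError(VerifyPods(c, ns, "baz", true, 3))        // running and responding with their names
//	expectNoError(VerifyPodsRunning(c, ns, "baz", true, 3)) // running only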

// DumpAllPodInfo logs basic info for all pods.
func DumpAllPodInfo(c clientset.Interface) {
pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{})
if err != nil {
e2elog.Logf("unable to fetch pod debug info: %v", err)
}
LogPodStates(pods.Items)
}

// LogPodStates logs basic info of provided pods for debugging.
func LogPodStates(pods []v1.Pod) {
// Find maximum widths for pod, node, and phase strings for column printing.
maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE")
for i := range pods {
pod := &pods[i]
if len(pod.ObjectMeta.Name) > maxPodW {
maxPodW = len(pod.ObjectMeta.Name)
}
if len(pod.Spec.NodeName) > maxNodeW {
maxNodeW = len(pod.Spec.NodeName)
}
if len(pod.Status.Phase) > maxPhaseW {
maxPhaseW = len(pod.Status.Phase)
}
}
// Increase widths by one to separate by a single space.
maxPodW++
maxNodeW++
maxPhaseW++
maxGraceW++

// Log pod info. * does space padding, - makes them left-aligned.
e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
for _, pod := range pods {
grace := ""
if pod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
}
e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
}
e2elog.Logf("") // Final empty line helps for readability.
}

// LogPodTerminationMessages logs termination messages for failing pods. It's a short snippet (much smaller than full logs), but it often shows
// why pods crashed and since it is in the API, it's fast to retrieve.
func LogPodTerminationMessages(pods []v1.Pod) {
for _, pod := range pods {
for _, status := range pod.Status.InitContainerStatuses {
if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 {
e2elog.Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
}
}
for _, status := range pod.Status.ContainerStatuses {
if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 {
e2elog.Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
}
}
}
}

// DumpAllPodInfoForNamespace logs all pod information for a given namespace.
func DumpAllPodInfoForNamespace(c clientset.Interface, namespace string) {
pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
e2elog.Logf("unable to fetch pod debug info: %v", err)
}
LogPodStates(pods.Items)
LogPodTerminationMessages(pods.Items)
}

// FilterNonRestartablePods filters out pods that will never get recreated if
// deleted after termination.
func FilterNonRestartablePods(pods []*v1.Pod) []*v1.Pod {
var results []*v1.Pod
for _, p := range pods {
if isNotRestartAlwaysMirrorPod(p) {
// Mirror pods with restart policy == Never will not get
// recreated if they are deleted after the pods have
// terminated. For now, we discount such pods.
// https://github.com/kubernetes/kubernetes/issues/34003
continue
}
results = append(results, p)
}
return results
}

func isNotRestartAlwaysMirrorPod(p *v1.Pod) bool {
if !kubepod.IsMirrorPod(p) {
return false
}
return p.Spec.RestartPolicy != v1.RestartPolicyAlways
}

// NewExecPodSpec returns the pod spec of a hostexec pod.
func NewExecPodSpec(ns, name string, hostNetwork bool) *v1.Pod {
immediate := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "hostexec",
Image: imageutils.GetE2EImage(imageutils.Hostexec),
ImagePullPolicy: v1.PullIfNotPresent,
},
},
HostNetwork: hostNetwork,
SecurityContext: &v1.PodSecurityContext{},
TerminationGracePeriodSeconds: &immediate,
},
}
return pod
}

// LaunchHostExecPod launches a hostexec pod in the given namespace and waits
// until it is Running.
func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod {
hostExecPod := NewExecPodSpec(ns, name, true)
pod, err := client.CoreV1().Pods(ns).Create(hostExecPod)
expectNoError(err)
err = WaitForPodRunningInNamespace(client, pod)
expectNoError(err)
return pod
}
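
// A usage sketch (illustrative): tests that need to run commands on a node's
// host network launch one of these and then exec into it, e.g. via the
// framework's RunHostCmd helper from the calling package:
//
//	pod := LaunchHostExecPod(c, ns, "hostexec")
//	out, err := framework.RunHostCmd(ns, pod.Name, "uname -a")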

// newExecPodSpec returns the pod spec of an exec pod.
func newExecPodSpec(ns, generateName string) *v1.Pod {
immediate := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: generateName,
Namespace: ns,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &immediate,
Containers: []v1.Container{
{
Name: "exec",
Image: BusyBoxImage,
Command: []string{"sh", "-c", "trap exit TERM; while true; do sleep 5; done"},
},
},
},
}
return pod
}

// CreateExecPodOrFail creates a simple busybox pod in a sleep loop used as a
// vessel for kubectl exec commands.
// Returns the name of the created pod.
func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) string {
e2elog.Logf("Creating new exec pod")
execPod := newExecPodSpec(ns, generateName)
if tweak != nil {
tweak(execPod)
}
created, err := client.CoreV1().Pods(ns).Create(execPod)
expectNoError(err, "failed to create new exec pod in namespace: %s", ns)
err = wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return retrievedPod.Status.Phase == v1.PodRunning, nil
})
expectNoError(err)
return created.Name
}
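
// A usage sketch (illustrative): the tweak callback mutates the generated spec
// before creation, for example to pin the exec pod to a specific node:
//
//	name := CreateExecPodOrFail(c, ns, "exec-", func(p *v1.Pod) {
//		p.Spec.NodeName = nodeName // nodeName supplied by the caller
//	})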

// CreatePodOrFail creates a pod with the specified containerPorts.
func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort) {
ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns))
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: imageutils.GetPauseImageName(),
Ports: containerPorts,
// Add a dummy environment variable to work around a docker issue.
// https://github.com/docker/docker/issues/14203
Env: []v1.EnvVar{{Name: "FOO", Value: " "}},
},
},
},
}
_, err := c.CoreV1().Pods(ns).Create(pod)
expectNoError(err, "failed to create pod %s in namespace %s", name, ns)
}

// DeletePodOrFail deletes the pod of the specified namespace and name.
func DeletePodOrFail(c clientset.Interface, ns, name string) {
ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
err := c.CoreV1().Pods(ns).Delete(name, nil)
expectNoError(err, "failed to delete pod %s in namespace %s", name, ns)
}

// CheckPodsRunningReady returns whether all pods whose names are listed in
// podNames in namespace ns are running and ready, using c and waiting at most
// timeout.
func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready")
}

// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
// listed in podNames in namespace ns are running and ready, or succeeded,
// using c and waiting at most timeout.
func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
}

// CheckPodsCondition returns whether all pods whose names are listed in podNames
// in namespace ns are in the condition, using c and waiting at most timeout.
func CheckPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
np := len(podNames)
e2elog.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
type waitPodResult struct {
success bool
podName string
}
result := make(chan waitPodResult, len(podNames))
for _, podName := range podNames {
// Launch off pod readiness checkers.
go func(name string) {
err := WaitForPodCondition(c, ns, name, desc, timeout, condition)
result <- waitPodResult{err == nil, name}
}(podName)
}
// Wait for them all to finish.
success := true
for range podNames {
res := <-result
if !res.success {
e2elog.Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc)
success = false
}
}
e2elog.Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
return success
}
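
// A usage sketch (illustrative): after a disruptive action, a test can verify
// that a fixed set of pods recovered within a timeout:
//
//	if !CheckPodsRunningReady(c, ns, []string{"pod-a", "pod-b"}, 5*time.Minute) {
//		// report failure from the calling test
//	}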

// GetPodLogs returns the logs of the specified container (namespace/pod/container).
// TODO(random-liu): Change this to be a member function of the framework.
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, false)
}

// GetPreviousPodLogs returns the logs of the previous instance of the
// specified container (namespace/pod/container).
func GetPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, true)
}

// utility function for gomega Eventually
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) {
logs, err := c.CoreV1().RESTClient().Get().
Resource("pods").
Namespace(namespace).
Name(podName).SubResource("log").
Param("container", containerName).
Param("previous", strconv.FormatBool(previous)).
Do().
Raw()
if err != nil {
return "", err
}
if err == nil && strings.Contains(string(logs), "Internal Error") {
return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q", string(logs))
}
return string(logs), err
}
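
// A usage sketch (illustrative): when a container has restarted, comparing the
// current and previous logs helps diagnose the crash:
//
//	cur, _ := GetPodLogs(c, ns, podName, containerName)
//	prev, _ := GetPreviousPodLogs(c, ns, podName, containerName)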

// GetPodsInNamespace returns the pods in the given namespace.
func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
return []*v1.Pod{}, err
}
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
filtered := []*v1.Pod{}
for i := range pods.Items {
// Take the address of the slice element rather than of the loop variable,
// so each appended pointer refers to a distinct pod.
p := &pods.Items[i]
if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) {
continue
}
filtered = append(filtered, p)
}
return filtered, nil
}

// GetPodsScheduled returns the currently scheduled and not scheduled pods.
func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
for _, pod := range pods.Items {
if !masterNodes.Has(pod.Spec.NodeName) {
if pod.Spec.NodeName != "" {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true))
gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionTrue))
scheduledPods = append(scheduledPods, pod)
} else {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
gomega.Expect(scheduledCondition != nil).To(gomega.Equal(true))
gomega.Expect(scheduledCondition.Status).To(gomega.Equal(v1.ConditionFalse))
if scheduledCondition.Reason == "Unschedulable" {
notScheduledPods = append(notScheduledPods, pod)
}
}
}
}
return
}
|
124 test/e2e/framework/pod/runtimeobject.go Normal file
@ -0,0 +1,124 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
"fmt"

apps "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
)

// TODO: This function is generic enough and used enough that it should be
// moved to its own subpkg.
func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) {
switch kind {
case api.Kind("ReplicationController"):
return c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"):
return c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"):
return c.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("DaemonSet"):
return c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{})
case batchinternal.Kind("Job"):
return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
default:
return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
}
}

// TODO: This function is generic enough and used enough that it should be
// moved to its own subpkg.
func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
return labels.SelectorFromSet(typed.Spec.Selector), nil
case *extensions.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *apps.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *apps.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *apps.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *batch.Job:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
default:
return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj)
}
}

// TODO: This function is generic enough and used enough that it should be
// moved to its own subpkg.
func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.ReplicaSet:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *apps.ReplicaSet:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.Deployment:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *apps.Deployment:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.DaemonSet:
return 0, nil
case *apps.DaemonSet:
return 0, nil
case *batch.Job:
// TODO: currently we use pause pods so that's OK. When we'll want to switch to Pods
// that actually finish we need a better way to do this.
if typed.Spec.Parallelism != nil {
return *typed.Spec.Parallelism, nil
}
return 0, nil
default:
return -1, fmt.Errorf("Unsupported kind when getting number of replicas: %v", obj)
}
}
651 test/e2e/framework/pod/wait.go Normal file
@ -0,0 +1,651 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
"bytes"
"errors"
"fmt"
"strings"
"sync"
"text/tabwriter"
"time"

"github.com/onsi/ginkgo"

v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubelet/util/format"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
)

const (
// defaultPodDeletionTimeout is the default timeout for deleting a pod.
defaultPodDeletionTimeout = 3 * time.Minute

// podListTimeout is how long to wait for the pod to be listable.
podListTimeout = time.Minute

podRespondingTimeout = 15 * time.Minute

// How long pods have to become scheduled onto nodes.
podScheduledBeforeTimeout = podListTimeout + (20 * time.Second)

// podStartTimeout is how long to wait for the pod to be started.
// Initial pod start can be delayed O(minutes) by slow docker pulls.
// TODO: Make this 30 seconds once #4566 is resolved.
podStartTimeout = 5 * time.Minute

// poll is how often to poll pods, nodes and claims.
poll = 2 * time.Second
pollShortTimeout = 1 * time.Minute
pollLongTimeout = 5 * time.Minute

// singleCallTimeout is how long to try single API calls (like 'get' or 'list'). Used to prevent
// transient failures from failing tests.
// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
singleCallTimeout = 5 * time.Minute

// Some pods can take much longer to get ready due to volume attach/detach latency.
slowPodStartTimeout = 15 * time.Minute
)

type podCondition func(pod *v1.Pod) (bool, error)

// errorBadPodsStates creates an error message with basic info about bad pods for debugging.
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
// Print bad pods info only if there are at most 10 bad pods.
if len(badPods) > 10 {
return errStr + "There are too many bad pods. Please check log for details."
}

buf := bytes.NewBuffer(nil)
w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
for _, badPod := range badPods {
grace := ""
if badPod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
}
podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v",
badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
fmt.Fprintln(w, podInfo)
}
w.Flush()
return errStr + buf.String()
}

// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
// namespace ns are either running and ready, or failed but controlled by a
// controller. Also, it ensures that at least minPods are running and
// ready. It has separate behavior from other 'wait for' pods functions in
// that it requests the list of pods on every iteration. This is useful, for
// example, in cluster startup, because the number of pods increases while
// waiting. Pods in the SUCCESS state are not counted.
//
// If ignoreLabels is not empty, pods matching this selector are ignored.
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
ignoreSelector := labels.SelectorFromSet(ignoreLabels) // build the selector from ignoreLabels so the filter below can match
start := time.Now()
e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns)
wg := sync.WaitGroup{}
wg.Add(1)
var ignoreNotReady bool
badPods := []v1.Pod{}
desiredPods := 0
notReady := int32(0)

if wait.PollImmediate(poll, timeout, func() (bool, error) {
// We get the new list of pods, replication controllers, and
// replica sets in every iteration because more pods come
// online during startup and we want to ensure they are also
// checked.
replicas, replicaOk := int32(0), int32(0)

rcList, err := c.CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{})
if err != nil {
e2elog.Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for _, rc := range rcList.Items {
replicas += *rc.Spec.Replicas
replicaOk += rc.Status.ReadyReplicas
}

rsList, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{})
if err != nil {
e2elog.Logf("Error getting replica sets in namespace %q: %v", ns, err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for _, rs := range rsList.Items {
replicas += *rs.Spec.Replicas
replicaOk += rs.Status.ReadyReplicas
}

podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
e2elog.Logf("Error getting pods in namespace '%s': %v", ns, err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
nOk := int32(0)
notReady = int32(0)
badPods = []v1.Pod{}
desiredPods = len(podList.Items)
for _, pod := range podList.Items {
if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
continue
}
res, err := testutils.PodRunningReady(&pod)
switch {
case res && err == nil:
nOk++
case pod.Status.Phase == v1.PodSucceeded:
e2elog.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name)
// it doesn't make sense to wait for this pod
continue
case pod.Status.Phase != v1.PodFailed:
e2elog.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
notReady++
badPods = append(badPods, pod)
default:
if metav1.GetControllerOf(&pod) == nil {
e2elog.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
badPods = append(badPods, pod)
}
// ignore failed pods that are controlled by some controller
}
}

e2elog.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
e2elog.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)

if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
return true, nil
}
ignoreNotReady = (notReady <= allowedNotReadyPods)
LogPodStates(badPods)
return false, nil
}) != nil {
if !ignoreNotReady {
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout))
}
e2elog.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
}
return nil
}
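
// A usage sketch (illustrative): suite bring-up typically gates on the system
// namespace becoming healthy before any test runs, with minPods and
// allowedNotReady (both int32) supplied by the test configuration:
//
//	err := WaitForPodsRunningReady(c, metav1.NamespaceSystem, minPods, allowedNotReady, 10*time.Minute, nil)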

// WaitForPodCondition waits for a pod to match the given condition.
func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err)
return err
}
e2elog.Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, poll, err)
continue
}
// log now so that current pod info is reported before calling `condition()`
e2elog.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
podName, pod.Status.Phase, pod.Status.Reason, podutil.IsPodReady(pod), time.Since(start))
if done, err := condition(pod); done {
if err == nil {
e2elog.Logf("Pod %q satisfied condition %q", podName, desc)
}
return err
}
}
return fmt.Errorf("Gave up after waiting %v for pod %q to be %q", timeout, podName, desc)
}
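
// A usage sketch (illustrative): arbitrary per-pod predicates plug in as the
// condition, e.g. waiting for the PodScheduled condition to become True:
//
//	err := WaitForPodCondition(c, ns, name, "scheduled", podStartTimeout,
//		func(pod *v1.Pod) (bool, error) {
//			_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
//			return cond != nil && cond.Status == v1.ConditionTrue, nil
//		})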

// WaitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate,
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed (and thus did not
// terminate) with an unexpected reason. Typically called to test that the passed-in pod is fully
// terminated (reason==""), but may be called to detect if a pod did *not* terminate according to
// the supplied reason.
func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error {
return WaitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", podStartTimeout, func(pod *v1.Pod) (bool, error) {
// Only consider Failed pods. Successful pods will be deleted and detected in
// waitForPodCondition's Get call returning `IsNotFound`
if pod.Status.Phase == v1.PodFailed {
if pod.Status.Reason == reason { // short-circuit waitForPodCondition's loop
return true, nil
}
return true, fmt.Errorf("Expected pod %q in namespace %q to be terminated with reason %q, got reason: %q", podName, namespace, reason, pod.Status.Reason)
}
return false, nil
})
}

// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error {
return WaitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *v1.Pod) (bool, error) {
if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
}
switch pod.Status.Phase {
case v1.PodSucceeded:
ginkgo.By("Saw pod success")
return true, nil
case v1.PodFailed:
return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status)
default:
return false, nil
}
})
}

// WaitForPodNameUnschedulableInNamespace returns an error if it takes too long for the pod to become Pending
// and have condition Status equal to Unschedulable,
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed with an unexpected reason.
// Typically called to test that the passed-in pod is Pending and Unschedulable.
func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, namespace string) error {
return WaitForPodCondition(c, namespace, podName, "Unschedulable", podStartTimeout, func(pod *v1.Pod) (bool, error) {
// Only consider pods in the Pending phase; any Running or terminal phase
// means the pod already left the scheduling queue.
if pod.Status.Phase == v1.PodPending {
for _, cond := range pod.Status.Conditions {
if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
return true, nil
}
}
}
if pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
return true, fmt.Errorf("Expected pod %q in namespace %q to be in phase Pending, but got phase: %v", podName, namespace, pod.Status.Phase)
}
return false, nil
})
}

// WaitForMatchPodsCondition finds matching pods based on the input ListOptions,
// then waits and checks that all matching pods satisfy the given podCondition.
func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
e2elog.Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(opts)
if err != nil {
return err
}
conditionNotMatch := []string{}
for _, pod := range pods.Items {
done, err := condition(&pod)
if done && err != nil {
return fmt.Errorf("Unexpected error: %v", err)
}
if !done {
conditionNotMatch = append(conditionNotMatch, format.Pod(&pod))
}
}
if len(conditionNotMatch) <= 0 {
return err
}
e2elog.Logf("%d pods are not %s: %v", len(conditionNotMatch), desc, conditionNotMatch)
}
return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout)
}
|
||||
|
||||
// WaitForPodNameRunningInNamespace waits default amount of time (podStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or the pod goes into a failed state.
func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error {
return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, podStartTimeout)
}

// WaitForPodRunningInNamespaceSlow waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or the pod goes into a failed state.
func WaitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace string) error {
return WaitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout)
}

// WaitTimeoutForPodRunningInNamespace waits the given timeout duration for the specified pod to become running.
func WaitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, podRunning(c, podName, namespace))
}

// WaitForPodRunningInNamespace waits default amount of time (podStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or the pod goes into a failed state.
func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error {
if pod.Status.Phase == v1.PodRunning {
return nil
}
return WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, podStartTimeout)
}

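// Editor's sketch (illustrative, not part of this change): the two Running
// helpers differ only in what they accept; use the *v1.Pod form when the object
// is already in hand, and the name-based form right after a Create call.
func exampleWaitRunning(c clientset.Interface, pod *v1.Pod) error {
	if err := WaitForPodRunningInNamespace(c, pod); err != nil {
		return fmt.Errorf("pod %q never reached Running: %v", pod.Name, err)
	}
	return nil
}
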
// WaitTimeoutForPodEvent waits the given timeout duration for a pod event to occur.
func WaitTimeoutForPodEvent(c clientset.Interface, podName, namespace, eventSelector, msg string, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, eventOccurred(c, podName, namespace, eventSelector, msg))
}

func eventOccurred(c clientset.Interface, podName, namespace, eventSelector, msg string) wait.ConditionFunc {
options := metav1.ListOptions{FieldSelector: eventSelector}
return func() (bool, error) {
events, err := c.CoreV1().Events(namespace).List(options)
if err != nil {
return false, fmt.Errorf("got error while getting pod events: %s", err)
}
for _, event := range events.Items {
if strings.Contains(event.Message, msg) {
return true, nil
}
}
return false, nil
}
}

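// Editor's sketch (illustrative, not part of this change): eventSelector is a
// field selector over the Events API; callers typically scope it to the pod by
// name and namespace and match on a message substring.
func exampleWaitForStartedEvent(c clientset.Interface, podName, ns string) error {
	selector := fmt.Sprintf("involvedObject.name=%s,involvedObject.namespace=%s", podName, ns)
	return WaitTimeoutForPodEvent(c, podName, ns, selector, "Started container", 2*time.Minute)
}
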
// WaitTimeoutForPodNoLongerRunningInNamespace waits the given timeout duration for the specified pod to stop.
func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, podCompleted(c, podName, namespace))
}

// WaitForPodNoLongerRunningInNamespace waits default amount of time (defaultPodDeletionTimeout) for the specified pod to stop running.
// Returns an error if timeout occurs first.
func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error {
return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, defaultPodDeletionTimeout)
}

// WaitTimeoutForPodReadyInNamespace waits the given timeout duration for the
// specified pod to be ready and running.
func WaitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, podRunningAndReady(c, podName, namespace))
}

// WaitForPodNotPending returns an error if it took too long for the pod to go out of the Pending state.
func WaitForPodNotPending(c clientset.Interface, ns, podName string) error {
return wait.PollImmediate(poll, podStartTimeout, podNotPending(c, podName, ns))
}

// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or ran longer than podStartTimeout.
func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, podStartTimeout)
}

// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or ran longer than slowPodStartTimeout.
func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
}

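// Editor's sketch (illustrative, not part of this change): a one-shot pod that
// runs a command to completion is usually followed by the Success variant; on
// failure the container log is the first thing to inspect. The container name is
// assumed, for illustration, to equal the pod name.
func exampleRunToCompletion(c clientset.Interface, podName, ns string) error {
	if err := WaitForPodSuccessInNamespace(c, podName, ns); err != nil {
		if logs, logErr := GetPodLogs(c, ns, podName, podName); logErr == nil {
			e2elog.Logf("logs of failed pod %q: %s", podName, logs)
		}
		return err
	}
	return nil
}
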
// WaitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate.
// Unlike `waitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get
// api returns IsNotFound then the wait stops and nil is returned. If the Get api returns an error other
// than "not found" then that error is returned and the wait stops.
func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, func() (bool, error) {
_, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if apierrs.IsNotFound(err) {
return true, nil // done
}
if err != nil {
return true, err // stop wait with error
}
return false, nil
})
}

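// Editor's sketch (illustrative, not part of this change): deleting with a zero
// grace period and then waiting for IsNotFound guarantees the pod object is
// fully gone, not merely Terminating.
func exampleDeleteAndWaitGone(c clientset.Interface, podName, ns string) error {
	if err := c.CoreV1().Pods(ns).Delete(podName, metav1.NewDeleteOptions(0)); err != nil {
		return err
	}
	return WaitForPodNotFoundInNamespace(c, podName, ns, 2*time.Minute)
}
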
// WaitForPodToDisappear waits the given timeout duration for the specified pod to disappear.
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
e2elog.Logf("Waiting for pod %s to disappear", podName)
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(options)
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
found := false
for _, pod := range pods.Items {
if pod.Name == podName {
e2elog.Logf("Pod %s still exists", podName)
found = true
break
}
}
if !found {
e2elog.Logf("Pod %s no longer exists", podName)
return true, nil
}
return false, nil
})
}

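// Editor's sketch (illustrative, not part of this change): unlike the Get-based
// helper above, WaitForPodToDisappear lists by label, so it also works when a
// controller may recreate the pod; labels.Everything() disables the filter.
func exampleWaitDisappear(c clientset.Interface, ns, podName string) error {
	return WaitForPodToDisappear(c, ns, podName, labels.Everything(), 2*time.Second, 1*time.Minute)
}
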
// PodsResponding waits for the pods to respond.
func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
ginkgo.By("trying to dial each unique pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return wait.PollImmediate(poll, podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
}

// WaitForControlledPodsRunning waits up to 10 minutes for pods to become Running.
func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error {
rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
return err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
replicas, err := getReplicasFromRuntimeObject(rtObject)
if err != nil {
return err
}
err = testutils.WaitForEnoughPodsWithLabelRunning(c, ns, selector, int(replicas))
if err != nil {
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
}
return nil
}

// WaitForControlledPods waits up to podListTimeout to get the pods of the specified controller name and returns them.
func WaitForControlledPods(c clientset.Interface, ns, name string, kind schema.GroupKind) (pods *v1.PodList, err error) {
rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
return nil, err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return nil, err
}
return WaitForPodsWithLabel(c, ns, selector)
}

// WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and at least one
// matching pod exists. Return the list of matching pods.
func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
err = wait.PollImmediate(poll, podScheduledBeforeTimeout,
func() (bool, error) {
pods, err = WaitForPodsWithLabel(c, ns, label)
if err != nil {
return false, err
}
for _, pod := range pods.Items {
if pod.Spec.NodeName == "" {
return false, nil
}
}
return true, nil
})
return pods, err
}

// WaitForPodsWithLabel waits up to podListTimeout to get pods with a certain label.
func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) {
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err = c.CoreV1().Pods(ns).List(options)
if err != nil {
if testutils.IsRetryableAPIError(err) {
continue
}
return
}
if len(pods.Items) > 0 {
break
}
}
if pods == nil || len(pods.Items) == 0 {
err = fmt.Errorf("Timeout while waiting for pods with label %v", label)
}
return
}

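// Editor's sketch (illustrative, not part of this change): callers usually build
// the selector from the controller's label set; the "name" key and "demo" value
// here are hypothetical.
func exampleListByLabel(c clientset.Interface, ns string) (*v1.PodList, error) {
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "demo"}))
	return WaitForPodsWithLabel(c, ns, label)
}
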
// WaitForPodsWithLabelRunningReady waits for the exact number of matching pods to become running and ready.
// Return the list of matching pods.
func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) {
var current int
err = wait.Poll(poll, timeout,
func() (bool, error) {
pods, err := WaitForPodsWithLabel(c, ns, label)
if err != nil {
e2elog.Logf("Failed to list pods: %v", err)
if testutils.IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
current = 0
for _, pod := range pods.Items {
if flag, err := testutils.PodRunningReady(&pod); err == nil && flag == true {
current++
}
}
if current != num {
e2elog.Logf("Got %v pods running and ready, expect: %v", current, num)
return false, nil
}
return true, nil
})
return pods, err
}

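// Editor's sketch (illustrative, not part of this change): num is an exact
// count, so scaling tests pass the expected replica count and a timeout sized
// for image pulls. The label set is hypothetical.
func exampleWaitReplicasReady(c clientset.Interface, ns string, replicas int) (*v1.PodList, error) {
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "demo"}))
	return WaitForPodsWithLabelRunningReady(c, ns, label, replicas, 5*time.Minute)
}
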
// WaitForPodsInactive waits until there are no active pods left in the PodStore.
// This is to make a fair comparison of deletion time between DeleteRCAndPods
// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
// when the pod is inactive.
func WaitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
var activePods []*v1.Pod
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
pods := ps.List()
activePods = nil
for _, pod := range pods {
if controller.IsPodActive(pod) {
activePods = append(activePods, pod)
}
}

if len(activePods) != 0 {
return false, nil
}
return true, nil
})

if err == wait.ErrWaitTimeout {
for _, pod := range activePods {
e2elog.Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
}
return fmt.Errorf("there are %d active pods. E.g. %q on node %q", len(activePods), activePods[0].Name, activePods[0].Spec.NodeName)
}
return err
}

// WaitForPodsGone waits until there are no pods left in the PodStore.
func WaitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
var pods []*v1.Pod
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
if pods = ps.List(); len(pods) == 0 {
return true, nil
}
return false, nil
})

if err == wait.ErrWaitTimeout {
for _, pod := range pods {
e2elog.Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
}
return fmt.Errorf("there are %d pods left. E.g. %q on node %q", len(pods), pods[0].Name, pods[0].Spec.NodeName)
}
return err
}

// WaitForPodsReady waits for the pods to become ready.
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}
return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
pods, err := c.CoreV1().Pods(ns).List(options)
if err != nil {
return false, nil
}
for _, pod := range pods.Items {
if !podutil.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) {
return false, nil
}
}
return true, nil
})
}

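// Editor's sketch (illustrative, not part of this change): minReadySeconds
// mirrors the Deployment field of the same name, so passing the workload's own
// value checks availability by the same rule the controller uses. The name and
// value below are hypothetical.
func exampleWaitAvailable(c clientset.Interface, ns string) error {
	return WaitForPodsReady(c, ns, "demo", 10)
}
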
// WaitForNRestartablePods tries to list restarting pods using ps until it finds the expected number (expect) of them,
// returning their names if it can do so before timeout.
func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) {
var pods []*v1.Pod
var errLast error
found := wait.Poll(poll, timeout, func() (bool, error) {
allPods := ps.List()
pods = FilterNonRestartablePods(allPods)
if len(pods) != expect {
errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods))
e2elog.Logf("Error getting pods: %v", errLast)
return false, nil
}
return true, nil
}) == nil
podNames := make([]string, len(pods))
for i, p := range pods {
podNames[i] = p.ObjectMeta.Name
}
if !found {
return podNames, fmt.Errorf("couldn't find %d pods within %v; last error: %v",
expect, timeout, errLast)
}
return podNames, nil
}

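// Editor's sketch (illustrative, not part of this change): restart tests capture
// the restartable pod count before the disruption and hand the same expectation
// back to this helper afterwards.
func exampleWaitRestarted(ps *testutils.PodStore, countBefore int) ([]string, error) {
	return WaitForNRestartablePods(ps, countBefore, 5*time.Minute)
}
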
@ -34,6 +34,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@ -110,7 +111,7 @@ func (c *PodClient) CreateEventually(pod *v1.Pod, opts ...interface{}) *v1.Pod {
// CreateSyncInNamespace creates a new pod according to the framework specifications in the given namespace, and waits for it to start.
func (c *PodClient) CreateSyncInNamespace(pod *v1.Pod, namespace string) *v1.Pod {
p := c.Create(pod)
ExpectNoError(WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace))
ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace))
// Get the newest pod after it becomes running, some status may change after pod created, such as pod ip.
p, err := c.Get(p.Name, metav1.GetOptions{})
ExpectNoError(err)
@ -174,7 +175,7 @@ func (c *PodClient) DeleteSyncInNamespace(name string, namespace string, options
if err != nil && !errors.IsNotFound(err) {
Failf("Failed to delete pod %q: %v", name, err)
}
gomega.Expect(WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
gomega.Expect(e2epod.WaitForPodToDisappear(c.f.ClientSet, namespace, name, labels.Everything(),
2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)
}

@ -218,7 +219,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
// TODO(random-liu): Move pod wait function into this file
func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
f := c.f
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -235,7 +236,7 @@ func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
// WaitForFailure waits for pod to fail.
func (c *PodClient) WaitForFailure(name string, timeout time.Duration) {
f := c.f
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -252,7 +253,7 @@ func (c *PodClient) WaitForFailure(name string, timeout time.Duration) {
// WaitForFinish waits for pod to finish running, regardless of success or failure.
func (c *PodClient) WaitForFinish(name string, timeout time.Duration) {
f := c.f
gomega.Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
@ -293,7 +294,7 @@ func (c *PodClient) WaitForErrorEventOrSuccess(pod *v1.Pod) (*v1.Event, error) {
// MatchContainerOutput gets the output of a container and matches the expected regexp in the output.
func (c *PodClient) MatchContainerOutput(name string, containerName string, expectedRegexp string) error {
f := c.f
output, err := GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
output, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
if err != nil {
return fmt.Errorf("failed to get output for container %q of pod %q", containerName, name)
}

@ -24,6 +24,7 @@ go_library(
"//staging/src/k8s.io/legacy-cloud-providers/gce:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",

@ -28,6 +28,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
)

@ -57,13 +58,13 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {

ps, err = testutils.NewPodStore(f.ClientSet, systemNamespace, labels.Everything(), fields.Everything())
allPods := ps.List()
originalPods := framework.FilterNonRestartablePods(allPods)
originalPods := e2epod.FilterNonRestartablePods(allPods)
originalPodNames = make([]string, len(originalPods))
for i, p := range originalPods {
originalPodNames[i] = p.ObjectMeta.Name
}

if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
}

@ -114,10 +115,10 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace

// Make sure the pods from before node recreation are running/completed
podCheckStart := time.Now()
podNamesAfter, err := framework.WaitForNRestartablePods(ps, len(podNames), framework.RestartPodReadyAgainTimeout)
podNamesAfter, err := e2epod.WaitForNRestartablePods(ps, len(podNames), framework.RestartPodReadyAgainTimeout)
framework.ExpectNoError(err)
remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
if !framework.CheckPodsRunningReadyOrSucceeded(c, systemNamespace, podNamesAfter, remaining) {
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, systemNamespace, podNamesAfter, remaining) {
framework.Failf("At least one pod wasn't running and ready after the restart.")
}
}

@ -32,6 +32,7 @@ import (
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
"k8s.io/kubernetes/pkg/volume/util"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@ -501,7 +502,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
// Test the pod's exit code to be zero.
func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
ginkgo.By("Pod should terminate with exitcode 0 (success)")
if err := WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
if err := e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
}
e2elog.Logf("Pod %v succeeded ", pod.Name)
@ -856,7 +857,7 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to be running
err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
}
@ -876,7 +877,7 @@ func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector m
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to be running
err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
}
@ -907,7 +908,7 @@ func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string,
}

// Waiting for pod to be running
err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
err = e2epod.WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
}
@ -962,7 +963,7 @@ func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSe
return nil, fmt.Errorf("pod Create API error: %v", err)
}
// Waiting for pod to become Unschedulable
err = WaitForPodNameUnschedulableInNamespace(client, pod.Name, namespace)
err = e2epod.WaitForPodNameUnschedulableInNamespace(client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err)
}

@ -31,6 +31,7 @@ import (
scaleclient "k8s.io/client-go/scale"
api "k8s.io/kubernetes/pkg/apis/core"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
)

@ -137,7 +138,7 @@ func WaitForRCPodToDisappear(c clientset.Interface, ns, rcName, podName string)
// NodeController evicts pod after 5 minutes, so we need timeout greater than that to observe effects.
// The grace period must be set to 0 on the pod for it to be deleted during the partition.
// Otherwise, it goes to the 'Terminating' state till the kubelet confirms deletion.
return WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute)
return e2epod.WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute)
}

// WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)

@ -40,6 +40,7 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -818,7 +819,7 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s

func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error {
timeout := 2 * time.Minute
if !CheckPodsRunningReady(j.Client, namespace, pods, timeout) {
if !e2epod.CheckPodsRunningReady(j.Client, namespace, pods, timeout) {
return fmt.Errorf("timeout waiting for %d pods to be ready", len(pods))
}
return nil
@ -1303,9 +1304,9 @@ func StopServeHostnameService(clientset clientset.Interface, ns, name string) er
// in the cluster. Each pod in the service is expected to echo its name. These
// names are compared with the given expectedPods list after a sort | uniq.
func VerifyServeHostnameServiceUp(c clientset.Interface, ns, host string, expectedPods []string, serviceIP string, servicePort int) error {
execPodName := CreateExecPodOrFail(c, ns, "execpod-", nil)
execPodName := e2epod.CreateExecPodOrFail(c, ns, "execpod-", nil)
defer func() {
DeletePodOrFail(c, ns, execPodName)
e2epod.DeletePodOrFail(c, ns, execPodName)
}()

// Loop a bunch of times - the proxy is randomized, so we want a good

@ -39,6 +39,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/manifest"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -279,7 +280,7 @@ func (s *StatefulSetTester) ConfirmStatefulPodCount(count int, ss *apps.Stateful
podList := s.GetPodList(ss)
statefulPodCount := len(podList.Items)
if statefulPodCount != count {
logPodStates(podList.Items)
e2epod.LogPodStates(podList.Items)
if hard {
Failf("StatefulSet %v unexpectedly scaled to %d -> %d replicas", ss.Name, count, len(podList.Items))
} else {

File diff suppressed because it is too large
@ -14,6 +14,7 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",

@ -53,6 +53,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/onsi/ginkgo"
@ -364,10 +365,10 @@ func StartVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
}
}
if config.WaitForCompletion {
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace))
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace))
framework.ExpectNoError(podClient.Delete(serverPod.Name, nil))
} else {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, serverPod))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, serverPod))
if pod == nil {
ginkgo.By(fmt.Sprintf("locating the %q server pod", serverPodName))
pod, err = podClient.Get(serverPodName, metav1.GetOptions{})
@ -487,7 +488,7 @@ func TestVolumeClient(client clientset.Interface, config TestConfig, fsGroup *in
framework.Failf("Failed to create %s pod: %v", clientPod.Name, err)

}
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, clientPod))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, clientPod))

ginkgo.By("Checking that text file contents are perfect.")
for i, test := range tests {
@ -566,7 +567,7 @@ func InjectHTML(client clientset.Interface, config TestConfig, fsGroup *int64, v

injectPod, err := podClient.Create(injectPod)
framework.ExpectNoError(err, "Failed to create injector pod: %v", err)
err = framework.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
err = e2epod.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
framework.ExpectNoError(err)
}

@ -21,6 +21,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/instrumentation/logging/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",

@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"

"github.com/onsi/ginkgo"
@ -76,7 +77,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).List(options)
framework.ExpectNoError(err)
for _, pod := range pods.Items {
err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, &pod)
framework.ExpectNoError(err)
}

@ -27,6 +27,7 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
)

@ -79,7 +80,7 @@ func (p *esLogProvider) Init() error {
return err
}
for _, pod := range pods.Items {
err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, &pod)
if err != nil {
return err
}

@ -26,6 +26,7 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/k8s.io/utils/integer:go_default_library",
],

@ -27,6 +27,7 @@ import (
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@ -130,7 +131,7 @@ func (p *loadLoggingPod) Start(f *framework.Framework) error {
NodeName: p.nodeName,
},
})
return framework.WaitForPodNameRunningInNamespace(f.ClientSet, p.name, f.Namespace.Name)
return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, p.name, f.Namespace.Name)
}

func (p *loadLoggingPod) ExpectedLineCount() int {
@ -194,5 +195,5 @@ func (p *execLoggingPod) Start(f *framework.Framework) error {
},
},
})
return framework.WaitForPodNameRunningInNamespace(f.ClientSet, p.name, f.Namespace.Name)
return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, p.name, f.Namespace.Name)
}

@ -40,6 +40,7 @@ go_library(
"//test/e2e/framework/gpu:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils/image:go_default_library",

@ -30,6 +30,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
)

@ -71,7 +72,7 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) {
}

// Create test pod with unique name.
framework.CreateExecPodOrFail(kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) {
e2epod.CreateExecPodOrFail(kubeClient, f.Namespace.Name, uniqueContainerName, func(pod *v1.Pod) {
pod.Spec.Containers[0].Name = uniqueContainerName
})
defer kubeClient.CoreV1().Pods(f.Namespace.Name).Delete(uniqueContainerName, &metav1.DeleteOptions{})

@ -36,6 +36,7 @@ go_library(
"//test/e2e/framework/endpoints:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils:go_default_library",

@ -63,6 +63,7 @@ import (
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
@ -368,7 +369,7 @@ var _ = SIGDescribe("Kubectl client", func() {
ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml))
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in")))
framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
gomega.Expect(framework.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(gomega.BeTrue())
gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{simplePodName}, framework.PodStartTimeout)).To(gomega.BeTrue())
})
ginkgo.AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, simplePodSelector)
@ -516,7 +517,7 @@ var _ = SIGDescribe("Kubectl client", func() {
WithStdinData("abcd1234").
Exec()
framework.ExpectNoError(err)
framework.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout)
e2epod.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout)

ginkgo.By("running a failing command with --leave-stdin-open")
_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
@ -539,7 +540,7 @@ var _ = SIGDescribe("Kubectl client", func() {
// NOTE: we cannot guarantee our output showed up in the container logs before stdin was closed, so we have
// to loop test.
err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) {
if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) {
framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test")
}
logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name)
@ -567,14 +568,14 @@ var _ = SIGDescribe("Kubectl client", func() {
g = func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
runTestPod, _, err = polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
gomega.Expect(err).To(gomega.BeNil())
if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) {
if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) {
framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
}

// NOTE: we cannot guarantee our output showed up in the container logs before stdin was closed, so we have
// to loop test.
err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
if !framework.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) {
if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) {
framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
}
logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name)
@ -593,7 +594,7 @@ var _ = SIGDescribe("Kubectl client", func() {
ginkgo.By("executing a command with run")
framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+busyboxImage, "--restart=OnFailure", nsFlag, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")

if !framework.CheckPodsRunningReady(c, ns, []string{podName}, framework.PodStartTimeout) {
if !e2epod.CheckPodsRunningReady(c, ns, []string{podName}, framework.PodStartTimeout) {
framework.Failf("Pod for run-log-test was not ready")
}

@ -1213,7 +1214,7 @@ metadata:
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in")))
nsFlag = fmt.Sprintf("--namespace=%v", ns)
framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag)
gomega.Expect(framework.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(gomega.BeTrue())
gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{pausePodName}, framework.PodStartTimeout)).To(gomega.BeTrue())
})
ginkgo.AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, pausePodSelector)
@ -1254,7 +1255,7 @@ metadata:
nsFlag = fmt.Sprintf("--namespace=%v", ns)
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml")))
framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag)
gomega.Expect(framework.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout)).To(gomega.BeTrue())
gomega.Expect(e2epod.CheckPodsRunningReady(c, ns, []string{busyboxPodName}, framework.PodStartTimeout)).To(gomega.BeTrue())
})
ginkgo.AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, busyboxPodSelector)
@ -1438,7 +1439,7 @@ metadata:
framework.RunKubectlOrDie("run", name, "--image="+nginxImage, nsFlag)
ginkgo.By("verifying the pod controlled by " + name + " gets created")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name}))
podlist, err := framework.WaitForPodsWithLabel(c, ns, label)
podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label)
if err != nil {
framework.Failf("Failed getting pod controlled by %s: %v", name, err)
}
@ -1483,7 +1484,7 @@ metadata:

ginkgo.By("verifying the pod controlled by rc " + rcName + " was created")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName}))
podlist, err := framework.WaitForPodsWithLabel(c, ns, label)
podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label)
if err != nil {
framework.Failf("Failed getting pod controlled by rc %s: %v", rcName, err)
}
@ -1498,7 +1499,7 @@ metadata:
for _, pod := range pods {
podNames = append(podNames, pod.Name)
}
if !framework.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) {
if !e2epod.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) {
framework.Failf("Pods for rc %s were not ready", rcName)
}
_, err = framework.RunKubectl("logs", "rc/"+rcName, nsFlag)
@ -1594,7 +1595,7 @@ metadata:

ginkgo.By("verifying the pod controlled by deployment " + dName + " was created")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": dName}))
podlist, err := framework.WaitForPodsWithLabel(c, ns, label)
podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label)
if err != nil {
framework.Failf("Failed getting pod controlled by deployment %s: %v", dName, err)
}

@ -37,6 +37,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

@ -119,7 +120,7 @@ func pfPod(expectedClientData, chunks, chunkSize, chunkIntervalMillis string, bi

// WaitForTerminatedContainer waits till the given container in the given pod terminates.
func WaitForTerminatedContainer(f *framework.Framework, pod *v1.Pod, containerName string) error {
return framework.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "container terminated", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
return e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, pod.Name, "container terminated", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
if len(testutils.TerminatedContainers(pod)[containerName]) > 0 {
return true, nil
}
@ -241,7 +242,7 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) {

ginkgo.By("Verifying logs")
gomega.Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
}, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll(
gomega.ContainSubstring("Accepted client connection"),
gomega.ContainSubstring("Done"),
@ -278,7 +279,7 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) {

ginkgo.By("Verifying logs")
gomega.Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
}, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll(
gomega.ContainSubstring("Accepted client connection"),
gomega.ContainSubstring("Expected to read 3 bytes from client, but got 0 instead"),
@ -336,7 +337,7 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework)

ginkgo.By("Verifying logs")
gomega.Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
}, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll(
gomega.ContainSubstring("Accepted client connection"),
gomega.ContainSubstring("Received expected client data"),
@ -425,7 +426,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {

ginkgo.By("Verifying logs")
gomega.Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, "portforwardtester")
}, postStartWaitTimeout, podCheckInterval).Should(gomega.SatisfyAll(
gomega.ContainSubstring("Accepted client connection"),
gomega.ContainSubstring("Received expected client data"),

@ -36,6 +36,7 @@ go_library(
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/lifecycle:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//test/e2e/upgrades/apps:go_default_library",

@ -29,6 +29,7 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func addMasterReplica(zone string) error {
@ -69,7 +70,7 @@ func removeWorkerNodes(zone string) error {

func verifyRCs(c clientset.Interface, ns string, names []string) {
for _, name := range names {
framework.ExpectNoError(framework.VerifyPods(c, ns, name, true, 1))
framework.ExpectNoError(e2epod.VerifyPods(c, ns, name, true, 1))
}
}

@ -26,6 +26,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@ -41,7 +42,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
systemPods, err := e2epod.GetPodsInNamespace(c, ns, map[string]string{})
gomega.Expect(err).To(gomega.BeNil())
systemPodsNo = int32(len(systemPods))
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
@ -92,7 +93,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.
ginkgo.By("waiting for system pods to successfully restart")
err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
gomega.Expect(err).To(gomega.BeNil())
})

@ -31,6 +31,7 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"

@ -197,7 +198,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
// Print the log of the containers if pod is not running and ready.
for _, container := range p.Status.ContainerStatuses {
cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
log, err := framework.GetPodLogs(c, p.Namespace, p.Name, container.Name)
log, err := e2epod.GetPodLogs(c, p.Namespace, p.Name, container.Name)
printFn(cIdentifer, log, err, false)
// Get log from the previous container.
if container.RestartCount > 0 {
@ -260,7 +261,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {

// For each pod, we do a sanity check to ensure it's running / healthy
// or succeeded now, as that's what we'll be checking later.
if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, framework.PodReadyBeforeTimeout) {
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, framework.PodReadyBeforeTimeout) {
printStatusAndLogsForNotReadyPods(c, ns, podNames, pods)
return false
}
@ -283,7 +284,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {

// Ensure all of the pods that we found on this node before the reboot are
// running / healthy, or succeeded.
if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, rebootPodReadyAgainTimeout) {
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, rebootPodReadyAgainTimeout) {
newPods := ps.List()
printStatusAndLogsForNotReadyPods(c, ns, podNames, newPods)
return false

@ -25,6 +25,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

"github.com/onsi/ginkgo"
)
@ -49,7 +50,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
systemPods, err := e2epod.GetPodsInNamespace(c, ns, map[string]string{})
framework.ExpectNoError(err)
systemPodsNo = int32(len(systemPods))
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
@ -102,7 +103,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.
ginkgo.By("waiting for system pods to successfully restart")
err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
framework.ExpectNoError(err)
})

@ -114,7 +115,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
framework.ExpectNoError(err)
originalNodeCount = int32(numNodes)
common.NewRCByName(c, ns, name, originalNodeCount, nil)
err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount)
framework.ExpectNoError(err)

targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)
@ -131,7 +132,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
time.Sleep(time.Minute)

ginkgo.By("verifying whether the pods from the removed node are recreated")
err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount)
framework.ExpectNoError(err)
})

@ -145,7 +146,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
framework.ExpectNoError(err)
originalNodeCount = int32(numNodes)
common.NewRCByName(c, ns, name, originalNodeCount, nil)
err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount)
framework.ExpectNoError(err)

targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes + 1)
@ -160,7 +161,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
ginkgo.By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", originalNodeCount+1))
err = resizeRC(c, ns, name, originalNodeCount+1)
framework.ExpectNoError(err)
err = framework.VerifyPods(c, ns, name, true, originalNodeCount+1)
err = e2epod.VerifyPods(c, ns, name, true, originalNodeCount+1)
framework.ExpectNoError(err)
})
})

@ -26,6 +26,7 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"

"github.com/onsi/ginkgo"
@ -65,13 +66,13 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {

ginkgo.By("ensuring all pods are running and ready")
allPods := ps.List()
pods := framework.FilterNonRestartablePods(allPods)
pods := e2epod.FilterNonRestartablePods(allPods)

originalPodNames = make([]string, len(pods))
for i, p := range pods {
originalPodNames[i] = p.ObjectMeta.Name
}
if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, originalPodNames, pods)
framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
}
@ -106,10 +107,10 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
// across node restarts.
ginkgo.By("ensuring the same number of pods are running and ready after restart")
podCheckStart := time.Now()
podNamesAfter, err := framework.WaitForNRestartablePods(ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout)
podNamesAfter, err := e2epod.WaitForNRestartablePods(ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout)
framework.ExpectNoError(err)
remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) {
if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) {
pods := ps.List()
printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, podNamesAfter, pods)
framework.Failf("At least one pod wasn't running and ready after the restart.")

@ -62,6 +62,7 @@ go_library(
"//test/e2e/framework/endpoints:go_default_library",
"//test/e2e/framework/ingress:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/network/scale:go_default_library",
@ -31,6 +31,7 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

"github.com/onsi/ginkgo"
)
@ -96,7 +97,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {

// wait for objects
for _, ns := range namespaces {
framework.WaitForControlledPodsRunning(c, ns.Name, backendRcName, api.Kind("ReplicationController"))
e2epod.WaitForControlledPodsRunning(c, ns.Name, backendRcName, api.Kind("ReplicationController"))
framework.WaitForService(c, ns.Name, backendSvcName, true, framework.Poll, framework.ServiceStartTimeout)
}
// it is not enough that pods are running because they may be set to running, but
@ -106,7 +107,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns.Name).List(options)
framework.ExpectNoError(err, "failed to list pods in namespace: %s", ns.Name)
err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
err = e2epod.PodsResponding(c, ns.Name, backendPodName, false, pods)
framework.ExpectNoError(err, "waiting for all pods to respond")
e2elog.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
@ -145,7 +146,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
// wait until the pods have been scheduled, i.e. are not Pending anymore. Remember
// that we cannot wait for the pods to be running because our pods terminate by themselves.
for _, ns := range namespaces {
err := framework.WaitForPodNotPending(c, ns.Name, frontendPodName)
err := e2epod.WaitForPodNotPending(c, ns.Name, frontendPodName)
framework.ExpectNoError(err)
}
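A sketch of the relocated wait-and-probe pattern from the hunks above (waitForBackends is a hypothetical wrapper; the e2epod calls and the boolean argument mirror the call sites in this file):

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForBackends waits until the named pod has left Pending, then checks
// that the listed backend pods answer requests.
func waitForBackends(c clientset.Interface, ns, podName string, pods *v1.PodList) error {
	if err := e2epod.WaitForPodNotPending(c, ns, podName); err != nil {
		return err
	}
	// The boolean mirrors the `false` passed at the call site above.
	return e2epod.PodsResponding(c, ns, podName, false, pods)
}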
@ -23,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"

"fmt"
@ -135,7 +136,7 @@ var _ = SIGDescribe("NetworkPolicy", func() {

// Create Server with Service in NS-B
e2elog.Logf("Waiting for server to come up.")
err = framework.WaitForPodRunningInNamespace(f.ClientSet, podServer)
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServer)
framework.ExpectNoError(err)

// Create Policy for that service that allows traffic only via namespace B
@ -545,14 +546,14 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se
}()

e2elog.Logf("Waiting for %s to complete.", podClient.Name)
err := framework.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podClient.Name, ns.Name)
err := e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podClient.Name, ns.Name)
framework.ExpectNoError(err, "Pod did not finish as expected.")

e2elog.Logf("Waiting for %s to complete.", podClient.Name)
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
if err != nil {
// Collect pod logs when we see a failure.
logs, logErr := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName))
logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName))
if logErr != nil {
framework.Failf("Error getting container logs: %s", logErr)
}
@ -564,7 +565,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se
}

// Collect the list of pods running in the test namespace.
podsInNS, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
podsInNS, err := e2epod.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
if err != nil {
e2elog.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
}
@ -592,13 +593,13 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string,
}()

e2elog.Logf("Waiting for %s to complete.", podClient.Name)
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)

// We expect an error here since it's a cannot connect test.
// Dump debug information if the error was nil.
if err == nil {
// Collect pod logs when we see a failure.
logs, logErr := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName))
logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName))
if logErr != nil {
framework.Failf("Error getting container logs: %s", logErr)
}
@ -610,7 +611,7 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string,
}

// Collect the list of pods running in the test namespace.
podsInNS, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
podsInNS, err := e2epod.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
if err != nil {
e2elog.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
}
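The wait-then-collect-logs idiom used by testCanConnect above, as a minimal sketch (waitAndDumpLogs is illustrative; the two e2epod calls match the signatures at the call sites):

package example

import (
	"fmt"

	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitAndDumpLogs waits for the client pod to succeed; on failure it fetches
// the container logs for debugging, as testCanConnect does above.
func waitAndDumpLogs(cs clientset.Interface, ns, podName, container string) error {
	err := e2epod.WaitForPodSuccessInNamespace(cs, podName, ns)
	if err != nil {
		if logs, logErr := e2epod.GetPodLogs(cs, ns, podName, container); logErr == nil {
			fmt.Printf("logs of %s/%s: %s\n", podName, container, logs)
		}
	}
	return err
}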
@ -40,6 +40,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -154,22 +155,22 @@ var _ = SIGDescribe("Services", func() {
name1 := "pod1"
name2 := "pod2"

framework.CreatePodOrFail(cs, ns, name1, labels, []v1.ContainerPort{{ContainerPort: 80}})
e2epod.CreatePodOrFail(cs, ns, name1, labels, []v1.ContainerPort{{ContainerPort: 80}})
names[name1] = true
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{name1: {80}})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

framework.CreatePodOrFail(cs, ns, name2, labels, []v1.ContainerPort{{ContainerPort: 80}})
e2epod.CreatePodOrFail(cs, ns, name2, labels, []v1.ContainerPort{{ContainerPort: 80}})
names[name2] = true
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{name1: {80}, name2: {80}})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

framework.DeletePodOrFail(cs, ns, name1)
e2epod.DeletePodOrFail(cs, ns, name1)
delete(names, name1)
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{name2: {80}})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

framework.DeletePodOrFail(cs, ns, name2)
e2epod.DeletePodOrFail(cs, ns, name2)
delete(names, name2)
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)
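The create/validate/delete churn above condensed into a sketch (churnBackend is a hypothetical helper; the endpoint validation between the two calls is elided here):

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// churnBackend creates and later removes one backend pod serving port 80.
func churnBackend(cs clientset.Interface, ns string, labels map[string]string) {
	e2epod.CreatePodOrFail(cs, ns, "pod1", labels, []v1.ContainerPort{{ContainerPort: 80}})
	// ... e2eendpoints.ValidateEndpointsPorts(...) would run here ...
	e2epod.DeletePodOrFail(cs, ns, "pod1")
}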
@ -240,22 +241,22 @@ var _ = SIGDescribe("Services", func() {
podname1 := "pod1"
podname2 := "pod2"

framework.CreatePodOrFail(cs, ns, podname1, labels, containerPorts1)
e2epod.CreatePodOrFail(cs, ns, podname1, labels, containerPorts1)
names[podname1] = true
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{podname1: {port1}})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

framework.CreatePodOrFail(cs, ns, podname2, labels, containerPorts2)
e2epod.CreatePodOrFail(cs, ns, podname2, labels, containerPorts2)
names[podname2] = true
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{podname1: {port1}, podname2: {port2}})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

framework.DeletePodOrFail(cs, ns, podname1)
e2epod.DeletePodOrFail(cs, ns, podname1)
delete(names, podname1)
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{podname2: {port2}})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)

framework.DeletePodOrFail(cs, ns, podname2)
e2epod.DeletePodOrFail(cs, ns, podname2)
delete(names, podname2)
err = e2eendpoints.ValidateEndpointsPorts(cs, ns, serviceName, e2eendpoints.PortsByPodName{})
framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)
@ -505,7 +506,7 @@ var _ = SIGDescribe("Services", func() {
jig.TestReachableHTTP(nodeIP, nodePort, framework.KubeProxyLagTimeout)

ginkgo.By("verifying the node port is locked")
hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec")
hostExec := e2epod.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec")
// Even if the node-ip:node-port check above passed, this hostexec pod
// might fall on a node with a laggy kube-proxy.
cmd := fmt.Sprintf(`for i in $(seq 1 300); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 1; done; exit 1`, nodePort)
@ -1234,7 +1235,7 @@ var _ = SIGDescribe("Services", func() {
err = t.DeleteService(serviceName)
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)

hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec")
hostExec := e2epod.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec")
cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort)
var stdout string
if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) {
@ -1321,12 +1322,12 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err)

ginkgo.By("Verifying pods for RC " + t.Name)
framework.ExpectNoError(framework.VerifyPods(t.Client, t.Namespace, t.Name, false, 1))
framework.ExpectNoError(e2epod.VerifyPods(t.Client, t.Namespace, t.Name, false, 1))

svcName := fmt.Sprintf("%v.%v.svc.%v", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
ginkgo.By("Waiting for endpoints of Service with DNS name " + svcName)

execPodName := framework.CreateExecPodOrFail(f.ClientSet, f.Namespace.Name, "execpod-", nil)
execPodName := e2epod.CreateExecPodOrFail(f.ClientSet, f.Namespace.Name, "execpod-", nil)
cmd := fmt.Sprintf("wget -qO- http://%s:%d/", svcName, port)
var stdout string
if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) {
@ -1422,8 +1423,8 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By("Prepare allow source ips")
// prepare the exec pods
// acceptPod is allowed to access the load balancer
acceptPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-accept", nil)
dropPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-drop", nil)
acceptPodName := e2epod.CreateExecPodOrFail(cs, namespace, "execpod-accept", nil)
dropPodName := e2epod.CreateExecPodOrFail(cs, namespace, "execpod-drop", nil)

acceptPod, err := cs.CoreV1().Pods(namespace).Get(acceptPodName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to fetch pod: %s in namespace: %s", acceptPodName, namespace)
@ -1521,7 +1522,7 @@ var _ = SIGDescribe("Services", func() {
// a pod to test the service.
ginkgo.By("hitting the internal load balancer from pod")
e2elog.Logf("creating pod with host network")
hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec")
hostExec := e2epod.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec")

e2elog.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName)
tcpIngressIP := framework.GetIngressPoint(lbIngress)
@ -1824,7 +1825,7 @@ var _ = SIGDescribe("Services", func() {
podName := "execpod-noendpoints"

ginkgo.By(fmt.Sprintf("creating %v on node %v", podName, nodeName))
execPodName := framework.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) {
execPodName := e2epod.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) {
pod.Spec.NodeName = nodeName
})
execPod, err := f.ClientSet.CoreV1().Pods(namespace).Get(execPodName, metav1.GetOptions{})
@ -2142,7 +2143,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
podName := "execpod-sourceip"

ginkgo.By(fmt.Sprintf("Creating %v on node %v", podName, nodeName))
execPodName := framework.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) {
execPodName := e2epod.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) {
pod.Spec.NodeName = nodeName
})
defer func() {
@ -2281,7 +2282,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {

func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeName, serviceIP string, servicePort int) (string, string) {
e2elog.Logf("Creating an exec pod on node %v", nodeName)
execPodName := framework.CreateExecPodOrFail(f.ClientSet, ns, fmt.Sprintf("execpod-sourceip-%s", nodeName), func(pod *v1.Pod) {
execPodName := e2epod.CreateExecPodOrFail(f.ClientSet, ns, fmt.Sprintf("execpod-sourceip-%s", nodeName), func(pod *v1.Pod) {
pod.Spec.NodeName = nodeName
})
defer func() {
@ -2361,7 +2362,7 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
svcIP = svc.Spec.ClusterIP
}

execPodName := framework.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil)
execPodName := e2epod.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil)
defer func() {
e2elog.Logf("Cleaning up the exec pod")
err := cs.CoreV1().Pods(ns).Delete(execPodName, nil)
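The node-pinned exec-pod pattern that recurs throughout the hunks above, as a sketch (execPodOnNode is illustrative; the tweak-function signature matches the call sites):

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// execPodOnNode starts an exec pod pinned to a specific node and returns
// its name, following the tweak-function call sites above.
func execPodOnNode(cs clientset.Interface, ns, nodeName string) string {
	return e2epod.CreateExecPodOrFail(cs, ns, "execpod-", func(pod *v1.Pod) {
		pod.Spec.NodeName = nodeName // pin to the node under test
	})
}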
@ -38,6 +38,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/utils:go_default_library",
@ -30,6 +30,7 @@ import (
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/onsi/ginkgo"
@ -67,7 +68,7 @@ func testPreStop(c clientset.Interface, ns string) {
}()

ginkgo.By("Waiting for pods to come up.")
err = framework.WaitForPodRunningInNamespace(c, podDescr)
err = e2epod.WaitForPodRunningInNamespace(c, podDescr)
framework.ExpectNoError(err, "waiting for server pod to start")

val := "{\"Source\": \"prestop\"}"
@ -112,7 +113,7 @@ func testPreStop(c clientset.Interface, ns string) {
}
}()

err = framework.WaitForPodRunningInNamespace(c, preStopDescr)
err = e2epod.WaitForPodRunningInNamespace(c, preStopDescr)
framework.ExpectNoError(err, "waiting for tester pod to start")

// Delete the pod with the preStop handler.
@ -29,6 +29,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/onsi/ginkgo"
@ -207,7 +208,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
pod, err := client.Create(pod)

framework.ExpectNoError(err, "Error creating pod %v", pod)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod))

testContent := "hello"
testFilePath := mountPath + "/TEST"
@ -39,6 +39,7 @@ go_library(
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/timer:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
@ -44,6 +44,7 @@ import (
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/timer"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -1035,6 +1036,6 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns,
},
}
framework.ExpectNoError(testutils.CreateRCWithRetries(c, ns, rc))
framework.ExpectNoError(framework.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController")))
framework.ExpectNoError(e2epod.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController")))
e2elog.Logf("Found pod '%s' running", name)
}
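The controller-level wait used by createRunningPodFromRC above, as a sketch (waitForRC is a hypothetical wrapper; the e2epod call and the api.Kind argument match the call site):

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	api "k8s.io/kubernetes/pkg/apis/core"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForRC blocks until every pod controlled by the named
// ReplicationController is running.
func waitForRC(c clientset.Interface, ns, name string) error {
	return e2epod.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController"))
}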
@ -47,6 +47,7 @@ go_library(
"//test/e2e/framework/gpu:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/utils:go_default_library",
@ -28,6 +28,7 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -61,7 +62,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
// Every test case in this suite assumes that cluster add-on pods stay stable and
// cannot be run in parallel with any other test that touches Nodes or Pods.
// This is because we need precise control over what's running in the cluster.
systemPods, err := framework.GetPodsInNamespace(cs, ns, map[string]string{})
systemPods, err := e2epod.GetPodsInNamespace(cs, ns, map[string]string{})
framework.ExpectNoError(err)
systemPodsNo = 0
for _, pod := range systemPods {
@ -70,7 +71,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
}
}

err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
err = e2epod.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
framework.ExpectNoError(err)

for _, node := range nodeList.Items {
@ -153,7 +154,7 @@ var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
// not for successful RC, since no specific pod name can be provided.
_, err := cs.CoreV1().ReplicationControllers(ns).Create(rc)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForControlledPodsRunning(cs, ns, affinityRCName, api.Kind("ReplicationController")))
framework.ExpectNoError(e2epod.WaitForControlledPodsRunning(cs, ns, affinityRCName, api.Kind("ReplicationController")))

ginkgo.By("Remove node failure domain label")
framework.RemoveLabelOffNode(cs, nodeName, k)
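The count-then-wait pattern above as a sketch (systemPodsReady is illustrative and, unlike the suite above, counts every pod for brevity rather than skipping terminated ones):

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// systemPodsReady counts the pods in kube-system and waits until that many
// are running and ready.
func systemPodsReady(cs clientset.Interface, timeout time.Duration) error {
	pods, err := e2epod.GetPodsInNamespace(cs, metav1.NamespaceSystem, map[string]string{})
	if err != nil {
		return err
	}
	n := int32(len(pods))
	return e2epod.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, n, n, timeout, map[string]string{})
}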
@ -30,6 +30,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework/gpu"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -140,10 +141,10 @@ func SetupNVIDIAGPUNode(f *framework.Framework, setupResourceGatherer bool) *fra
framework.ExpectNoError(err, "failed to create nvidia-driver-installer daemonset")
e2elog.Logf("Successfully created daemonset to install Nvidia drivers.")

pods, err := framework.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet"))
pods, err := e2epod.WaitForControlledPods(f.ClientSet, ds.Namespace, ds.Name, extensionsinternal.Kind("DaemonSet"))
framework.ExpectNoError(err, "failed to get pods controlled by the nvidia-driver-installer daemonset")

devicepluginPods, err := framework.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet"))
devicepluginPods, err := e2epod.WaitForControlledPods(f.ClientSet, "kube-system", "nvidia-gpu-device-plugin", extensionsinternal.Kind("DaemonSet"))
if err == nil {
e2elog.Logf("Adding deviceplugin addon pod.")
pods.Items = append(pods.Items, devicepluginPods.Items...)
@ -265,7 +266,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) {
successes := int32(0)
for _, podName := range createdPodNames {
f.PodClient().WaitForFinish(podName, 5*time.Minute)
logs, err := framework.GetPodLogs(f.ClientSet, ns, podName, "vector-addition")
logs, err := e2epod.GetPodLogs(f.ClientSet, ns, podName, "vector-addition")
framework.ExpectNoError(err, "Should be able to get logs for pod %v", podName)
regex := regexp.MustCompile("PASSED")
if regex.MatchString(logs) {
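A sketch combining the two idioms above, waiting on a controller's pods and then scanning one pod's logs for a success marker (jobPassed and its parameters are illustrative; the e2epod calls match the call sites):

package example

import (
	"regexp"

	clientset "k8s.io/client-go/kubernetes"
	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// jobPassed waits for a DaemonSet's pods, then scans one pod's logs for the
// "PASSED" marker, as VerifyJobNCompletions does above.
func jobPassed(cs clientset.Interface, ns, dsName, podName, container string) (bool, error) {
	if _, err := e2epod.WaitForControlledPods(cs, ns, dsName, extensionsinternal.Kind("DaemonSet")); err != nil {
		return false, err
	}
	logs, err := e2epod.GetPodLogs(cs, ns, podName, container)
	if err != nil {
		return false, err
	}
	return regexp.MustCompile("PASSED").MatchString(logs), nil
}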
@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -305,7 +306,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
}
// Wait for filler pods to schedule.
for _, pod := range fillerPods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod))
}
ginkgo.By("Creating another pod that requires unavailable amount of CPU.")
// Create another pod that requires 50% of the largest node CPU resources.
@ -379,7 +380,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName))
framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, labelPodName))
labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName))
@ -466,7 +467,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new label yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, labelPodName))
framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, labelPodName))
labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(labelPod.Spec.NodeName).To(gomega.Equal(nodeName))
@ -509,7 +510,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
// kubelet and the scheduler: the scheduler might have scheduled a pod
// already when the kubelet does not know about its new taint yet. The
// kubelet will then refuse to launch the pod.
framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, tolerationPodName))
framework.ExpectNoError(e2epod.WaitForPodNotPending(cs, ns, tolerationPodName))
deployedPod, err := cs.CoreV1().Pods(ns).Get(tolerationPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(deployedPod.Spec.NodeName).To(gomega.Equal(nodeName))
@ -652,7 +653,7 @@ func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {

func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod := createPausePod(f, conf)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod))
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(conf.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
return pod
@ -721,7 +722,7 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action common.Action, n
func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
framework.ExpectNoError(err)
scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods)
scheduledPods, notScheduledPods := e2epod.GetPodsScheduled(masterNodes, allPods)

printed := false
printOnce := func(msg string) string {
@ -739,7 +740,7 @@ func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotSched
// verifyReplicasResult is a wrapper of verifyResult for a group of pods with the same "name: labelName" label, which means they belong to the same RC
func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string, labelName string) {
allPods := getPodsByLabels(c, ns, map[string]string{"name": labelName})
scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods)
scheduledPods, notScheduledPods := e2epod.GetPodsScheduled(masterNodes, allPods)

printed := false
printOnce := func(msg string) string {
@ -818,7 +819,7 @@ func createHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string,
NodeSelector: nodeSelector,
})

err := framework.WaitForPodNotPending(f.ClientSet, ns, podName)
err := e2epod.WaitForPodNotPending(f.ClientSet, ns, podName)
if expectScheduled {
framework.ExpectNoError(err)
}
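The either-way scheduling assertion used by createHostPortPodOnNode above, as a sketch (expectScheduled is a hypothetical helper; the e2epod call matches the call site):

package example

import (
	"fmt"

	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// expectScheduled asserts either outcome of scheduling: the pod must leave
// Pending, or it must stay Pending.
func expectScheduled(cs clientset.Interface, ns, podName string, want bool) error {
	err := e2epod.WaitForPodNotPending(cs, ns, podName)
	if want {
		return err // nil means the pod was scheduled, as required
	}
	if err == nil {
		return fmt.Errorf("pod %s unexpectedly left Pending", podName)
	}
	return nil
}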
@ -35,6 +35,7 @@ import (
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/replicaset"

"github.com/onsi/ginkgo"
@ -121,7 +122,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
}
ginkgo.By("Wait for pods to be scheduled.")
for _, pod := range pods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod))
}

ginkgo.By("Run a high priority pod that uses 60% of a node's resources.")
@ -181,7 +182,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
}
ginkgo.By("Wait for pods to be scheduled.")
for _, pod := range pods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod))
}

ginkgo.By("Run a critical pod that uses 60% of a node's resources.")
@ -297,7 +298,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {

ginkgo.By("Wait for pods to be scheduled.")
for _, pod := range pods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod))
}

ginkgo.By("Run a high priority pod with node affinity to the first node.")
@ -37,6 +37,7 @@ import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -81,7 +82,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {

err := framework.CheckTestingNSDeletedExcept(cs, ns)
framework.ExpectNoError(err)
err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{})
err = e2epod.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{})
framework.ExpectNoError(err)
})
@ -27,6 +27,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

"github.com/onsi/ginkgo"
)
@ -110,7 +111,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {

ginkgo.By("Verifying all pods are running properly")
for _, pod := range pods {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(cs, pod))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(cs, pod))
}

// get the node API object
@ -162,7 +163,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
framework.ExpectNoError(err)

ginkgo.By("Expecting pod0 to be evicted immediately")
err = framework.WaitForPodCondition(cs, ns, pods[0].Name, "pod0 terminating", time.Second*15, func(pod *v1.Pod) (bool, error) {
err = e2epod.WaitForPodCondition(cs, ns, pods[0].Name, "pod0 terminating", time.Second*15, func(pod *v1.Pod) (bool, error) {
// as node is unreachable, pod0 is expected to be in Terminating status
// rather than getting deleted
if pod.DeletionTimestamp != nil {
@ -173,7 +174,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
framework.ExpectNoError(err)

ginkgo.By("Expecting pod2 to be updated with a toleration with tolerationSeconds=300")
err = framework.WaitForPodCondition(cs, ns, pods[2].Name, "pod2 updated with tolerationSeconds=300", time.Second*15, func(pod *v1.Pod) (bool, error) {
err = e2epod.WaitForPodCondition(cs, ns, pods[2].Name, "pod2 updated with tolerationSeconds=300", time.Second*15, func(pod *v1.Pod) (bool, error) {
if seconds, err := getTolerationSeconds(pod.Spec.Tolerations); err == nil {
return seconds == 300, nil
}
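WaitForPodCondition takes a caller-supplied predicate, which is what makes the eviction checks above possible; a sketch of the Terminating check (waitForTerminating is illustrative; the signature matches the call sites above):

package example

import (
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForTerminating waits until the pod carries a deletion timestamp, i.e.
// it is Terminating but not yet gone.
func waitForTerminating(cs clientset.Interface, ns, name string) error {
	return e2epod.WaitForPodCondition(cs, ns, name, "terminating", 15*time.Second,
		func(pod *v1.Pod) (bool, error) {
			return pod.DeletionTimestamp != nil, nil
		})
}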
@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -106,7 +107,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)

// Wait for all of them to be scheduled
selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
pods, err := framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
framework.ExpectNoError(err)

// Now make sure they're spread across zones
@ -215,12 +216,12 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
}()
// List the pods, making sure we observe all the replicas.
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
framework.ExpectNoError(err)

// Wait for all of them to be scheduled
ginkgo.By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector))
pods, err = framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
pods, err = e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
framework.ExpectNoError(err)

// Now make sure they're spread across zones
@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
)
@ -199,10 +200,10 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
defer func() {
ginkgo.By("Cleaning up pods and PVs")
for _, config := range configs {
framework.DeletePodOrFail(c, ns, config.pod.Name)
e2epod.DeletePodOrFail(c, ns, config.pod.Name)
}
for _, config := range configs {
framework.WaitForPodNoLongerRunningInNamespace(c, config.pod.Name, ns)
e2epod.WaitForPodNoLongerRunningInNamespace(c, config.pod.Name, ns)
framework.PVPVCCleanup(c, ns, config.pv, config.pvc)
err = framework.DeletePVSource(config.pvSource)
framework.ExpectNoError(err)
@ -240,7 +241,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)

ginkgo.By("Waiting for all pods to be running")
for _, config := range configs {
err = framework.WaitForPodRunningInNamespace(c, config.pod)
err = e2epod.WaitForPodRunningInNamespace(c, config.pod)
framework.ExpectNoError(err)
}
}
@ -68,6 +68,7 @@ go_library(
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
@ -33,10 +33,10 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"

volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
@ -261,7 +261,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
if pod == nil {
return
}
err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "Failed to start pod: %v", err)

ginkgo.By("Checking if VolumeAttachment was created for the pod")
@ -336,7 +336,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
if pod == nil {
return
}
err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "Failed to start pod: %v", err)
ginkgo.By("Checking CSI driver logs")

@ -367,13 +367,13 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
_, _, pod1 := createPod()
gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod")

err = framework.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace)
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod1.Name, pod1.Namespace)
framework.ExpectNoError(err, "Failed to start pod1: %v", err)

_, _, pod2 := createPod()
gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating second pod")

err = framework.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace)
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod2.Name, pod2.Namespace)
framework.ExpectNoError(err, "Failed to start pod2: %v", err)

_, _, pod3 := createPod()
@ -434,7 +434,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {

gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")

err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "Failed to start pod1: %v", err)

ginkgo.By("Expanding current pvc")
@ -525,7 +525,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {

gomega.Expect(*sc.AllowVolumeExpansion).To(gomega.BeTrue(), "failed creating sc with allowed expansion")

err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "Failed to start pod1: %v", err)

ginkgo.By("Expanding current pvc")
@ -711,7 +711,7 @@ func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContai
"csi.storage.k8s.io/serviceAccount.name": "default",
}
// Load logs of driver pod
log, err := framework.GetPodLogs(cs, namespace, driverPodName, driverContainerName)
log, err := e2epod.GetPodLogs(cs, namespace, driverPodName, driverContainerName)
if err != nil {
return fmt.Errorf("could not load CSI driver logs: %s", err)
}
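Note the two wait variants in play above: WaitForPodRunningInNamespace takes the pod object, while WaitForPodNameRunningInNamespace, which the CSI tests use, takes only a name and namespace. A sketch of the name-based variant (waitByName is illustrative):

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitByName waits for a pod the caller knows only by name and namespace.
func waitByName(cs clientset.Interface, ns, name string) error {
	return e2epod.WaitForPodNameRunningInNamespace(cs, name, ns)
}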
@ -21,6 +21,7 @@ import (
storagev1 "k8s.io/api/storage/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
@ -159,7 +160,7 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
pod := testsuites.StartInPodWithVolume(cs, namespace, claim.Name, "pvc-tester-unschedulable", "sleep 100000",
framework.NodeSelection{Selector: nodeSelector})
defer testsuites.StopPod(cs, pod)
framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable")
framework.ExpectNoError(e2epod.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable")
}
test.TestDynamicProvisioning()
}
@ -24,6 +24,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/auth:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/testsuites:go_default_library",
@ -56,6 +56,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
@ -861,7 +862,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(prepPod)
framework.ExpectNoError(err, "while creating hostPath init pod")

err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "while waiting for hostPath init pod to succeed")

err = framework.DeletePodWithWait(f, f.ClientSet, pod)
@ -883,7 +884,7 @@ func (v *hostPathSymlinkVolume) DeleteVolume() {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(v.prepPod)
framework.ExpectNoError(err, "while creating hostPath teardown pod")

err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "while waiting for hostPath teardown pod to succeed")

err = framework.DeletePodWithWait(f, f.ClientSet, pod)
@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -410,7 +411,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
framework.ExpectNoError(err)
}()

pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)
pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)

ginkgo.By("Ensuring each pod is running")

@ -22,6 +22,7 @@ import (

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -29,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
@ -262,7 +264,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framew
framework.DeletePodWithWait(f, c, pod)
}
}()
err = framework.WaitForPodRunningInNamespace(c, pod)
err = e2epod.WaitForPodRunningInNamespace(c, pod)
framework.ExpectNoError(err, fmt.Sprintf("Pod %q timed out waiting for phase: Running", pod.Name))
// Return created api objects
pod, err = c.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
@ -40,6 +40,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -227,7 +228,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {

ginkgo.AfterEach(func() {
ginkgo.By("Deleting pod1")
framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name)
})

ginkgo.It("should be able to mount volume and read from pod1", func() {
@ -269,7 +270,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
ginkgo.By("Checking fsGroup is set")
pod := createPodWithFsGroupTest(config, testVol, 1234, 1234)
ginkgo.By("Deleting pod")
framework.DeletePodOrFail(config.client, config.ns, pod.Name)
e2epod.DeletePodOrFail(config.client, config.ns, pod.Name)
})

ginkgo.It("should set same fsGroup for two pods simultaneously [Slow]", func() {
@ -279,9 +280,9 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
ginkgo.By("Create second pod with same fsGroup and check fsGroup is correct")
pod2 := createPodWithFsGroupTest(config, testVol, fsGroup, fsGroup)
ginkgo.By("Deleting first pod")
framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name)
ginkgo.By("Deleting second pod")
framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name)
})

ginkgo.It("should set different fsGroup for second pod if first pod is deleted", func() {
@ -295,7 +296,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
ginkgo.By("Create second pod and check fsGroup is the new one")
pod2 := createPodWithFsGroupTest(config, testVol, fsGroup2, fsGroup2)
ginkgo.By("Deleting second pod")
framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name)
})
})

@ -317,7 +318,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
createLocalPVCsPVs(config, []*localTestVolume{testVol}, immediateMode)
pod, err := createLocalPod(config, testVol, nil)
framework.ExpectError(err)
err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
framework.ExpectError(err)
cleanupLocalPVCsPVs(config, []*localTestVolume{testVol})
})
@ -334,7 +335,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
framework.ExpectNoError(err)

err = framework.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
framework.ExpectError(err)

cleanupLocalVolumes(config, []*localTestVolume{testVol})
@ -703,7 +704,7 @@ func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeTyp
pod, err := config.client.CoreV1().Pods(config.ns).Create(pod)
framework.ExpectNoError(err)

err = framework.WaitForPodNameUnschedulableInNamespace(config.client, pod.Name, pod.Namespace)
err = e2epod.WaitForPodNameUnschedulableInNamespace(config.client, pod.Name, pod.Namespace)
framework.ExpectNoError(err)
}

@ -741,9 +742,9 @@ func twoPodsReadWriteTest(config *localTestConfig, testVol *localTestVolume) {
testReadFileContent(volumeDir, testFile, testVol.ltr.Path, pod1, testVol.localVolumeType)

ginkgo.By("Deleting pod1")
framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name)
ginkgo.By("Deleting pod2")
framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name)
}

// Test two pods one after the other, write from pod1, and read from pod2
@ -762,7 +763,7 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum
testReadFileContent(volumeDir, testFile, testFileContent, pod1, testVol.localVolumeType)

ginkgo.By("Deleting pod1")
framework.DeletePodOrFail(config.client, config.ns, pod1.Name)
e2epod.DeletePodOrFail(config.client, config.ns, pod1.Name)

ginkgo.By("Creating pod2")
pod2, pod2Err := createLocalPod(config, testVol, nil)
@ -773,7 +774,7 @@ func twoPodsReadWriteSerialTest(config *localTestConfig, testVol *localTestVolum
testReadFileContent(volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)

ginkgo.By("Deleting pod2")
framework.DeletePodOrFail(config.client, config.ns, pod2.Name)
e2epod.DeletePodOrFail(config.client, config.ns, pod2.Name)
}

// Test creating pod with fsGroup, and check fsGroup is expected fsGroup.
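The negative wait above, where a short timeout is the expected outcome, as a sketch (expectNeverRuns and its window parameter are illustrative; the e2epod call matches the call sites):

package example

import (
	"fmt"
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// expectNeverRuns gives a pod a short window to start and treats a timeout
// as success, the negative check used for unbindable local volumes above.
func expectNeverRuns(cs clientset.Interface, ns, name string, window time.Duration) error {
	if err := e2epod.WaitTimeoutForPodRunningInNamespace(cs, name, ns, window); err == nil {
		return fmt.Errorf("pod %s unexpectedly reached Running", name)
	}
	return nil // timed out as intended
}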
@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -281,7 +282,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
pod := framework.MakeWritePod(ns, pvc)
pod, err = c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns))

ginkgo.By("Deleting the claim")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
@ -299,7 +300,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
pod = framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
pod, err = c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns))
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
e2elog.Logf("Pod exited without failure; the volume has been recycled.")
})
@ -40,6 +40,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -272,7 +273,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
"The same PVC should be used after failover.")

ginkgo.By("verifying the container output has 2 lines, indicating the pod has been created twice using the same regional PD.")
logs, err := framework.GetPodLogs(c, ns, pod.Name, "")
logs, err := e2epod.GetPodLogs(c, ns, pod.Name, "")
framework.ExpectNoError(err,
"Error getting logs from pod %s in namespace %s", pod.Name, ns)
lineCount := len(strings.Split(strings.TrimSpace(logs), "\n"))
@ -36,6 +36,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/podlogs:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
@ -34,6 +34,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)
@ -358,7 +359,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
// pod might be nil now.
StopPod(client, pod)
}()
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName
@ -414,7 +415,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data"
pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName
@ -430,7 +431,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
command = "select-string 'hello world' /mnt/test/data"
}
pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
gomega.Expect(runningPod.Spec.NodeName).NotTo(gomega.Equal(actualNodeName), "second pod should have run on a different node")
@ -496,8 +497,8 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
}
framework.ExpectNoError(err)
defer func() {
framework.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
framework.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
e2epod.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
}()
if expectUnschedulable {
// Verify that no claims are provisioned.
@ -532,7 +533,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node framework.NodeSelection) {
pod := StartInPodWithVolume(c, ns, claimName, podName, command, node)
defer StopPod(c, pod)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}

// StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory
@ -597,7 +598,7 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
} else {
e2elog.Logf("Pod %s has the following logs: %s", pod.Name, body)
}
framework.DeletePodOrFail(c, pod.Namespace, pod.Name)
e2epod.DeletePodOrFail(c, pod.Namespace, pod.Name)
}

func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
@ -28,6 +28,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
"k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
@ -433,7 +434,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
|
||||
}()
|
||||
|
||||
// Wait for pod to be running
|
||||
err = framework.WaitForPodRunningInNamespace(f.ClientSet, l.pod)
|
||||
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, l.pod)
|
||||
framework.ExpectNoError(err, "while waiting for pod to be running")
|
||||
|
||||
// Exec into container that mounted the volume, delete subpath directory
|
||||
@ -795,7 +796,7 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) {
|
||||
defer func() {
|
||||
framework.DeletePodWithWait(f, f.ClientSet, pod)
|
||||
}()
|
||||
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
|
||||
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
|
||||
framework.ExpectNoError(err, "while waiting for pod to be running")
|
||||
|
||||
ginkgo.By("Failing liveness probe")
|
||||
@ -886,7 +887,7 @@ func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||
framework.ExpectNoError(err, "while creating pod")
|
||||
|
||||
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
|
||||
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
|
||||
framework.ExpectNoError(err, "while waiting for pod to be running")
|
||||
|
||||
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
|
||||
@ -900,7 +901,7 @@ func formatVolume(f *framework.Framework, pod *v1.Pod) {
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||
framework.ExpectNoError(err, "while creating volume init pod")
|
||||
|
||||
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
|
||||
err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
|
||||
framework.ExpectNoError(err, "while waiting for volume init pod to succeed")
|
||||
|
||||
err = framework.DeletePodWithWait(f, f.ClientSet, pod)
|
||||
|
@ -30,11 +30,13 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
"k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
@ -320,7 +322,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config volume.
|
||||
}
|
||||
}()
|
||||
|
||||
err = framework.WaitForPodRunningInNamespace(cs, clientPod)
|
||||
err = e2epod.WaitForPodRunningInNamespace(cs, clientPod)
|
||||
if err != nil {
|
||||
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
|
||||
}
|
||||
|
@ -27,6 +27,7 @@ go_library(
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/framework/pod:go_default_library",
|
||||
"//test/e2e/framework/ssh:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
)
|
||||
|
||||
// HostExec represents interface we require to execute commands on remote host.
|
||||
@ -50,7 +51,7 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
|
||||
f := h.Framework
|
||||
cs := f.ClientSet
|
||||
ns := f.Namespace
|
||||
hostExecPod := framework.NewExecPodSpec(ns.Name, fmt.Sprintf("hostexec-%s", node), true)
|
||||
hostExecPod := e2epod.NewExecPodSpec(ns.Name, fmt.Sprintf("hostexec-%s", node), true)
|
||||
hostExecPod.Spec.NodeName = node
|
||||
hostExecPod.Spec.Volumes = []v1.Volume{
|
||||
{
|
||||
@ -77,7 +78,7 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
|
||||
}
|
||||
pod, err := cs.CoreV1().Pods(ns.Name).Create(hostExecPod)
|
||||
framework.ExpectNoError(err)
|
||||
err = framework.WaitForPodRunningInNamespace(cs, pod)
|
||||
err = e2epod.WaitForPodRunningInNamespace(cs, pod)
|
||||
framework.ExpectNoError(err)
|
||||
return pod
|
||||
}
|
||||
@ -118,7 +119,7 @@ func (h *hostExecutor) IssueCommand(cmd string, node *v1.Node) error {
|
||||
// pods under test namespace which will be destroyed in teardown phase.
|
||||
func (h *hostExecutor) Cleanup() {
|
||||
for _, pod := range h.nodeExecPods {
|
||||
framework.DeletePodOrFail(h.Framework.ClientSet, pod.Namespace, pod.Name)
|
||||
e2epod.DeletePodOrFail(h.Framework.ClientSet, pod.Namespace, pod.Name)
|
||||
}
|
||||
h.nodeExecPods = make(map[string]*v1.Pod)
|
||||
}
|
||||
|