Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-20 18:31:15 +00:00)
Merge pull request #86679 from oomichi/remove-invalid-dependency-27
Use e2epod.WaitForPodNameRunningInNamespace directly
commit a26f50a52e
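
A minimal sketch (not part of the diff) of the calling convention this change adopts: tests call the e2epod helper directly, passing the clientset and namespace that the framework wrapper used to supply. The package and function names below are illustrative; the framework and e2epod identifiers are the ones used throughout the hunks that follow.

	package e2eexample // hypothetical package, for illustration only

	import (
		"k8s.io/kubernetes/test/e2e/framework"
		e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	)

	// waitForPodRunningExample shows the migration: the old call
	// f.WaitForPodRunning(podName) becomes a direct call into the e2epod
	// subpackage, with the clientset and namespace passed explicitly.
	func waitForPodRunningExample(f *framework.Framework, podName string) {
		framework.ExpectNoError(
			e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name))
	}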
@@ -31,6 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 
 	"github.com/onsi/ginkgo"

@@ -147,7 +148,7 @@ var _ = SIGDescribe("Generated clientset", func() {
 
 	// We need to wait for the pod to be scheduled, otherwise the deletion
 	// will be carried out immediately rather than gracefully.
-	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
+	framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
 
 	ginkgo.By("deleting the pod gracefully")
 	gracePeriod := int64(31)

@@ -142,7 +142,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
 	if pod.DeletionTimestamp != nil {
 		continue
 	}
-	err = f.WaitForPodRunning(pod.Name)
+	err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 	if err != nil {
 		updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 		if getErr == nil {

@@ -144,7 +144,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
 	if pod.DeletionTimestamp != nil {
 		continue
 	}
-	err = f.WaitForPodRunning(pod.Name)
+	err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 	if err != nil {
 		updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 		if getErr == nil {

@@ -732,7 +732,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 	framework.ExpectNoError(err)
 
 	ginkgo.By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name)
-	if err := f.WaitForPodRunning(podName); err != nil {
+	if err := e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name); err != nil {
 		framework.Failf("Pod %v did not start running: %v", podName, err)
 	}
 

@@ -841,7 +841,7 @@ func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath, podN
 	}
 	ginkgo.By("Creating the pod")
 	pod = f.PodClient().Create(pod)
-	return f.WaitForPodRunning(pod.Name)
+	return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 }
 
 func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMountPath, podName string) error {

@@ -903,5 +903,5 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount
 	}
 	ginkgo.By("Creating the pod")
 	pod = f.PodClient().Create(pod)
-	return f.WaitForPodRunning(pod.Name)
+	return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 }

@@ -26,6 +26,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )

@@ -283,7 +284,7 @@ var _ = ginkgo.Describe("[sig-storage] EmptyDir volumes", func() {
 	pod = f.PodClient().CreateSync(pod)
 
 	ginkgo.By("Waiting for the pod running")
-	err = f.WaitForPodRunning(pod.Name)
+	err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 	framework.ExpectNoError(err, "failed to deploy pod %s", pod.Name)
 
 	ginkgo.By("Geting the pod")

@@ -43,6 +43,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
 	"github.com/onsi/ginkgo"

@@ -290,7 +291,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 
 	// We need to wait for the pod to be running, otherwise the deletion
 	// may be carried out immediately rather than gracefully.
-	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
+	framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
 	// save the running pod
 	pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "failed to GET scheduled pod")

@@ -397,7 +398,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 		pod.Labels["time"] = value
 	})
 
-	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
+	framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
 
 	ginkgo.By("verifying the updated pod is in kubernetes")
 	selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))

@@ -718,7 +719,7 @@ var _ = framework.KubeDescribe("Pods", func() {
 	})
 
 	time.Sleep(syncLoopFrequency)
-	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
+	framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
 
 	ginkgo.By("get restart delay after image update")
 	delayAfterUpdate, err := getRestartDelay(podClient, podName, containerName)

@@ -632,7 +632,7 @@ func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName
 	}
 	ginkgo.By("Creating the pod")
 	pod = f.PodClient().Create(pod)
-	return f.WaitForPodRunning(pod.Name)
+	return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 }
 
 func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPath, podName string) error {

@@ -693,5 +693,5 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat
 	}
 	ginkgo.By("Creating the pod")
 	pod = f.PodClient().Create(pod)
-	return f.WaitForPodRunning(pod.Name)
+	return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 }

@@ -520,11 +520,6 @@ func (f *Framework) WaitForPodNotFound(podName string, timeout time.Duration) er
 	return e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, podName, f.Namespace.Name, timeout)
 }
 
-// WaitForPodRunning waits for the pod to run in the namespace.
-func (f *Framework) WaitForPodRunning(podName string) error {
-	return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
-}
-
 // WaitForPodReady waits for the pod to flip to ready in the namespace.
 func (f *Framework) WaitForPodReady(podName string) error {
 	return e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, PodStartTimeout)
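
The deleted wrapper above was a thin one-liner. Out-of-tree suites that still want the short form could keep an equivalent local helper; a hypothetical sketch (the helper name is illustrative, the body is identical to the removed method):

	// waitPodRunning is a hypothetical drop-in for the deleted
	// framework.Framework.WaitForPodRunning method.
	func waitPodRunning(f *framework.Framework, podName string) error {
		return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
	}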
@@ -565,7 +565,7 @@ func (config *NetworkingTestConfig) createTestPods() {
 		config.createPod(hostTestContainerPod)
 	}
 
-	framework.ExpectNoError(config.f.WaitForPodRunning(testContainerPod.Name))
+	framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(config.f.ClientSet, testContainerPod.Name, config.f.Namespace.Name))
 
 	var err error
 	config.TestContainerPod, err = config.getPodClient().Get(context.TODO(), testContainerPod.Name, metav1.GetOptions{})

@@ -574,7 +574,7 @@ func (config *NetworkingTestConfig) createTestPods() {
 	}
 
 	if config.HostNetwork {
-		framework.ExpectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name))
+		framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(config.f.ClientSet, hostTestContainerPod.Name, config.f.Namespace.Name))
 		config.HostTestContainerPod, err = config.getPodClient().Get(context.TODO(), hostTestContainerPod.Name, metav1.GetOptions{})
 		if err != nil {
 			framework.Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)

@@ -26,6 +26,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 

@@ -482,7 +483,7 @@ var _ = SIGDescribe("DNS", func() {
 			framework.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err)
 		}
 	}()
-	err = f.WaitForPodRunning(testServerPod.Name)
+	err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, testServerPod.Name, f.Namespace.Name)
 	framework.ExpectNoError(err, "failed to wait for pod %s to be running", testServerPod.Name)
 
 	// Retrieve server pod IP.

@@ -514,7 +515,7 @@ var _ = SIGDescribe("DNS", func() {
 			framework.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err)
 		}
 	}()
-	err = f.WaitForPodRunning(testUtilsPod.Name)
+	err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, testUtilsPod.Name, f.Namespace.Name)
 	framework.ExpectNoError(err, "failed to wait for pod %s to be running", testUtilsPod.Name)
 
 	ginkgo.By("Verifying customized DNS option is configured on pod...")

@@ -35,6 +35,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
 	"github.com/onsi/ginkgo"

@@ -226,7 +227,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
 	t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(context.TODO(), t.utilPod, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "failed to create pod: %v", t.utilPod)
 	framework.Logf("Created pod %v", t.utilPod)
-	err = t.f.WaitForPodRunning(t.utilPod.Name)
+	err = e2epod.WaitForPodNameRunningInNamespace(t.f.ClientSet, t.utilPod.Name, t.f.Namespace.Name)
 	framework.ExpectNoError(err, "pod failed to start running: %v", t.utilPod)
 
 	t.utilService = &v1.Service{

@@ -351,7 +352,7 @@ func (t *dnsTestCommon) createDNSPodFromObj(pod *v1.Pod) {
 	t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(context.TODO(), t.dnsServerPod, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "failed to create pod: %v", t.dnsServerPod)
 	framework.Logf("Created pod %v", t.dnsServerPod)
-	err = t.f.WaitForPodRunning(t.dnsServerPod.Name)
+	err = e2epod.WaitForPodNameRunningInNamespace(t.f.ClientSet, t.dnsServerPod.Name, t.f.Namespace.Name)
 	framework.ExpectNoError(err, "pod failed to start running: %v", t.dnsServerPod)
 
 	t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get(context.TODO(), t.dnsServerPod.Name, metav1.GetOptions{})

@@ -33,6 +33,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	netutils "k8s.io/utils/net"

@@ -99,7 +100,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
 
 	ginkgo.By("submitting the pod to kubernetes")
 	podClient.CreateSync(pod)
-	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
+	framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
 
 	p, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)

@@ -28,6 +28,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 
 	"github.com/onsi/ginkgo"
 )

@@ -76,7 +77,7 @@ var _ = SIGDescribe("Events", func() {
 		framework.Failf("Failed to create pod: %v", err)
 	}
 
-	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
+	framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
 
 	ginkgo.By("verifying the pod is in kubernetes")
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))

@@ -37,6 +37,7 @@ import (
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 
 	"github.com/onsi/ginkgo"

@@ -102,7 +103,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 
 	// We need to wait for the pod to be running, otherwise the deletion
 	// may be carried out immediately rather than gracefully.
-	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
+	framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
 	// save the running pod
 	pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "failed to GET scheduled pod")

@@ -191,7 +191,7 @@ var _ = SIGDescribe("PreStop", func() {
 	podClient.Create(pod)
 
 	ginkgo.By("waiting for pod running")
-	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
+	framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
 
 	var err error
 	pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})

@@ -267,7 +267,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
 	_, err = client.Create(context.TODO(), pod, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "Error creating pod %v", pod)
 
-	err = f.WaitForPodRunning(pod.Name)
+	err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 	framework.ExpectNoError(err, "Error waiting for pod to run %v", pod)
 
 	// for this to work, SELinux should be in enforcing mode, so let's check that

@@ -232,7 +232,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 		},
 	})
 	ginkgo.By("Wait the pod becomes running")
-	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
+	framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
 	labelPod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), labelPodName, metav1.GetOptions{})
 	framework.ExpectNoError(err)
 	ginkgo.By("Verify the pod was scheduled to the expected node.")

@@ -331,7 +331,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 		Name: tolerationPodName,
 		Tolerations: tolerations,
 	})
-	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
+	framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
 
 	ginkgo.By("Pod should prefer scheduled to the node that pod can tolerate.")
 	tolePod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), tolerationPodName, metav1.GetOptions{})

@@ -17,6 +17,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/skipper:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",

@@ -30,6 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/watch"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 

@@ -145,7 +146,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
 
 	// We need to wait for the pod to be running, otherwise the deletion
 	// may be carried out immediately rather than gracefully.
-	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
+	framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name))
 
 	ginkgo.By("ensuring pod is modified")
 	// save the running pod

@@ -265,7 +266,7 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
 
 	// We need to wait for the pod to be running, otherwise the deletion
 	// may be carried out immediately rather than gracefully.
-	framework.ExpectNoError(f.WaitForPodRunning(originalPod.Name))
+	framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, originalPod.Name, f.Namespace.Name))
 
 	ginkgo.By("ensuring pod is modified")
 	// save the running pod

@@ -418,7 +418,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
 		if pod.DeletionTimestamp != nil {
 			continue
 		}
-		err = f.WaitForPodRunning(pod.Name)
+		err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
 		framework.ExpectNoError(err, "Failed waiting for pod %s to enter running state", pod.Name)
 	}
 }

@@ -157,7 +157,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
 	defer e2epod.DeletePodWithWait(c, pod)
 
 	ginkgo.By("Waiting for pod to go to 'running' state")
-	err = f.WaitForPodRunning(pod.ObjectMeta.Name)
+	err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name)
 	framework.ExpectNoError(err, "Pod didn't go to 'running' state %v", err)
 
 	ginkgo.By("Expanding current pvc")

@@ -24,6 +24,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 

@@ -59,7 +60,7 @@ var _ = SIGDescribe("DNS", func() {
 			framework.Failf("Failed to delete pod %s: %v", testUtilsPod.Name, err)
 		}
 	}()
-	framework.ExpectNoError(f.WaitForPodRunning(testUtilsPod.Name), "failed to wait for pod %s to be running", testUtilsPod.Name)
+	framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, testUtilsPod.Name, f.Namespace.Name), "failed to wait for pod %s to be running", testUtilsPod.Name)
 
 	ginkgo.By("Verifying customized DNS option is configured on pod...")
 	cmd := []string{"ipconfig", "/all"}