Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-09-12 12:48:51 +00:00)
Refactored pod-related functions from framework/util.go

This is a refactoring of framework/util.go into framework/pod.

Signed-off-by: Jorge Alarcon Ochoa <alarcj137@gmail.com>
commit 4969a05327
parent b3981a2f9a
committed by alejandrox1
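
The pattern behind the commit is simple: pod-related helpers move out of the monolithic framework/util.go into a dedicated framework/pod package, and call sites switch from the framework prefix to an e2epod alias. As a rough sketch of what the moved wait helper could look like (the body, poll interval, and timeout are assumptions, not the upstream code; only the name and signature come from the diff below):

package pod

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// slowPodStartTimeout is an assumed value; the real framework defines its own.
const slowPodStartTimeout = 15 * time.Minute

// WaitForPodSuccessInNamespaceSlow polls until the pod reaches the Succeeded
// phase, returning an error early if it lands in Failed instead.
func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName, namespace string) error {
	return wait.PollImmediate(2*time.Second, slowPodStartTimeout, func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		switch pod.Status.Phase {
		case v1.PodSucceeded:
			return true, nil
		case v1.PodFailed:
			return false, fmt.Errorf("pod %q failed with status %+v", podName, pod.Status)
		default:
			return false, nil
		}
	})
}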
@@ -34,6 +34,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 )
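
The added import follows the framework's aliasing convention: each e2e framework subpackage gets a short e2e* alias so call sites stay terse and unambiguous. A minimal sketch of the two aliases used together (the wrapper function is hypothetical; the two aliased calls appear in the hunks below):

package example

import (
	clientset "k8s.io/client-go/kubernetes"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitAndLog is a hypothetical wrapper showing how the aliased call sites read.
func waitAndLog(c clientset.Interface, podName, namespace string) error {
	if err := e2epod.WaitForPodSuccessInNamespaceSlow(c, podName, namespace); err != nil {
		e2elog.Logf("pod %s/%s did not succeed: %v", namespace, podName, err)
		return err
	}
	return nil
}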
@@ -358,7 +359,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
 		// pod might be nil now.
 		StopPod(client, pod)
 	}()
-	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
+	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
 	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "get pod")
 	actualNodeName := runningPod.Spec.NodeName
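
The "pod might be nil now" comment in this hunk is load-bearing: the test nils out the pod variable once another code path takes over cleanup, so the deferred StopPod must treat nil as a no-op. A sketch of that contract (illustrative body; the real StopPod also captures the pod's logs before deleting it, as the final hunk shows):

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// stopPodSketch mirrors the nil-tolerant contract implied by the defer above.
func stopPodSketch(c clientset.Interface, pod *v1.Pod) {
	if pod == nil {
		return // a later code path took ownership of cleanup
	}
	e2epod.DeletePodOrFail(c, pod.Namespace, pod.Name)
}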
@@ -414,7 +415,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
 	ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
 	command := "echo 'hello world' > /mnt/test/data"
 	pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
-	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
+	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
 	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "get pod")
 	actualNodeName := runningPod.Spec.NodeName
@@ -430,7 +431,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
 		command = "select-string 'hello world' /mnt/test/data"
 	}
 	pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
-	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
+	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
 	runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "get pod")
 	gomega.Expect(runningPod.Spec.NodeName).NotTo(gomega.Equal(actualNodeName), "second pod should have run on a different node")
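
For context on the command in this hunk: select-string is PowerShell's grep analogue, so the reader pod verifies the written data with a command suited to the node's OS. A hypothetical helper distilling that choice:

package example

// readerCommand returns the command the reader pod runs to verify the
// volume contents. Hypothetical helper, sketched from the hunk above.
func readerCommand(windows bool) string {
	if windows {
		return "select-string 'hello world' /mnt/test/data" // PowerShell
	}
	return "grep 'hello world' /mnt/test/data" // POSIX shells
}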
@@ -496,8 +497,8 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
 	}
 	framework.ExpectNoError(err)
 	defer func() {
-		framework.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
-		framework.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
+		e2epod.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
+		e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
 	}()
 	if expectUnschedulable {
 		// Verify that no claims are provisioned.
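
The two replaced lines are the standard delete-then-wait cleanup idiom: pod deletion is asynchronous, so the test also blocks until the pod is actually gone before moving on. A self-contained sketch using the same calls as the hunk (only the wrapper name is invented):

package example

import (
	"k8s.io/apimachinery/pkg/labels"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// cleanupPod deletes the pod and waits until the API server no longer
// reports it, so the next test starts from a clean namespace.
func cleanupPod(c clientset.Interface, namespace, name string) {
	e2epod.DeletePodOrFail(c, namespace, name)
	e2epod.WaitForPodToDisappear(c, namespace, name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
}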
@@ -532,7 +533,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
 func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node framework.NodeSelection) {
 	pod := StartInPodWithVolume(c, ns, claimName, podName, command, node)
 	defer StopPod(c, pod)
-	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
+	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
 }
 
 // StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory
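
RunInPodWithVolume is the convenience wrapper tying the pieces together: start the pod, guarantee cleanup via the deferred StopPod, and require success. A hypothetical call from a test body, assuming it sits in the same package as RunInPodWithVolume and that a zero-value NodeSelection imposes no scheduling constraint:

package testsuites // assumed package name

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// checkVolumeReadable is a hypothetical helper: it runs a one-shot pod
// that greps the claim's volume; RunInPodWithVolume asserts success.
func checkVolumeReadable(c clientset.Interface, ns, claimName string) {
	command := "grep 'hello world' /mnt/test/data"
	RunInPodWithVolume(c, ns, claimName, "pvc-reader", command, framework.NodeSelection{})
}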
@@ -597,7 +598,7 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
 	} else {
 		e2elog.Logf("Pod %s has the following logs: %s", pod.Name, body)
 	}
-	framework.DeletePodOrFail(c, pod.Namespace, pod.Name)
+	e2epod.DeletePodOrFail(c, pod.Namespace, pod.Name)
 }
 
 func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
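
The hunk ends at the signature of verifyPVCsPending. Going only by that signature and the "Verify that no claims are provisioned" comment earlier, a plausible body (a sketch, not the upstream implementation) asserts that every claim is still Pending, as expected under WaitForFirstConsumer before any pod consumes them:

package testsuites // assumed package name

import (
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// verifyPVCsPendingSketch checks that none of the claims has bound yet.
func verifyPVCsPendingSketch(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
	for _, claim := range pvcs {
		pvc, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		gomega.Expect(pvc.Status.Phase).To(gomega.Equal(v1.ClaimPending), "claim should still be Pending")
	}
}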