mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-30 15:05:27 +00:00

Merge pull request #128096 from bart0sh/PR161-e2e_node-consolidate-NFSServer-APIs

e2e_node: consolidated NFSServer APIs.

This change moves the duplicated restartNfsServer/stopNfsServer test helpers into the shared volume framework as exported RestartNFSServer/StopNFSServer, and updates the e2e kubelet and e2e_node MirrorPod tests to call them.

This commit is contained in commit f64eeb523d.
@@ -176,6 +176,24 @@ func NewNFSServerWithNodeName(ctx context.Context, cs clientset.Interface, names
 	return config, pod, host
 }
 
+// Restart the passed-in nfs-server by issuing a `rpc.nfsd 1` command in the
+// pod's (only) container. This command changes the number of nfs server threads from
+// (presumably) zero back to 1, and therefore allows nfs to open connections again.
+func RestartNFSServer(f *framework.Framework, serverPod *v1.Pod) {
+	const startcmd = "rpc.nfsd 1"
+	_, _, err := PodExec(f, serverPod, startcmd)
+	framework.ExpectNoError(err)
+}
+
+// Stop the passed-in nfs-server by issuing a `rpc.nfsd 0` command in the
+// pod's (only) container. This command changes the number of nfs server threads to 0,
+// thus closing all open nfs connections.
+func StopNFSServer(f *framework.Framework, serverPod *v1.Pod) {
+	const stopcmd = "rpc.nfsd 0 && for i in $(seq 200); do rpcinfo -p | grep -q nfs || break; sleep 1; done"
+	_, _, err := PodExec(f, serverPod, stopcmd)
+	framework.ExpectNoError(err)
+}
+
 // CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer
 // and ip address string are returned.
 // Note: Expect() is called so no error is returned.
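With the helpers exported from the volume framework, individual suites no longer need their own exec wrappers. A minimal sketch of how a test could use the consolidated API (bounceNFSServer and its wiring are illustrative, not part of this PR):

package example

import (
	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/test/e2e/framework"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
)

// bounceNFSServer simulates a temporary NFS outage the way the kubelet and
// MirrorPod tests do: both helpers exec rpc.nfsd inside the server pod and
// fail the test via framework.ExpectNoError if the command errors.
func bounceNFSServer(f *framework.Framework, nfsServerPod *v1.Pod) {
	// Drop nfsd threads to 0; the helper's shell loop then waits up to
	// ~200s for the nfs service to vanish from rpcinfo.
	e2evolume.StopNFSServer(f, nfsServerPod)

	// ... exercise failure handling while the server is unreachable ...

	// Bring one nfsd thread back so clients can reconnect.
	e2evolume.RestartNFSServer(f, nfsServerPod)
}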
@@ -109,24 +109,6 @@ func waitTillNPodsRunningOnNodes(ctx context.Context, c clientset.Interface, nod
 	})
 }
 
-// Restart the passed-in nfs-server by issuing a `/usr/sbin/rpc.nfsd 1` command in the
-// pod's (only) container. This command changes the number of nfs server threads from
-// (presumably) zero back to 1, and therefore allows nfs to open connections again.
-func restartNfsServer(serverPod *v1.Pod) {
-	const startcmd = "/usr/sbin/rpc.nfsd 1"
-	ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace)
-	e2ekubectl.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", startcmd)
-}
-
-// Stop the passed-in nfs-server by issuing a `/usr/sbin/rpc.nfsd 0` command in the
-// pod's (only) container. This command changes the number of nfs server threads to 0,
-// thus closing all open nfs connections.
-func stopNfsServer(serverPod *v1.Pod) {
-	const stopcmd = "/usr/sbin/rpc.nfsd 0"
-	ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace)
-	e2ekubectl.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", stopcmd)
-}
-
 // Creates a pod that mounts an nfs volume that is served by the nfs-server pod. The container
 // will execute the passed in shell cmd. Waits for the pod to start.
 // Note: the nfs plugin is defined inline, no PV or PVC.
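For reference, "defined inline" means the client pod embeds the NFS volume source directly in its spec rather than going through a PV/PVC pair. A minimal sketch (names and field values are illustrative, not taken from createPodUsingNfs):

package example

import v1 "k8s.io/api/core/v1"

// inlineNFSVolume builds a pod volume that points straight at an NFS export.
// serverIP would be the nfs-server pod's IP; "/" exports the server's root.
func inlineNFSVolume(serverIP string) v1.Volume {
	return v1.Volume{
		Name: "nfs-vol",
		VolumeSource: v1.VolumeSource{
			NFS: &v1.NFSVolumeSource{
				Server:   serverIP, // no PV or PVC indirection
				Path:     "/",
				ReadOnly: false,
			},
		},
	}
}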
@@ -437,7 +419,7 @@ var _ = SIGDescribe("kubelet", func() {
 			pod = createPodUsingNfs(ctx, f, c, ns, nfsIP, t.podCmd)
 
 			ginkgo.By("Stop the NFS server")
-			stopNfsServer(nfsServerPod)
+			e2evolume.StopNFSServer(f, nfsServerPod)
 
 			ginkgo.By("Delete the pod mounted to the NFS volume -- expect failure")
 			err := e2epod.DeletePodWithWait(ctx, c, pod)
@@ -448,7 +430,7 @@ var _ = SIGDescribe("kubelet", func() {
 			checkPodCleanup(ctx, c, pod, false)
 
 			ginkgo.By("Restart the nfs server")
-			restartNfsServer(nfsServerPod)
+			e2evolume.RestartNFSServer(f, nfsServerPod)
 
 			ginkgo.By("Verify that the deleted client pod is now cleaned up")
 			checkPodCleanup(ctx, c, pod, true)
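These steps rely on the kubelet being unable to unmount the NFS volume while the server is down: deletion of the client pod stalls because the mount cannot be released, so the pod's directory under the kubelet's standard pod dir survives. A hypothetical local check in the same spirit as checkPodCleanup (the helper name is illustrative; the actual test inspects the node differently):

package example

import (
	"os"
	"path/filepath"

	"k8s.io/apimachinery/pkg/types"
)

// podDirGone reports whether the kubelet has removed a pod's on-disk state.
// While the NFS server is stopped, the volume under this directory cannot be
// unmounted, so the directory keeps existing and cleanup stays incomplete.
func podDirGone(uid types.UID) bool {
	_, err := os.Stat(filepath.Join("/var/lib/kubelet/pods", string(uid)))
	return os.IsNotExist(err)
}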
@@ -231,7 +231,7 @@ var _ = SIGDescribe("MirrorPod", func() {
 		}
 
 		ginkgo.By("Stopping the NFS server")
-		stopNfsServer(f, nfsServerPod)
+		e2evolume.StopNFSServer(f, nfsServerPod)
 
 		ginkgo.By(fmt.Sprintf("Deleting the static nfs test pod: %s", staticPodName))
 		err = deleteStaticPod(podPath, staticPodName, ns)
@@ -243,7 +243,7 @@ var _ = SIGDescribe("MirrorPod", func() {
 		}, 5*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("pod volume should exist while nfs server is stopped"))
 
 		ginkgo.By("Start the NFS server")
-		restartNfsServer(f, nfsServerPod)
+		e2evolume.RestartNFSServer(f, nfsServerPod)
 
 		ginkgo.By("Waiting for the pod volume to deleted after the NFS server is started")
 		gomega.Eventually(func() bool {
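The MirrorPod test polls the same on-disk state with gomega.Eventually: the volume directory must persist while the server is stopped (the BeTrueBecause assertion above) and disappear after the restart. A sketch of that second poll, assuming gomega's BeFalseBecause matcher and a stand-in for podVolumeDirectoryExists (the hunk is truncated before the actual body):

package example

import (
	"time"

	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/types"
)

// waitForVolumeCleanup polls until the static pod's volume directory is gone,
// mirroring the Eventually(...).Should(...) pattern used while the server was
// down. exists stands in for podVolumeDirectoryExists from this file.
func waitForVolumeCleanup(uid types.UID, exists func(types.UID) bool) {
	gomega.Eventually(func() bool {
		return exists(uid)
	}, 5*time.Minute, 10*time.Second).Should(gomega.BeFalseBecause(
		"pod volume should be deleted after the NFS server is started"))
}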
@@ -286,25 +286,6 @@ func podVolumeDirectoryExists(uid types.UID) bool {
 	return podVolumeDirectoryExists
 }
 
-// Restart the passed-in nfs-server by issuing a `/usr/sbin/rpc.nfsd 1` command in the
-// pod's (only) container. This command changes the number of nfs server threads from
-// (presumably) zero back to 1, and therefore allows nfs to open connections again.
-func restartNfsServer(f *framework.Framework, serverPod *v1.Pod) {
-	const startcmd = "/usr/sbin/rpc.nfsd 1"
-	_, _, err := e2evolume.PodExec(f, serverPod, startcmd)
-	framework.ExpectNoError(err)
-
-}
-
-// Stop the passed-in nfs-server by issuing a `/usr/sbin/rpc.nfsd 0` command in the
-// pod's (only) container. This command changes the number of nfs server threads to 0,
-// thus closing all open nfs connections.
-func stopNfsServer(f *framework.Framework, serverPod *v1.Pod) {
-	const stopcmd = "rpc.nfsd 0 && for i in $(seq 200); do rpcinfo -p | grep -q nfs || break; sleep 1; done"
-	_, _, err := e2evolume.PodExec(f, serverPod, stopcmd)
-	framework.ExpectNoError(err)
-}
-
 func createStaticPodUsingNfs(nfsIP string, nodeName string, cmd string, dir string, name string, ns string) error {
 	ginkgo.By("create pod using nfs volume")
 
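Note that the consolidated StopNFSServer keeps the more defensive stop command from this file: after `rpc.nfsd 0` it polls rpcinfo for up to 200 seconds until the nfs service is no longer registered, instead of returning while connections may still be draining. A Go rendering of that wait, as a sketch (nfsRegistered stands in for the `rpcinfo -p | grep -q nfs` probe; not code from this PR):

package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForNFSDown mirrors the shell loop in stopcmd:
//   for i in $(seq 200); do rpcinfo -p | grep -q nfs || break; sleep 1; done
// Unlike the shell loop, it surfaces probe errors instead of ignoring them.
func waitForNFSDown(ctx context.Context, nfsRegistered func(context.Context) (bool, error)) error {
	return wait.PollUntilContextTimeout(ctx, 1*time.Second, 200*time.Second, true,
		func(ctx context.Context) (bool, error) {
			up, err := nfsRegistered(ctx)
			return !up, err // done once nfs is no longer registered
		})
}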