Merge pull request #40879 from copejon/pv-e2e-unique-nfs-vol

Automatic merge from submit-queue (batch tested with PRs 40289, 40877, 40879, 39972, 40942)

PV E2E: provide each spec with a fresh nfs host

**What this PR does / why we need it**:
PersistentVolume e2e currently reuses an NFS host pod created at the start of the suite and accessed by each test. This is far less favorable than using a fresh volume per test. Additionally, a per-test server guards against the volume host pod or its kubelet being disrupted mid-suite, which has led to flakes.

```release-note
NONE
```
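In practice, the change moves the NFS server lifecycle from suite scope into each spec's `BeforeEach`/`AfterEach`. A minimal sketch of the resulting pattern, using this PR's `initNFSserverPod` and `deletePodWithWait` helpers (here `f`, `c`, and `ns` are the framework, clientset, and namespace handles the suite already sets up):

```go
var nfsServerPod *v1.Pod

// Every spec gets its own NFS server pod, so no state is shared between
// tests and a disrupted server only affects the spec that owns it.
BeforeEach(func() {
	framework.Logf("[BeforeEach] Creating NFS Server Pod")
	nfsServerPod = initNFSserverPod(c, ns)
	Expect(nfsServerPod.Status.PodIP).NotTo(BeEmpty())
})

// Tear the server down after every spec rather than once at suite exit.
AfterEach(func() {
	deletePodWithWait(f, c, nfsServerPod)
})
```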
Merged by Kubernetes Submit Queue on 2017-02-04 04:43:07 -08:00, committed by GitHub
commit b1e669cae7
3 changed files with 34 additions and 57 deletions


@@ -59,14 +59,6 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Disruptive][Flaky]",
clientNode *v1.Node
)
nfsServerConfig := VolumeTestConfig{
namespace: metav1.NamespaceDefault,
prefix: "nfs",
serverImage: NfsServerImage,
serverPorts: []int{2049},
serverArgs: []string{"-G", "777", "/exports"},
}
BeforeEach(func() {
// To protect the NFS volume pod from the kubelet restart, we isolate it on its own node.
framework.SkipUnlessNodeCountIsAtLeast(MinNodes)
@@ -74,23 +66,21 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Disruptive][Flaky]",
ns = f.Namespace.Name
// Start the NFS server pod.
if nfsServerPod == nil {
framework.Logf("[BeforeEach] Initializing NFS Server Pod")
nfsServerPod = startVolumeServer(c, nfsServerConfig)
framework.Logf("[BeforeEach] Creating NFS Server Pod")
nfsServerPod = initNFSserverPod(c, ns)
framework.Logf("[BeforeEach] Configuring PersistentVolume")
nfsServerIP = nfsServerPod.Status.PodIP
Expect(nfsServerIP).NotTo(BeEmpty())
nfsPVconfig = persistentVolumeConfig{
namePrefix: "nfs-",
pvSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
Server: nfsServerIP,
Path: "/exports",
ReadOnly: false,
},
framework.Logf("[BeforeEach] Configuring PersistentVolume")
nfsServerIP = nfsServerPod.Status.PodIP
Expect(nfsServerIP).NotTo(BeEmpty())
nfsPVconfig = persistentVolumeConfig{
namePrefix: "nfs-",
pvSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
Server: nfsServerIP,
Path: "/exports",
ReadOnly: false,
},
}
},
}
// Get the first ready node IP that is not hosting the NFS pod.
if clientNodeIP == "" {
@@ -100,19 +90,15 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Disruptive][Flaky]",
if node.Name != nfsServerPod.Spec.NodeName {
clientNode = &node
clientNodeIP = framework.GetNodeExternalIP(clientNode)
Expect(clientNodeIP).NotTo(BeEmpty())
break
}
}
Expect(clientNodeIP).NotTo(BeEmpty())
}
})
AddCleanupAction(func() {
if nfsServerPod != nil && c != nil {
By("Deleting NFS server pod")
nfsServerPodCleanup(c, nfsServerConfig)
nfsServerPod = nil
}
AfterEach(func() {
deletePodWithWait(f, c, nfsServerPod)
})
Context("when kubelet restarts", func() {


@@ -85,6 +85,18 @@ func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig persistent
return clientPod, pv, pvc
}
// initNFSserverPod wraps volumes.go's startVolumeServer to return a running nfs host pod
// commonly used by persistent volume testing
func initNFSserverPod(c clientset.Interface, ns string) *v1.Pod {
return startVolumeServer(c, VolumeTestConfig{
namespace: ns,
prefix: "nfs",
serverImage: NfsServerImage,
serverPorts: []int{2049},
serverArgs: []string{"-G", "777", "/exports"},
})
}
var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
// global vars for the Context()s and It()s below
@@ -105,30 +117,16 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
framework.KubeDescribe("PersistentVolumes:NFS[Flaky]", func() {
var (
NFSconfig VolumeTestConfig
nfsServerPod *v1.Pod
serverIP string
pvConfig persistentVolumeConfig
)
// config for the nfs-server pod in the default namespace
NFSconfig = VolumeTestConfig{
namespace: metav1.NamespaceDefault,
prefix: "nfs",
serverImage: NfsServerImage,
serverPorts: []int{2049},
serverArgs: []string{"-G", "777", "/exports"},
}
BeforeEach(func() {
// If it doesn't exist, create the nfs server pod in the "default" ns.
// The "default" ns is used so that individual tests can delete their
// ns without impacting the nfs-server pod.
if nfsServerPod == nil {
nfsServerPod = startVolumeServer(c, NFSconfig)
serverIP = nfsServerPod.Status.PodIP
framework.Logf("NFS server IP address: %v", serverIP)
}
framework.Logf("[BeforeEach] Creating NFS Server Pod")
nfsServerPod = initNFSserverPod(c, ns)
serverIP = nfsServerPod.Status.PodIP
framework.Logf("[BeforeEach] Configuring PersistentVolume")
pvConfig = persistentVolumeConfig{
namePrefix: "nfs-",
pvSource: v1.PersistentVolumeSource{
@@ -141,13 +139,8 @@ var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() {
}
})
// Execute after *all* the tests have run
AddCleanupAction(func() {
if nfsServerPod != nil && c != nil {
framework.Logf("AfterSuite: nfs-server pod %v is non-nil, deleting pod", nfsServerPod.Name)
nfsServerPodCleanup(c, NFSconfig)
nfsServerPod = nil
}
AfterEach(func() {
deletePodWithWait(f, c, nfsServerPod)
})
Context("with Single PV - PVC pairs", func() {


@@ -183,8 +183,6 @@ func startVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.
By(fmt.Sprintf("locating the %q server pod", serverPodName))
pod, err = podClient.Get(serverPodName, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
By("sleeping a bit to give the server time to start")
time.Sleep(20 * time.Second)
}
return pod
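
The final hunk deletes a fixed 20-second sleep, a classic flake source: fixed sleeps are either too short under load or waste time when the server comes up quickly. Where a wait is genuinely needed, polling for readiness is the sturdier replacement. A sketch (an illustration, not code from this PR) reusing the surrounding function's `podClient`, `serverPodName`, `pod`, and `err`, and assuming `k8s.io/apimachinery/pkg/util/wait` is imported:

```go
// Poll until the server pod reports Running instead of sleeping a fixed
// 20 seconds; fail with a clear error if it never gets there.
err = wait.PollImmediate(2*time.Second, 1*time.Minute, func() (bool, error) {
	pod, err = podClient.Get(serverPodName, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	return pod.Status.Phase == v1.PodRunning, nil
})
framework.ExpectNoError(err, "server pod %q never reached Running", serverPodName)
```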