From b85c4fc57a43cf3a2379a29ee502a5c7ef5ae7b3 Mon Sep 17 00:00:00 2001
From: Hemant Kumar
Date: Wed, 14 Mar 2018 11:15:58 -0400
Subject: [PATCH] Refactor disruptive tests to use more volume types

---
 test/e2e/framework/pv_util.go                  |  60 ++++++++++
 test/e2e/storage/BUILD                         |   3 +-
 .../generic_persistent_volume-disruptive.go    | 103 ++++++++++++++++++
 ...go => nfs_persistent_volume-disruptive.go}  |  10 +-
 test/e2e/storage/utils/utils.go                |  14 ++-
 .../vsphere/persistent_volumes-vsphere.go      |   4 +-
 6 files changed, 181 insertions(+), 13 deletions(-)
 create mode 100644 test/e2e/storage/generic_persistent_volume-disruptive.go
 rename test/e2e/storage/{persistent_volumes-disruptive.go => nfs_persistent_volume-disruptive.go} (97%)

diff --git a/test/e2e/framework/pv_util.go b/test/e2e/framework/pv_util.go
index 01864f7e9c7..21e2b6d9e64 100644
--- a/test/e2e/framework/pv_util.go
+++ b/test/e2e/framework/pv_util.go
@@ -857,6 +857,47 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
     return podSpec
 }
 
+// Returns a pod definition based on the namespace using nginx image
+func MakeNginxPod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) *v1.Pod {
+    podSpec := &v1.Pod{
+        TypeMeta: metav1.TypeMeta{
+            Kind:       "Pod",
+            APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            GenerateName: "pvc-tester-",
+            Namespace:    ns,
+        },
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{
+                {
+                    Name:  "write-pod",
+                    Image: "nginx",
+                    Ports: []v1.ContainerPort{
+                        {
+                            Name:          "http-server",
+                            ContainerPort: 80,
+                        },
+                    },
+                },
+            },
+        },
+    }
+    var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
+    var volumes = make([]v1.Volume, len(pvclaims))
+    for index, pvclaim := range pvclaims {
+        volumename := fmt.Sprintf("volume%v", index+1)
+        volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
+        volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
+    }
+    podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
+    podSpec.Spec.Volumes = volumes
+    if nodeSelector != nil {
+        podSpec.Spec.NodeSelector = nodeSelector
+    }
+    return podSpec
+}
+
 // Returns a pod definition based on the namespace. The pod references the PVC's
 // name. A slice of BASH commands can be supplied as args to be run by the pod.
 // SELinux testing requires to pass HostIPC and HostPID as booleansi arguments.
@@ -935,6 +976,25 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st
     return pod, nil
 }
 
+func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) (*v1.Pod, error) {
+    pod := MakeNginxPod(namespace, nodeSelector, pvclaims)
+    pod, err := client.CoreV1().Pods(namespace).Create(pod)
+    if err != nil {
+        return nil, fmt.Errorf("pod Create API error: %v", err)
+    }
+    // Waiting for pod to be running
+    err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
+    if err != nil {
+        return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
+    }
+    // get fresh pod info
+    pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
+    if err != nil {
+        return pod, fmt.Errorf("pod Get API error: %v", err)
+    }
+    return pod, nil
+}
+
 // create security pod with given claims
 func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) (*v1.Pod, error) {
     pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel)
diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD
index af103b9f043..e08196e25b6 100644
--- a/test/e2e/storage/BUILD
+++ b/test/e2e/storage/BUILD
@@ -7,10 +7,11 @@ go_library(
         "csi_volumes.go",
         "empty_dir_wrapper.go",
         "flexvolume.go",
+        "generic_persistent_volume-disruptive.go",
         "mounted_volume_resize.go",
+        "nfs_persistent_volume-disruptive.go",
         "pd.go",
         "persistent_volumes.go",
-        "persistent_volumes-disruptive.go",
         "persistent_volumes-gce.go",
         "persistent_volumes-local.go",
         "pv_protection.go",
diff --git a/test/e2e/storage/generic_persistent_volume-disruptive.go b/test/e2e/storage/generic_persistent_volume-disruptive.go
new file mode 100644
index 00000000000..8d73a5f0960
--- /dev/null
+++ b/test/e2e/storage/generic_persistent_volume-disruptive.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package storage
+
+import (
+    . "github.com/onsi/ginkgo"
"github.com/onsi/gomega" + + "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/utils" +) + +var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() { + f := framework.NewDefaultFramework("generic-disruptive-pv") + var ( + c clientset.Interface + ns string + ) + + BeforeEach(func() { + // Skip tests unless number of nodes is 2 + framework.SkipUnlessNodeCountIsAtLeast(2) + framework.SkipIfProviderIs("local") + c = f.ClientSet + ns = f.Namespace.Name + }) + disruptiveTestTable := []disruptiveTest{ + { + testItStmt: "Should test that a file written to the mount before kubelet restart is readable after restart.", + runTest: utils.TestKubeletRestartsAndRestoresMount, + }, + { + testItStmt: "Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns.", + runTest: utils.TestVolumeUnmountsFromDeletedPod, + }, + { + testItStmt: "Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns.", + runTest: utils.TestVolumeUnmountsFromForceDeletedPod, + }, + } + Context("When kubelet restarts", func() { + // Test table housing the It() title string and test spec. runTest is type testBody, defined at + // the start of this file. To add tests, define a function mirroring the testBody signature and assign + // to runTest. + var ( + clientPod *v1.Pod + pvc *v1.PersistentVolumeClaim + ) + BeforeEach(func() { + framework.Logf("Initializing pod and pvcs for test") + clientPod, pvc = createPodPVCFromSC(f, c, ns) + }) + for _, test := range disruptiveTestTable { + func(t disruptiveTest) { + It(t.testItStmt, func() { + By("Executing Spec") + t.runTest(c, f, clientPod, pvc) + }) + }(test) + } + AfterEach(func() { + framework.Logf("Tearing down test spec") + tearDownTestCase(c, f, ns, clientPod, pvc, nil) + pvc, clientPod = nil, nil + }) + }) +}) + +func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string) (*v1.Pod, *v1.PersistentVolumeClaim) { + var err error + test := storageClassTest{ + name: "default", + claimSize: "2Gi", + } + pvc := newClaim(test, ns, "default") + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + Expect(err).NotTo(HaveOccurred(), "Error creating pvc") + pvcClaims := []*v1.PersistentVolumeClaim{pvc} + pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout) + Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err) + Expect(len(pvs)).To(Equal(1)) + + By("Creating a pod with dynamically provisioned volume") + pod, err := framework.CreateNginxPod(c, ns, nil, pvcClaims) + Expect(err).NotTo(HaveOccurred(), "While creating pods for kubelet restart test") + return pod, pvc +} diff --git a/test/e2e/storage/persistent_volumes-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go similarity index 97% rename from test/e2e/storage/persistent_volumes-disruptive.go rename to test/e2e/storage/nfs_persistent_volume-disruptive.go index 7457f527c56..55b77b48f6e 100644 --- a/test/e2e/storage/persistent_volumes-disruptive.go +++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go @@ -31,7 +31,7 @@ import ( "k8s.io/kubernetes/test/e2e/storage/utils" ) -type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) +type testBody func(c clientset.Interface, f *framework.Framework, 
+type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim)
 type disruptiveTest struct {
     testItStmt string
     runTest    testBody
@@ -41,7 +41,7 @@ const (
     MinNodes = 2
 )
 
-var _ = utils.SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
+var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 
     f := framework.NewDefaultFramework("disruptive-pv")
     var (
@@ -234,7 +234,7 @@ var _ = utils.SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
             func(t disruptiveTest) {
                 It(t.testItStmt, func() {
                     By("Executing Spec")
-                    t.runTest(c, f, clientPod, pvc, pv)
+                    t.runTest(c, f, clientPod, pvc)
                 })
             }(test)
         }
@@ -280,5 +280,7 @@ func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string,
     // Ignore deletion errors. Failing on them will interrupt test cleanup.
     framework.DeletePodWithWait(f, c, client)
     framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
-    framework.DeletePersistentVolume(c, pv.Name)
+    if pv != nil {
+        framework.DeletePersistentVolume(c, pv.Name)
+    }
 }
diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go
index 2da90e5b626..08313f36de9 100644
--- a/test/e2e/storage/utils/utils.go
+++ b/test/e2e/storage/utils/utils.go
@@ -138,7 +138,7 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
 }
 
 // TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
-func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
+func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim) {
     By("Writing to the volume.")
     file := "/mnt/_SUCCESS"
     out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file))
@@ -157,7 +157,7 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
 
 // TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
 // forceDelete is true indicating whether the pod is forcelly deleted.
-func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, forceDelete bool) {
+func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, forceDelete bool) {
     nodeIP, err := framework.GetHostExternalAddress(c, clientPod)
     Expect(err).NotTo(HaveOccurred())
     nodeIP = nodeIP + ":22"
@@ -182,6 +182,8 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
         err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{})
     }
     Expect(err).NotTo(HaveOccurred())
+    // Wait for the pod to enter the "Terminating" state
+    time.Sleep(30 * time.Second)
     By("Starting the kubelet and waiting for pod to delete.")
     KubeletCommand(KStart, c, clientPod)
     err = f.WaitForPodTerminated(clientPod.Name, "")
@@ -203,13 +205,13 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 }
 
 // TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
-func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
-    TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, pvc, pv, false)
+func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim) {
+    TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, pvc, false)
 }
 
 // TestVolumeUnmountsFromFoceDeletedPod tests that a volume unmounts if the client pod was forcelly deleted while the kubelet was down.
-func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
-    TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, pvc, pv, true)
+func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim) {
+    TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, pvc, true)
 }
 
 // RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
diff --git a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
index e66cec112d9..7fc55628ac4 100644
--- a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
+++ b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
@@ -178,7 +178,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
         3. Verify that written file is accessible after kubelet restart
     */
     It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() {
-        utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod, pvc, pv)
+        utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod, pvc)
     })
 
     /*
@@ -193,7 +193,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
         5. Verify that volume mount not to be found.
     */
     It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() {
-        utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod, pvc, pv)
+        utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod, pvc)
     })
 
     /*