Merge pull request #61181 from gnufied/refactor-disruptive-storage-tests

Automatic merge from submit-queue (batch tested with PRs 60793, 61181, 61267, 61252, 61334). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Refactor disruptive tests to use more volume types

Refactor the storage disruptive tests to use different volume types, and mark the existing tests as NFS-specific. Fixes https://github.com/kubernetes/kubernetes/issues/61150
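
The key enabler is visible in the shared test-body signature: the `*v1.PersistentVolume` argument is dropped, so the same disruptive specs can run against volumes where only the claim is tracked (e.g. dynamically provisioned ones). A minimal sketch of the new shape, taken from the diff below:

```go
// The PV parameter is gone; specs only need the client pod and its claim.
type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim)

type disruptiveTest struct {
	testItStmt string // the It() title
	runTest    testBody
}
```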

cc @jeffvance @jingxu97 



/sig storage

```release-note
None
```
Kubernetes Submit Queue 2018-03-21 20:23:10 -07:00 committed by GitHub
commit d4e32379ca
6 changed files with 181 additions and 13 deletions


@@ -857,6 +857,47 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
	return podSpec
}
// MakeNginxPod returns a pod definition based on the namespace using an nginx image.
func MakeNginxPod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) *v1.Pod {
	podSpec := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-tester-",
			Namespace:    ns,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "write-pod",
					Image: "nginx",
					Ports: []v1.ContainerPort{
						{
							Name:          "http-server",
							ContainerPort: 80,
						},
					},
				},
			},
		},
	}
	var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
	var volumes = make([]v1.Volume, len(pvclaims))
	for index, pvclaim := range pvclaims {
		volumename := fmt.Sprintf("volume%v", index+1)
		volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
		volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
	}
	podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
	podSpec.Spec.Volumes = volumes
	if nodeSelector != nil {
		podSpec.Spec.NodeSelector = nodeSelector
	}
	return podSpec
}
// Returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod.
// SELinux testing requires passing HostIPC and HostPID as boolean arguments.
@@ -935,6 +976,25 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st
	return pod, nil
}
// CreateNginxPod creates an nginx pod mounting the given claims and waits for it to be running.
func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) (*v1.Pod, error) {
	pod := MakeNginxPod(namespace, nodeSelector, pvclaims)
	pod, err := client.CoreV1().Pods(namespace).Create(pod)
	if err != nil {
		return nil, fmt.Errorf("pod Create API error: %v", err)
	}
	// Waiting for pod to be running
	err = WaitForPodNameRunningInNamespace(client, pod.Name, namespace)
	if err != nil {
		return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
	}
	// get fresh pod info
	pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
	if err != nil {
		return pod, fmt.Errorf("pod Get API error: %v", err)
	}
	return pod, nil
}
// create security pod with given claims
func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions) (*v1.Pod, error) {
pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel)
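
As a hedged usage sketch (not part of this diff), a spec that already has a bound claim could use the new helper roughly like this; `c`, `f`, `ns`, and `pvc` are assumed to exist in the surrounding test:

```go
// Assumes c is a clientset.Interface, f the e2e framework, ns the test
// namespace, and pvc a *v1.PersistentVolumeClaim created earlier in the spec.
pod, err := framework.CreateNginxPod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc})
Expect(err).NotTo(HaveOccurred(), "failed to create nginx pod")
// Best-effort cleanup at the end of the spec.
defer framework.DeletePodWithWait(f, c, pod)
```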


@@ -7,10 +7,11 @@ go_library(
"csi_volumes.go",
"empty_dir_wrapper.go",
"flexvolume.go",
"generic_persistent_volume-disruptive.go",
"mounted_volume_resize.go",
"nfs_persistent_volume-disruptive.go",
"pd.go",
"persistent_volumes.go",
"persistent_volumes-disruptive.go",
"persistent_volumes-gce.go",
"persistent_volumes-local.go",
"pv_protection.go",


@@ -0,0 +1,103 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() {
	f := framework.NewDefaultFramework("generic-disruptive-pv")
	var (
		c  clientset.Interface
		ns string
	)
	BeforeEach(func() {
		// Skip tests unless the cluster has at least 2 nodes
		framework.SkipUnlessNodeCountIsAtLeast(2)
		framework.SkipIfProviderIs("local")
		c = f.ClientSet
		ns = f.Namespace.Name
	})
	disruptiveTestTable := []disruptiveTest{
		{
			testItStmt: "Should test that a file written to the mount before kubelet restart is readable after restart.",
			runTest:    utils.TestKubeletRestartsAndRestoresMount,
		},
		{
			testItStmt: "Should test that a volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns.",
			runTest:    utils.TestVolumeUnmountsFromDeletedPod,
		},
		{
			testItStmt: "Should test that a volume mounted to a pod that is force deleted while the kubelet is down unmounts when the kubelet returns.",
			runTest:    utils.TestVolumeUnmountsFromForceDeletedPod,
		},
	}
	Context("When kubelet restarts", func() {
		// Test table housing the It() title string and test spec. runTest is type testBody, defined at
		// the start of this file. To add tests, define a function mirroring the testBody signature and assign
		// to runTest.
		var (
			clientPod *v1.Pod
			pvc       *v1.PersistentVolumeClaim
		)
		BeforeEach(func() {
			framework.Logf("Initializing pod and pvcs for test")
			clientPod, pvc = createPodPVCFromSC(f, c, ns)
		})
		for _, test := range disruptiveTestTable {
			func(t disruptiveTest) {
				It(t.testItStmt, func() {
					By("Executing Spec")
					t.runTest(c, f, clientPod, pvc)
				})
			}(test)
		}
		AfterEach(func() {
			framework.Logf("Tearing down test spec")
			tearDownTestCase(c, f, ns, clientPod, pvc, nil)
			pvc, clientPod = nil, nil
		})
	})
})
func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string) (*v1.Pod, *v1.PersistentVolumeClaim) {
	var err error
	test := storageClassTest{
		name:      "default",
		claimSize: "2Gi",
	}
	pvc := newClaim(test, ns, "default")
	pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
	Expect(err).NotTo(HaveOccurred(), "Error creating pvc")
	pvcClaims := []*v1.PersistentVolumeClaim{pvc}
	pvs, err := framework.WaitForPVClaimBoundPhase(c, pvcClaims, framework.ClaimProvisionTimeout)
	Expect(err).NotTo(HaveOccurred(), "Failed waiting for PVC to be bound %v", err)
	Expect(len(pvs)).To(Equal(1))
	By("Creating a pod with dynamically provisioned volume")
	pod, err := framework.CreateNginxPod(c, ns, nil, pvcClaims)
	Expect(err).NotTo(HaveOccurred(), "While creating pods for kubelet restart test")
	return pod, pvc
}
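
The comment in the Context block above spells out how the table is meant to grow: write a function matching the testBody signature and add an entry pairing it with an It() title; the immediately-invoked closure over `test` captures each entry by value, so every It() runs its own case. A hedged sketch of what an additional case could look like (testVolumeRemountsAfterSecondRestart is hypothetical, not part of this PR):

```go
// Hypothetical test body; it must match the testBody signature from
// persistent_volumes-disruptive.go (clientset, framework, client pod, claim).
func testVolumeRemountsAfterSecondRestart(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim) {
	// exercise the volume here, e.g. write a file, restart the kubelet twice,
	// and assert the file is still readable
}

// ...and one more entry in the disruptiveTestTable literal:
//
//	{
//		testItStmt: "Should test that the volume stays mounted across repeated kubelet restarts.",
//		runTest:    testVolumeRemountsAfterSecondRestart,
//	},
```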


@@ -31,7 +31,7 @@ import (
"k8s.io/kubernetes/test/e2e/storage/utils"
)
type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume)
type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim)
type disruptiveTest struct {
testItStmt string
runTest testBody
@@ -41,7 +41,7 @@ const (
MinNodes = 2
)
var _ = utils.SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
f := framework.NewDefaultFramework("disruptive-pv")
var (
@@ -234,7 +234,7 @@ var _ = utils.SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
func(t disruptiveTest) {
It(t.testItStmt, func() {
By("Executing Spec")
t.runTest(c, f, clientPod, pvc, pv)
t.runTest(c, f, clientPod, pvc)
})
}(test)
}
@@ -280,5 +280,7 @@ func tearDownTestCase(c clientset.Interface, f *framework.Framework, ns string,
// Ignore deletion errors. Failing on them will interrupt test cleanup.
framework.DeletePodWithWait(f, c, client)
framework.DeletePersistentVolumeClaim(c, pvc.Name, ns)
framework.DeletePersistentVolume(c, pv.Name)
if pv != nil {
framework.DeletePersistentVolume(c, pv.Name)
}
}


@@ -138,7 +138,7 @@ func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) s
}
// TestKubeletRestartsAndRestoresMount tests that a volume mounted to a pod remains mounted after a kubelet restarts
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim) {
By("Writing to the volume.")
file := "/mnt/_SUCCESS"
out, err := PodExec(clientPod, fmt.Sprintf("touch %s", file))
@@ -157,7 +157,7 @@ func TestKubeletRestartsAndRestoresMount(c clientset.Interface, f *framework.Fra
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
// forceDelete indicates whether the pod is forcibly deleted.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, forceDelete bool) {
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, forceDelete bool) {
nodeIP, err := framework.GetHostExternalAddress(c, clientPod)
Expect(err).NotTo(HaveOccurred())
nodeIP = nodeIP + ":22"
@@ -182,6 +182,8 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
err = c.CoreV1().Pods(clientPod.Namespace).Delete(clientPod.Name, &metav1.DeleteOptions{})
}
Expect(err).NotTo(HaveOccurred())
// Wait for pod to enter the "Terminating" state
time.Sleep(30 * time.Second)
By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = f.WaitForPodTerminated(clientPod.Name, "")
@@ -203,13 +205,13 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
}
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, pvc, pv, false)
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, pvc, false)
}
// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcibly deleted while the kubelet was down.
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, pvc, pv, true)
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, pvc *v1.PersistentVolumeClaim) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, pvc, true)
}
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.


@@ -178,7 +178,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
3. Verify that written file is accessible after kubelet restart
*/
It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() {
utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod, pvc, pv)
utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod, pvc)
})
/*
@@ -193,7 +193,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
5. Verify that the volume mount is not found.
*/
It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() {
utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod, pvc, pv)
utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod, pvc)
})
/*