Merge pull request #66925 from msau42/udev

Automatic merge from submit-queue (batch tested with PRs 66933, 66925). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Rework multi-volume test to use StatefulSet

**What this PR does / why we need it**:
The e2e test added in https://github.com/kubernetes/kubernetes/pull/66832 fails in multi-zone environments because its volumes get provisioned in random zones. This PR reworks the test to use a StatefulSet instead, which handles provisioning multiple PVCs in the same zone.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
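As background for the approach (this note and the sketch below are not part of the PR): a StatefulSet derives one PVC per volume claim template per pod ordinal, named `<template>-<statefulset>-<ordinal>`. The zone-spreading heuristic used by the in-tree cloud provisioners at the time keys off that naming pattern so that all claims belonging to the same ordinal land in the same zone, which is what lets the reworked test keep its four volumes co-located. `expectedClaimNames` below is a hypothetical helper used only to illustrate the naming convention.

```go
// Illustrative sketch only: list the PVC names a StatefulSet controller
// derives from its volume claim templates, following the
// <template>-<statefulset>-<ordinal> convention.
package main

import "fmt"

// expectedClaimNames is a hypothetical helper, not part of the test code.
func expectedClaimNames(ssName string, templates []string, replicas int) []string {
	var names []string
	for ordinal := 0; ordinal < replicas; ordinal++ {
		for _, tmpl := range templates {
			names = append(names, fmt.Sprintf("%s-%s-%d", tmpl, ssName, ordinal))
		}
	}
	return names
}

func main() {
	// The reworked test uses one replica and four claim templates (vol0..vol3),
	// so all four claims share ordinal 0 and should land in one zone.
	for _, name := range expectedClaimNames("many-volumes-test", []string{"vol0", "vol1", "vol2", "vol3"}, 1) {
		fmt.Println(name) // e.g. vol0-many-volumes-test-0
	}
}
```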
Commit babdbff848 by Kubernetes Submit Queue, 2018-08-03 14:36:02 -07:00 (committed by GitHub).

@@ -23,6 +23,7 @@ import (
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
+	appsv1 "k8s.io/api/apps/v1"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -30,6 +31,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 )

 // Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's
@@ -303,50 +305,118 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 	Describe("Default StorageClass", func() {
 		Context("pods that use multiple volumes", func() {
+			AfterEach(func() {
+				framework.DeleteAllStatefulSets(c, ns)
+			})
+
 			It("should be reschedulable", func() {
 				// Only run on providers with default storageclass
 				framework.SkipUnlessProviderIs("openstack", "gce", "gke", "vsphere", "azure")

 				numVols := 4
-				pvcs := []*v1.PersistentVolumeClaim{}
+				ssTester := framework.NewStatefulSetTester(c)

-				By("Creating PVCs")
-				for i := 0; i < numVols; i++ {
-					pvc = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{}, ns)
-					pvc, err = framework.CreatePVC(c, ns, pvc)
-					Expect(err).NotTo(HaveOccurred())
-					pvcs = append(pvcs, pvc)
-				}
-
-				By("Waiting for PVCs to be bound")
-				for _, pvc := range pvcs {
-					framework.Logf("Created PVC %q", pvc.Name)
-					framework.ExpectNoError(framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout))
-				}
-
-				By("Creating a pod and initializing data")
+				By("Creating a StatefulSet pod to initialize data")
 				writeCmd := "true"
-				for i, pvc := range pvcs {
-					// mountPath is /mnt/volume<i+1>
-					writeCmd += fmt.Sprintf("&& touch /mnt/volume%v/%v", i+1, pvc.Name)
+				for i := 0; i < numVols; i++ {
+					writeCmd += fmt.Sprintf("&& touch %v", getVolumeFile(i))
 				}
-				pod := framework.MakePod(ns, nil, pvcs, false, writeCmd)
-				pod, err = c.CoreV1().Pods(ns).Create(pod)
-				Expect(err).NotTo(HaveOccurred())
-				framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))
+				writeCmd += "&& sleep 10000"

-				By("Recreating the pod and validating the data")
-				framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod))
-				validateCmd := "true"
-				for i, pvc := range pvcs {
-					// mountPath is /mnt/volume<i+1>
-					validateCmd += fmt.Sprintf("&& test -f /mnt/volume%v/%v", i+1, pvc.Name)
+				probe := &v1.Probe{
+					Handler: v1.Handler{
+						Exec: &v1.ExecAction{
+							// Check that the last file got created
+							Command: []string{"test", "-f", getVolumeFile(numVols - 1)},
+						},
+					},
+					InitialDelaySeconds: 1,
+					PeriodSeconds:       1,
 				}
-				pod = framework.MakePod(ns, nil, pvcs, false, validateCmd)
-				pod, err = c.CoreV1().Pods(ns).Create(pod)
+
+				mounts := []v1.VolumeMount{}
+				claims := []v1.PersistentVolumeClaim{}
+				for i := 0; i < numVols; i++ {
+					pvc := framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{}, ns)
+					pvc.Name = getVolName(i)
+					mounts = append(mounts, v1.VolumeMount{Name: pvc.Name, MountPath: getMountPath(i)})
+					claims = append(claims, *pvc)
+				}
+
+				spec := makeStatefulSetWithPVCs(ns, writeCmd, mounts, claims, probe)
+				ss, err := c.AppsV1().StatefulSets(ns).Create(spec)
 				Expect(err).NotTo(HaveOccurred())
-				framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))
+				ssTester.WaitForRunningAndReady(1, ss)
+
+				By("Deleting the StatefulSet but not the volumes")
+				// Scale down to 0 first so that the Delete is quick
+				ss, err = ssTester.Scale(ss, 0)
+				Expect(err).NotTo(HaveOccurred())
+				ssTester.WaitForStatusReplicas(ss, 0)
+				err = c.AppsV1().StatefulSets(ns).Delete(ss.Name, &metav1.DeleteOptions{})
+				Expect(err).NotTo(HaveOccurred())
+
+				By("Creating a new Statefulset and validating the data")
+				validateCmd := "true"
+				for i := 0; i < numVols; i++ {
+					validateCmd += fmt.Sprintf("&& test -f %v", getVolumeFile(i))
+				}
+				validateCmd += "&& sleep 10000"
+
+				spec = makeStatefulSetWithPVCs(ns, validateCmd, mounts, claims, probe)
+				ss, err = c.AppsV1().StatefulSets(ns).Create(spec)
+				Expect(err).NotTo(HaveOccurred())
+				ssTester.WaitForRunningAndReady(1, ss)
 			})
 		})
 	})
 })
+
+func getVolName(i int) string {
+	return fmt.Sprintf("vol%v", i)
+}
+
+func getMountPath(i int) string {
+	return fmt.Sprintf("/mnt/%v", getVolName(i))
+}
+
+func getVolumeFile(i int) string {
+	return fmt.Sprintf("%v/data%v", getMountPath(i), i)
+}
+
+func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v1.PersistentVolumeClaim, readyProbe *v1.Probe) *appsv1.StatefulSet {
+	ssReplicas := int32(1)
+	labels := map[string]string{"app": "many-volumes-test"}
+	return &appsv1.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "many-volumes-test",
+			Namespace: ns,
+		},
+		Spec: appsv1.StatefulSetSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{"app": "many-volumes-test"},
+			},
+			Replicas: &ssReplicas,
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: labels,
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:           "nginx",
+							Image:          imageutils.GetE2EImage(imageutils.NginxSlim),
+							Command:        []string{"/bin/sh"},
+							Args:           []string{"-c", cmd},
+							VolumeMounts:   mounts,
+							ReadinessProbe: readyProbe,
+						},
+					},
+				},
+			},
+			VolumeClaimTemplates: claims,
+		},
+	}
+}