Merge pull request #59879 from humblec/gluster-dp-test-latest_1

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add tests for GlusterFS dynamic provisioner.

This brings up a heketi server pod running in mock mode. PVC creation should work, but attaching the volume to a pod and reading/writing data are not part of this test, since the mock server does not back the PV with a real Gluster volume. For the same reason the tests are marked as [fast].
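
For orientation, the sketch below shows roughly the kind of StorageClass this test ends up generating. The helper name and package scaffolding are illustrative; only the `kubernetes.io/glusterfs` provisioner and the `resturl` parameter come from the diff below.

```go
// Illustrative sketch, not code from this PR: point the glusterfs
// provisioner at the mock heketi pod via the "resturl" parameter. In mock
// mode heketi answers provisioning REST calls without creating real bricks.
package storage

import (
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func glusterMockStorageClass(serverUrl string) *storagev1.StorageClass {
	return &storagev1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{GenerateName: "glusterdptest-"},
		Provisioner: "kubernetes.io/glusterfs",
		Parameters:  map[string]string{"resturl": serverUrl},
	}
}
```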


Signed-off-by: Humble Chirammal <hchiramm@redhat.com>



**What this PR does / why we need it**:

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note

```
Merged by the Kubernetes Submit Queue (via GitHub) on 2018-04-30 06:09:52 -07:00 in commit 34706b79e4.

```diff
@@ -56,6 +56,7 @@ type storageClassTest struct {
 	expectedSize string
 	pvCheck      func(volume *v1.PersistentVolume) error
 	nodeName     string
+	attach       bool
 }
 
 const (
```
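
The new `attach` field gates the writer/reader pod phase of `testDynamicProvisioning`. A hedged sketch of the two call patterns, using the `storageClassTest` struct from the hunk above (the field values here are illustrative, not from this PR):

```go
// Sketch: a provisioner whose PVs can really be mounted keeps the
// end-to-end data check; the mock-backed Gluster test opts out.
realBacked := storageClassTest{
	name:         "example PD test", // hypothetical entry
	provisioner:  "kubernetes.io/gce-pd",
	claimSize:    "1Gi",
	expectedSize: "1Gi",
	attach:       true, // run the 'hello world' writer/reader pods
}
mockBacked := storageClassTest{
	name:         "Gluster Dynamic provisioner test",
	provisioner:  "kubernetes.io/glusterfs",
	claimSize:    "2Gi",
	expectedSize: "2Gi",
	attach:       false, // mock heketi: nothing real to mount
}
```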
```diff
@@ -98,10 +99,14 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
 	pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 
-	// Check sizes
-	expectedCapacity := resource.MustParse(t.expectedSize)
-	pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
-	Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()), "pvCapacity is not equal to expectedCapacity")
+	if class.Provisioner == "kubernetes.io/glusterfs" && framework.ProviderIs("gke", "gce") {
+		framework.Logf("Skipping glusterfs dynamic test for cloud provider %v", "GCE/GKE")
+	} else {
+		// Check sizes
+		expectedCapacity := resource.MustParse(t.expectedSize)
+		pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
+		Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()), "pvCapacity is not equal to expectedCapacity")
+	}
 
 	requestedCapacity := resource.MustParse(t.claimSize)
 	claimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
```
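
One design note on the hunk above: the usual e2e pattern skips an unsupported provider outright, whereas this change only branches around the capacity check so the claim bookkeeping below it still runs on GCE/GKE. The more common alternative would look roughly like this (a sketch, assuming the framework's skip helper):

```go
// Alternative sketch (not what the PR does): bail out of the whole test
// on providers where the glusterfs size check is not meaningful.
if class.Provisioner == "kubernetes.io/glusterfs" {
	framework.SkipIfProviderIs("gce", "gke")
}
```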
```diff
@@ -126,24 +131,25 @@ func testDynamicProvisioning(t storageClassTest, client clientset.Interface, cla
 		Expect(err).NotTo(HaveOccurred())
 	}
 
-	// We start two pods:
-	// - The first writes 'hello word' to the /mnt/test (= the volume).
-	// - The second one runs grep 'hello world' on /mnt/test.
-	// If both succeed, Kubernetes actually allocated something that is
-	// persistent across pods.
-	By("checking the created volume is writable and has the PV's mount options")
-	command := "echo 'hello world' > /mnt/test/data"
-	// We give the first pod the secondary responsibility of checking the volume has
-	// been mounted with the PV's mount options, if the PV was provisioned with any
-	for _, option := range pv.Spec.MountOptions {
-		// Get entry, get mount options at 6th word, replace brackets with commas
-		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
-	}
-	runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, command)
-
-	By("checking the created volume is readable and retains data")
-	runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, "grep 'hello world' /mnt/test/data")
+	if t.attach {
+		// We start two pods:
+		// - The first writes 'hello world' to /mnt/test (= the volume).
+		// - The second one runs grep 'hello world' on /mnt/test.
+		// If both succeed, Kubernetes actually allocated something that is
+		// persistent across pods.
+		By("checking the created volume is writable and has the PV's mount options")
+		command := "echo 'hello world' > /mnt/test/data"
+		// We give the first pod the secondary responsibility of checking the volume has
+		// been mounted with the PV's mount options, if the PV was provisioned with any
+		for _, option := range pv.Spec.MountOptions {
+			// Get entry, get mount options at 6th word, replace brackets with commas
+			command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
+		}
+		runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, command)
+
+		By("checking the created volume is readable and retains data")
+		runInPodWithVolume(client, claim.Namespace, claim.Name, t.nodeName, "grep 'hello world' /mnt/test/data")
+	}
 
 	By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
 	framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
```
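
The mount-option check above assembles one shell pipeline per PV mount option. As a standalone illustration (the option value is hypothetical; the `fmt.Sprintf` line is copied from the hunk), the snippet below prints the command the writer pod would run: `mount` prints lines like `/dev/sdb on /mnt/test type xfs (rw,noatime)`, `awk` picks the parenthesized sixth field, `sed` turns the brackets into commas, and `grep -q ,noatime,` matches the option as a whole word.

```go
// Standalone illustration of the command assembly in the hunk above.
package main

import "fmt"

func main() {
	command := "echo 'hello world' > /mnt/test/data"
	mountOptions := []string{"noatime"} // stand-in for pv.Spec.MountOptions
	for _, option := range mountOptions {
		// Get entry, get mount options at 6th word, replace brackets with commas
		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
	}
	fmt.Println(command)
}
```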
```diff
@@ -781,6 +787,30 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
 		})
 	})
+
+	framework.KubeDescribe("GlusterDynamicProvisioner", func() {
+		It("should create and delete persistent volumes [fast]", func() {
+			By("creating a Gluster DP server Pod")
+			pod := startGlusterDpServerPod(c, ns)
+			serverUrl := "https://" + pod.Status.PodIP + ":8081"
+			By("creating a StorageClass")
+			test := storageClassTest{
+				name:         "Gluster Dynamic provisioner test",
+				provisioner:  "kubernetes.io/glusterfs",
+				claimSize:    "2Gi",
+				expectedSize: "2Gi",
+				parameters:   map[string]string{"resturl": serverUrl},
+				attach:       false,
+			}
+
+			suffix := fmt.Sprintf("glusterdptest")
+			class := newStorageClass(test, ns, suffix)
+
+			By("creating a claim object with a suffix for gluster dynamic provisioner")
+			claim := newClaim(test, ns, suffix)
+			testDynamicProvisioning(test, c, claim, class)
+		})
+	})
 })
 
 func getDefaultStorageClassName(c clientset.Interface) string {
```
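
For reference, the `newClaim` helper invoked above lives elsewhere in this file; a hedged paraphrase of the claim it builds (the shape is assumed from how the test uses it, not part of this diff):

```go
// Rough shape of the PVC newClaim returns (sketch): an RWO claim sized
// from test.claimSize; the storage class reference is wired up by the
// surrounding harness before creation.
claim := &v1.PersistentVolumeClaim{
	ObjectMeta: metav1.ObjectMeta{
		GenerateName: "pvc-",
		Namespace:    ns,
	},
	Spec: v1.PersistentVolumeClaimSpec{
		AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceStorage: resource.MustParse(test.claimSize),
			},
		},
	},
}
```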
```diff
@@ -967,6 +997,55 @@ func newBetaStorageClass(t storageClassTest, suffix string) *storagebeta.Storage
 	}
 }
 
+func startGlusterDpServerPod(c clientset.Interface, ns string) *v1.Pod {
+	podClient := c.CoreV1().Pods(ns)
+
+	provisionerPod := &v1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Pod",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: "glusterdynamic-provisioner-",
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:  "glusterdynamic-provisioner",
+					Image: "docker.io/humblec/glusterdynamic-provisioner:v1.0",
+					Args: []string{
+						"-config=" + "/etc/heketi/heketi.json",
+					},
+					Ports: []v1.ContainerPort{
+						{Name: "heketi", ContainerPort: 8081},
+					},
+					Env: []v1.EnvVar{
+						{
+							Name: "POD_IP",
+							ValueFrom: &v1.EnvVarSource{
+								FieldRef: &v1.ObjectFieldSelector{
+									FieldPath: "status.podIP",
+								},
+							},
+						},
+					},
+					ImagePullPolicy: v1.PullIfNotPresent,
+				},
+			},
+		},
+	}
+	provisionerPod, err := podClient.Create(provisionerPod)
+	framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
+
+	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))
+
+	By("locating the provisioner pod")
+	pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
+	framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
+	return pod
+}
+
 func startExternalProvisioner(c clientset.Interface, ns string) *v1.Pod {
 	podClient := c.CoreV1().Pods(ns)
```
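
A note on `startGlusterDpServerPod`: the pod object returned by `Create` predates scheduling, so it carries no `Status.PodIP`; that is why the helper re-fetches the pod after `WaitForPodRunningInNamespace` before returning it. The caller then derives the heketi endpoint from the populated IP, as in the new test:

```go
// Usage from the new test above: only a Running pod carries a PodIP,
// which becomes the resturl handed to the glusterfs provisioner.
pod := startGlusterDpServerPod(c, ns)
serverUrl := "https://" + pod.Status.PodIP + ":8081" // matches the container's "heketi" port
```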