Add e2e test for external PV provisioning

Matthew Wong
2017-01-06 17:07:34 -05:00
parent 41d7acc0f5
commit 7184977c54


@@ -37,9 +37,14 @@ const (
// Expected size of the volume is 2GiB, because all three supported cloud
// providers allocate volumes in 1GiB chunks.
expectedSize = "2Gi"
// Expected size of the externally provisioned volume depends on the external
// provisioner: for the nfs-provisioner used here, it's equal to the requested size.
externalExpectedSize = "1500Mi"
// Plugin name of the external provisioner
externalPluginName = "example.com/nfs"
)
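As a side note on the size check these constants feed (a standalone sketch, not part of this commit, assuming the apimachinery resource package path): quantities compare by their canonical byte value, so "1500Mi" is 1500 * 2^20 bytes and differs from the cloud-provider "2Gi" (2 * 2^30 bytes).

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// The external provisioner serves exactly what the claim requested.
	external := resource.MustParse("1500Mi")
	// Cloud providers round up to whole GiB, hence the separate constant.
	cloud := resource.MustParse("2Gi")
	fmt.Println(external.Value()) // 1572864000 bytes
	fmt.Println(cloud.Value())    // 2147483648 bytes
}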
func testDynamicProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim) {
func testDynamicProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, external bool) {
err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
@@ -54,6 +59,9 @@ func testDynamicProvisioning(client clientset.Interface, claim *v1.PersistentVol
// Check sizes
expectedCapacity := resource.MustParse(expectedSize)
if external {
expectedCapacity = resource.MustParse(externalExpectedSize)
}
pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))
@@ -79,21 +87,23 @@ func testDynamicProvisioning(client clientset.Interface, claim *v1.PersistentVol
By("checking the created volume is readable and retains data")
runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")
// Ugly hack: if we delete the AWS/GCE/OpenStack volume here, it will
// probably collide with destruction of the pods above - the pods
// still have the volume attached (kubelet is slow...) and deletion
// of attached volume is not allowed by AWS/GCE/OpenStack.
// Kubernetes *will* retry deletion several times in
// pvclaimbinder-sync-period.
// So, technically, this sleep is not needed. On the other hand,
// the sync period is 10 minutes and we really don't want to wait
// 10 minutes here. There is no way to tell whether kubelet has
// finished cleaning up the volumes. A small sleep here actually
// speeds up the test!
// Three minutes should be enough to clean up the pods properly.
// We've seen a GCE PD detach take more than 1 minute.
By("Sleeping to let kubelet destroy all pods")
time.Sleep(3 * time.Minute)
if !external {
// Ugly hack: if we delete the AWS/GCE/OpenStack volume here, it will
// probably collide with destruction of the pods above - the pods
// still have the volume attached (kubelet is slow...) and deletion
// of attached volume is not allowed by AWS/GCE/OpenStack.
// Kubernetes *will* retry deletion several times in
// pvclaimbinder-sync-period.
// So, technically, this sleep is not needed. On the other hand,
// the sync period is 10 minutes and we really don't want to wait
// 10 minutes here. There is no way to tell whether kubelet has
// finished cleaning up the volumes. A small sleep here actually
// speeds up the test!
// Three minutes should be enough to clean up the pods properly.
// We've seen a GCE PD detach take more than 1 minute.
By("Sleeping to let kubelet destroy all pods")
time.Sleep(3 * time.Minute)
}
By("deleting the claim")
framework.ExpectNoError(client.Core().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
@@ -119,7 +129,7 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")
By("creating a StorageClass")
class := newStorageClass()
class := newStorageClass("")
_, err := c.Storage().StorageClasses().Create(class)
defer c.Storage().StorageClasses().Delete(class.Name, nil)
Expect(err).NotTo(HaveOccurred())
@@ -132,7 +142,7 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() {
claim, err = c.Core().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
testDynamicProvisioning(c, claim)
testDynamicProvisioning(c, claim, false)
})
})
@@ -148,7 +158,31 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() {
claim, err := c.Core().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
testDynamicProvisioning(c, claim)
testDynamicProvisioning(c, claim, false)
})
})
framework.KubeDescribe("DynamicProvisioner External", func() {
It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() {
By("creating an external dynamic provisioner pod")
pod := startExternalProvisioner(c, ns)
defer c.Core().Pods(ns).Delete(pod.Name, nil)
By("creating a StorageClass")
class := newStorageClass(externalPluginName)
_, err := c.Storage().StorageClasses().Create(class)
defer c.Storage().StorageClasses().Delete(class.Name, nil)
Expect(err).NotTo(HaveOccurred())
By("creating a claim with a dynamic provisioning annotation")
claim := newClaim(ns, false)
defer func() {
c.Core().PersistentVolumeClaims(ns).Delete(claim.Name, nil)
}()
claim, err = c.Core().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
testDynamicProvisioning(c, claim, true)
})
})
})
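For context on the contract the new [Slow] test exercises: an external provisioner watches for claims that reference a StorageClass whose Provisioner field carries its name (here example.com/nfs), provisions the backing storage, and creates a matching PersistentVolume marked as provisioned by it. The function below is only an illustrative sketch of such a PV, not code from nfs-provisioner or from this commit; serverIP and exportPath are hypothetical placeholders.

// newExternalPV sketches the PersistentVolume an external provisioner is
// expected to create once it has matched a claim; serverIP and exportPath
// are hypothetical placeholders for the provisioned NFS share.
func newExternalPV(claim *v1.PersistentVolumeClaim, serverIP, exportPath string) *v1.PersistentVolume {
	return &v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{
			Name: "pvc-" + string(claim.UID),
			Annotations: map[string]string{
				// Records which external provisioner created this volume.
				"pv.kubernetes.io/provisioned-by": externalPluginName,
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
			AccessModes:                   claim.Spec.AccessModes,
			// Bind the volume to the claim that triggered provisioning.
			ClaimRef: &v1.ObjectReference{
				Namespace: claim.Namespace,
				Name:      claim.Name,
				UID:       claim.UID,
			},
			Capacity: v1.ResourceList{
				// nfs-provisioner serves exactly the requested size, which is
				// why externalExpectedSize equals the claim's request above.
				v1.ResourceStorage: claim.Spec.Resources.Requests[v1.ResourceStorage],
			},
			PersistentVolumeSource: v1.PersistentVolumeSource{
				NFS: &v1.NFSVolumeSource{Server: serverIP, Path: exportPath},
			},
		},
	}
}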
@@ -232,16 +266,16 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}
func newStorageClass() *storage.StorageClass {
var pluginName string
switch {
case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
pluginName = "kubernetes.io/gce-pd"
case framework.ProviderIs("aws"):
pluginName = "kubernetes.io/aws-ebs"
case framework.ProviderIs("openstack"):
pluginName = "kubernetes.io/cinder"
func newStorageClass(pluginName string) *storage.StorageClass {
if pluginName == "" {
switch {
case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
pluginName = "kubernetes.io/gce-pd"
case framework.ProviderIs("aws"):
pluginName = "kubernetes.io/aws-ebs"
case framework.ProviderIs("openstack"):
pluginName = "kubernetes.io/cinder"
}
}
return &storage.StorageClass{
@@ -254,3 +288,78 @@ func newStorageClass() *storage.StorageClass {
Provisioner: pluginName,
}
}
func startExternalProvisioner(c clientset.Interface, ns string) *v1.Pod {
podClient := c.Core().Pods(ns)
provisionerPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
GenerateName: "external-provisioner-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nfs-provisioner",
Image: "quay.io/kubernetes_incubator/nfs-provisioner:v1.0.1",
SecurityContext: &v1.SecurityContext{
Capabilities: &v1.Capabilities{
Add: []v1.Capability{"DAC_READ_SEARCH"},
},
},
Args: []string{
"-provisioner=" + externalPluginName,
"-grace-period=0",
},
Ports: []v1.ContainerPort{
{Name: "nfs", ContainerPort: 2049},
{Name: "mountd", ContainerPort: 20048},
{Name: "rpcbind", ContainerPort: 111},
{Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP},
},
Env: []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
},
ImagePullPolicy: v1.PullIfNotPresent,
VolumeMounts: []v1.VolumeMount{
{
Name: "export-volume",
MountPath: "/export",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "export-volume",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
},
}
provisionerPod, err := podClient.Create(provisionerPod)
framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))
By("locating the provisioner pod")
pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
By("sleeping a bit to give the provisioner time to start")
time.Sleep(5 * time.Second)
return pod
}