Merge pull request #29006 from jsafrane/dynprov2

Automatic merge from submit-queue

Implement dynamic provisioning (beta) of PersistentVolumes via StorageClass

Implemented according to the proposal in PR #26908. The PR consists of several patches, including one huge code regeneration.

* Please review the API changes (the first patch) carefully; sometimes I don't know what the generated code is doing...

* `PV.Spec.Class` and `PVC.Spec.Class` are not implemented; use the annotation `volume.beta.kubernetes.io/storage-class` instead. See the first example claim below this list.

* See the e2e test and integration test changes: Kubernetes won't provision anything without explicit configuration of at least one `StorageClass` instance!

* Multiple provisioning volume plugins can coexist, e.g. HostPath and AWS EBS. This is important for the Gluster and RBD provisioners in #25026.

* Contradicting the proposal, `claim.Selector` and the `volume.beta.kubernetes.io/storage-class` annotation are **not** mutually exclusive: both are used for matching existing PVs. However, only the annotation is used for provisioning; configuring provisioning with `Selector` is left for the (near) future. See the second example claim below this list.

* Documentation is missing. Can someone please write some while I am out?
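
For illustration, a claim that asks for provisioning from a specific class names that class in the beta annotation. The claim below is a hypothetical example (name, class, and size are made up), not part of this PR:

```
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: myclaim
  annotations:
    volume.beta.kubernetes.io/storage-class: slow
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
```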
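
A claim may also combine both matching mechanisms: the selector narrows down existing PVs by labels, while the annotation names the class. Again a hypothetical sketch; the labels are made up:

```
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: myclaim-with-selector
  annotations:
    volume.beta.kubernetes.io/storage-class: slow
spec:
  selector:
    matchLabels:
      tier: archive
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi
```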

For now, the AWS volume plugin accepts classes with these parameters:

```
apiVersion: extensions/v1beta1
kind: StorageClass
metadata:
  name: slow
provisioner: kubernetes.io/aws-ebs
parameters:
  type: io1
  zone: us-east-1d
  iopsPerGB: "10"
```

* parameters are case-insensitive
* `type`: `io1`, `gp2`, `sc1` or `st1`. See AWS docs for details.
* `iopsPerGB`: only for `io1` volumes; I/O operations per second per GiB. The AWS volume plugin multiplies this by the size of the requested volume to compute the IOPS of the volume and caps the result at 20,000 IOPS (the maximum supported by AWS, see AWS docs). A worked example follows this list.
* of course, the plugin will use some defaults when a parameter is omitted in a `StorageClass` instance (`gp2` in the same zone, as in 1.3).
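
To make the `iopsPerGB` math concrete (the volume sizes below are illustrative):

```
iopsPerGB: "10", requested size  100 GiB  ->  10 * 100  =  1000 IOPS
iopsPerGB: "10", requested size 2500 GiB  ->  10 * 2500 = 25000 -> capped at 20000 IOPS
```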

GCE:

```
apiVersion: extensions/v1beta1
kind: StorageClass
metadata:
  name: slow
provisioner: kubernetes.io/gce-pd
parameters:
  type: pd-standard
  zone: us-central1-a
```

* `type`: `pd-standard` or `pd-ssd`
* `zone`: GCE zone
* of course, the plugin will use some defaults when a parameter is omitted in a `StorageClass` instance (SSD in the same zone, as in 1.3?).


No OpenStack/Cinder support yet.

@kubernetes/sig-storage
This commit is contained in:
Kubernetes Submit Queue (2016-08-18 09:56:16 -07:00), committed by GitHub
31 changed files with 1180 additions and 349 deletions


```
@@ -469,7 +469,7 @@ func createPD() (string, error) {
     }
     tags := map[string]string{}
-    err = gceCloud.CreateDisk(pdName, framework.TestContext.CloudConfig.Zone, 10 /* sizeGb */, tags)
+    err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, framework.TestContext.CloudConfig.Zone, 10 /* sizeGb */, tags)
     if err != nil {
         return "", err
     }
```


```
@@ -22,6 +22,7 @@ import (
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/resource"
     "k8s.io/kubernetes/pkg/api/unversioned"
+    "k8s.io/kubernetes/pkg/apis/extensions"
     client "k8s.io/kubernetes/pkg/client/unversioned"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -37,6 +38,69 @@ const (
     expectedSize = "2Gi"
 )
+
+func testDynamicProvisioning(client *client.Client, claim *api.PersistentVolumeClaim) {
+    err := framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
+    Expect(err).NotTo(HaveOccurred())
+
+    By("checking the claim")
+    // Get new copy of the claim
+    claim, err = client.PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
+    Expect(err).NotTo(HaveOccurred())
+
+    // Get the bound PV
+    pv, err := client.PersistentVolumes().Get(claim.Spec.VolumeName)
+    Expect(err).NotTo(HaveOccurred())
+
+    // Check sizes
+    expectedCapacity := resource.MustParse(expectedSize)
+    pvCapacity := pv.Spec.Capacity[api.ResourceName(api.ResourceStorage)]
+    Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))
+
+    requestedCapacity := resource.MustParse(requestedSize)
+    claimCapacity := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
+    Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))
+
+    // Check PV properties
+    Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(api.PersistentVolumeReclaimDelete))
+    expectedAccessModes := []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
+    Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
+    Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
+    Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))
+
+    // We start two pods:
+    // - The first writes 'hello world' to /mnt/test (= the volume).
+    // - The second one runs grep 'hello world' on /mnt/test.
+    // If both succeed, Kubernetes actually allocated something that is
+    // persistent across pods.
+    By("checking the created volume is writable")
+    runInPodWithVolume(client, claim.Namespace, claim.Name, "echo 'hello world' > /mnt/test/data")
+
+    By("checking the created volume is readable and retains data")
+    runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")
+
+    // Ugly hack: if we delete the AWS/GCE/OpenStack volume here, it will
+    // probably collide with destruction of the pods above - the pods
+    // still have the volume attached (kubelet is slow...) and deletion
+    // of an attached volume is not allowed by AWS/GCE/OpenStack.
+    // Kubernetes *will* retry deletion several times within
+    // pvclaimbinder-sync-period.
+    // So, technically, this sleep is not needed. On the other hand,
+    // the sync period is 10 minutes and we really don't want to wait
+    // 10 minutes here. There is no way to see whether kubelet is
+    // finished with cleaning volumes. A small sleep here actually
+    // speeds up the test!
+    // Three minutes should be enough to clean up the pods properly.
+    // We've seen GCE PD detach take more than 1 minute.
+    By("Sleeping to let kubelet destroy all pods")
+    time.Sleep(3 * time.Minute)
+
+    By("deleting the claim")
+    framework.ExpectNoError(client.PersistentVolumeClaims(claim.Namespace).Delete(claim.Name))
+
+    // Wait for the PV to get deleted too.
+    framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
+}
 
 var _ = framework.KubeDescribe("Dynamic provisioning", func() {
     f := framework.NewDefaultFramework("volume-provisioning")
@@ -52,86 +116,47 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() {
     framework.KubeDescribe("DynamicProvisioner", func() {
         It("should create and delete persistent volumes", func() {
             framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")
+
+            By("creating a StorageClass")
+            class := newStorageClass()
+            _, err := c.Extensions().StorageClasses().Create(class)
+            defer c.Extensions().StorageClasses().Delete(class.Name)
+            Expect(err).NotTo(HaveOccurred())
+
             By("creating a claim with a dynamic provisioning annotation")
-            claim := createClaim(ns)
+            claim := newClaim(ns, false)
             defer func() {
                 c.PersistentVolumeClaims(ns).Delete(claim.Name)
             }()
             claim, err = c.PersistentVolumeClaims(ns).Create(claim)
             Expect(err).NotTo(HaveOccurred())
+
+            testDynamicProvisioning(c, claim)
+        })
+    })
+
+    framework.KubeDescribe("DynamicProvisioner Alpha", func() {
+        It("should create and delete alpha persistent volumes", func() {
+            framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")
+
+            By("creating a claim with an alpha dynamic provisioning annotation")
+            claim := newClaim(ns, true)
+            defer func() {
+                c.PersistentVolumeClaims(ns).Delete(claim.Name)
+            }()
+            claim, err := c.PersistentVolumeClaims(ns).Create(claim)
+            Expect(err).NotTo(HaveOccurred())
-            err = framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
-            Expect(err).NotTo(HaveOccurred())
-            By("checking the claim")
-            // Get new copy of the claim
-            claim, err = c.PersistentVolumeClaims(ns).Get(claim.Name)
-            Expect(err).NotTo(HaveOccurred())
-            // Get the bound PV
-            pv, err := c.PersistentVolumes().Get(claim.Spec.VolumeName)
-            Expect(err).NotTo(HaveOccurred())
-            // Check sizes
-            expectedCapacity := resource.MustParse(expectedSize)
-            pvCapacity := pv.Spec.Capacity[api.ResourceName(api.ResourceStorage)]
-            Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))
-            requestedCapacity := resource.MustParse(requestedSize)
-            claimCapacity := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
-            Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))
-            // Check PV properties
-            Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(api.PersistentVolumeReclaimDelete))
-            expectedAccessModes := []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
-            Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
-            Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
-            Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))
-            // We start two pods:
-            // - The first writes 'hello world' to /mnt/test (= the volume).
-            // - The second one runs grep 'hello world' on /mnt/test.
-            // If both succeed, Kubernetes actually allocated something that is
-            // persistent across pods.
-            By("checking the created volume is writable")
-            runInPodWithVolume(c, ns, claim.Name, "echo 'hello world' > /mnt/test/data")
-            By("checking the created volume is readable and retains data")
-            runInPodWithVolume(c, ns, claim.Name, "grep 'hello world' /mnt/test/data")
-            // Ugly hack: if we delete the AWS/GCE/OpenStack volume here, it will
-            // probably collide with destruction of the pods above - the pods
-            // still have the volume attached (kubelet is slow...) and deletion
-            // of an attached volume is not allowed by AWS/GCE/OpenStack.
-            // Kubernetes *will* retry deletion several times within
-            // pvclaimbinder-sync-period.
-            // So, technically, this sleep is not needed. On the other hand,
-            // the sync period is 10 minutes and we really don't want to wait
-            // 10 minutes here. There is no way to see whether kubelet is
-            // finished with cleaning volumes. A small sleep here actually
-            // speeds up the test!
-            // Three minutes should be enough to clean up the pods properly.
-            // We've seen GCE PD detach take more than 1 minute.
-            By("Sleeping to let kubelet destroy all pods")
-            time.Sleep(3 * time.Minute)
-            By("deleting the claim")
-            framework.ExpectNoError(c.PersistentVolumeClaims(ns).Delete(claim.Name))
-            // Wait for the PV to get deleted too.
-            framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, 20*time.Minute))
+            testDynamicProvisioning(c, claim)
         })
     })
 })
 
-func createClaim(ns string) *api.PersistentVolumeClaim {
-    return &api.PersistentVolumeClaim{
+func newClaim(ns string, alpha bool) *api.PersistentVolumeClaim {
+    claim := api.PersistentVolumeClaim{
         ObjectMeta: api.ObjectMeta{
             GenerateName: "pvc-",
             Namespace:    ns,
-            Annotations: map[string]string{
-                "volume.alpha.kubernetes.io/storage-class": "",
-            },
         },
         Spec: api.PersistentVolumeClaimSpec{
             AccessModes: []api.PersistentVolumeAccessMode{
@@ -144,6 +169,19 @@ func createClaim(ns string) *api.PersistentVolumeClaim {
             },
         },
     }
+
+    if alpha {
+        claim.Annotations = map[string]string{
+            "volume.alpha.kubernetes.io/storage-class": "",
+        }
+    } else {
+        claim.Annotations = map[string]string{
+            "volume.beta.kubernetes.io/storage-class": "fast",
+        }
+    }
+
+    return &claim
 }
 
 // runInPodWithVolume runs a command in a pod with the given claim mounted to the /mnt directory.
@@ -192,3 +230,26 @@ func runInPodWithVolume(c *client.Client, ns, claimName, command string) {
     framework.ExpectNoError(err, "Failed to create pod: %v", err)
     framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Spec.Containers[0].Name, pod.Namespace))
 }
+
+func newStorageClass() *extensions.StorageClass {
+    var pluginName string
+
+    switch {
+    case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
+        pluginName = "kubernetes.io/gce-pd"
+    case framework.ProviderIs("aws"):
+        pluginName = "kubernetes.io/aws-ebs"
+    case framework.ProviderIs("openstack"):
+        pluginName = "kubernetes.io/cinder"
+    }
+
+    return &extensions.StorageClass{
+        TypeMeta: unversioned.TypeMeta{
+            Kind: "StorageClass",
+        },
+        ObjectMeta: api.ObjectMeta{
+            Name: "fast",
+        },
+        Provisioner: pluginName,
+    }
+}
```


```
@@ -31,6 +31,7 @@ import (
     "k8s.io/kubernetes/pkg/api/resource"
     "k8s.io/kubernetes/pkg/api/testapi"
     "k8s.io/kubernetes/pkg/api/unversioned"
+    "k8s.io/kubernetes/pkg/apis/extensions"
     clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/client/restclient"
     fake_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
@@ -62,6 +63,8 @@ func init() {
 const defaultObjectCount = 100
 const defaultSyncPeriod = 10 * time.Second
+
+const provisionerPluginName = "kubernetes.io/mock-provisioner"
 
 func getObjectCount() int {
     objectCount := defaultObjectCount
     if s := os.Getenv("KUBE_INTEGRATION_PV_OBJECTS"); s != "" {
@@ -849,8 +852,20 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
     defer watchPVC.Stop()
     // NOTE: This test cannot run in parallel, because it is creating and deleting
-    // non-namespaced objects (PersistentVolumes).
+    // non-namespaced objects (PersistentVolumes and StorageClasses).
     defer testClient.Core().PersistentVolumes().DeleteCollection(nil, api.ListOptions{})
+    defer testClient.Extensions().StorageClasses().DeleteCollection(nil, api.ListOptions{})
+
+    storageClass := extensions.StorageClass{
+        TypeMeta: unversioned.TypeMeta{
+            Kind: "StorageClass",
+        },
+        ObjectMeta: api.ObjectMeta{
+            Name: "gold",
+        },
+        Provisioner: provisionerPluginName,
+    }
+    testClient.Extensions().StorageClasses().Create(&storageClass)
 
     binder.Run()
     defer binder.Stop()
@@ -860,7 +875,7 @@ func TestPersistentVolumeProvisionMultiPVCs(t *testing.T) {
     for i := 0; i < objCount; i++ {
         pvc := createPVC("pvc-provision-"+strconv.Itoa(i), ns.Name, "1G", []api.PersistentVolumeAccessMode{api.ReadWriteOnce})
         pvc.Annotations = map[string]string{
-            "volume.alpha.kubernetes.io/storage-class": "",
+            "volume.beta.kubernetes.io/storage-class": "gold",
         }
         pvcs[i] = pvc
     }
@@ -1086,7 +1101,7 @@ func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeriod
     host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */)
     plugin := &volumetest.FakeVolumePlugin{
-        PluginName:             "plugin-name",
+        PluginName:             provisionerPluginName,
         Host:                   host,
         Config:                 volume.VolumeConfig{},
         LastProvisionerOptions: volume.VolumeOptions{},
@@ -1101,7 +1116,18 @@ func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeriod
     cloud := &fake_cloud.FakeCloud{}
     syncPeriod = getSyncPeriod(syncPeriod)
-    ctrl := persistentvolumecontroller.NewPersistentVolumeController(binderClient, syncPeriod, plugin, plugins, cloud, "", nil, nil, nil, true)
+    ctrl := persistentvolumecontroller.NewPersistentVolumeController(
+        binderClient,
+        syncPeriod,
+        nil, // alpha provisioner
+        plugins,
+        cloud,
+        "",   // cluster name
+        nil,  // volumeSource
+        nil,  // claimSource
+        nil,  // classSource
+        nil,  // eventRecorder
+        true) // enableDynamicProvisioning
 
     watchPV, err := testClient.PersistentVolumes().Watch(api.ListOptions{})
     if err != nil {
```