diff --git a/cmd/kube-controller-manager/app/plugins.go b/cmd/kube-controller-manager/app/plugins.go
index b58fcda5dca..ebdbd397f25 100644
--- a/cmd/kube-controller-manager/app/plugins.go
+++ b/cmd/kube-controller-manager/app/plugins.go
@@ -29,6 +29,7 @@ import (
 	// Volume plugins
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
+	"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 	"k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
 	"k8s.io/kubernetes/pkg/util/io"
 	"k8s.io/kubernetes/pkg/volume"
@@ -91,8 +92,8 @@ func NewVolumeProvisioner(cloud cloudprovider.Interface, flags VolumeConfigFlags
 		return getProvisionablePluginFromVolumePlugins(host_path.ProbeVolumePlugins(volume.VolumeConfig{}))
 	case cloud != nil && aws.ProviderName == cloud.ProviderName():
 		return getProvisionablePluginFromVolumePlugins(aws_ebs.ProbeVolumePlugins())
-	// case cloud != nil && gce.ProviderName == cloud.ProviderName():
-	// 	return getProvisionablePluginFromVolumePlugins(gce_pd.ProbeVolumePlugins())
+	case cloud != nil && gce.ProviderName == cloud.ProviderName():
+		return getProvisionablePluginFromVolumePlugins(gce_pd.ProbeVolumePlugins())
 	case cloud != nil && openstack.ProviderName == cloud.ProviderName():
 		return getProvisionablePluginFromVolumePlugins(cinder.ProbeVolumePlugins())
 	}
diff --git a/pkg/volume/gce_pd/gce_pd.go b/pkg/volume/gce_pd/gce_pd.go
index 7cf0cce6b88..ba3c6b6f3fd 100644
--- a/pkg/volume/gce_pd/gce_pd.go
+++ b/pkg/volume/gce_pd/gce_pd.go
@@ -17,12 +17,14 @@ limitations under the License.
 package gce_pd
 
 import (
+	"fmt"
 	"os"
 	"path"
 	"strconv"
 
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util"
 	"k8s.io/kubernetes/pkg/util/exec"
@@ -41,6 +43,8 @@ type gcePersistentDiskPlugin struct {
 
 var _ volume.VolumePlugin = &gcePersistentDiskPlugin{}
 var _ volume.PersistentVolumePlugin = &gcePersistentDiskPlugin{}
+var _ volume.DeletableVolumePlugin = &gcePersistentDiskPlugin{}
+var _ volume.ProvisionableVolumePlugin = &gcePersistentDiskPlugin{}
 
 const (
 	gcePersistentDiskPluginName = "kubernetes.io/gce-pd"
@@ -122,12 +126,50 @@ func (plugin *gcePersistentDiskPlugin) newCleanerInternal(volName string, podUID
 	}}, nil
 }
 
+func (plugin *gcePersistentDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
+	return plugin.newDeleterInternal(spec, &GCEDiskUtil{})
+}
+
+func (plugin *gcePersistentDiskPlugin) newDeleterInternal(spec *volume.Spec, manager pdManager) (volume.Deleter, error) {
+	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.GCEPersistentDisk == nil {
+		return nil, fmt.Errorf("spec.PersistentVolumeSource.GCEPersistentDisk is nil")
+	}
+	return &gcePersistentDiskDeleter{
+		gcePersistentDisk: &gcePersistentDisk{
+			volName: spec.Name(),
+			pdName:  spec.PersistentVolume.Spec.GCEPersistentDisk.PDName,
+			manager: manager,
+			plugin:  plugin,
+		}}, nil
+}
+
+func (plugin *gcePersistentDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
+	if len(options.AccessModes) == 0 {
+		options.AccessModes = plugin.GetAccessModes()
+	}
+	return plugin.newProvisionerInternal(options, &GCEDiskUtil{})
+}
+
+func (plugin *gcePersistentDiskPlugin) newProvisionerInternal(options volume.VolumeOptions, manager pdManager) (volume.Provisioner, error) {
+	return &gcePersistentDiskProvisioner{
+		gcePersistentDisk: &gcePersistentDisk{
+			manager: manager,
+			plugin:  plugin,
+		},
+		options: options,
+	}, nil
+}
+
 // Abstract interface to PD operations.
 type pdManager interface {
 	// Attaches the disk to the kubelet's host machine.
 	AttachAndMountDisk(b *gcePersistentDiskBuilder, globalPDPath string) error
 	// Detaches the disk from the kubelet's host machine.
 	DetachDisk(c *gcePersistentDiskCleaner) error
+	// Creates a volume
+	CreateVolume(provisioner *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, err error)
+	// Deletes a volume
+	DeleteVolume(deleter *gcePersistentDiskDeleter) error
 }
 
 // gcePersistentDisk volumes are disk resources provided by Google Compute Engine
@@ -301,3 +343,66 @@ func (c *gcePersistentDiskCleaner) TearDownAt(dir string) error {
 	}
 	return nil
 }
+
+type gcePersistentDiskDeleter struct {
+	*gcePersistentDisk
+}
+
+var _ volume.Deleter = &gcePersistentDiskDeleter{}
+
+func (d *gcePersistentDiskDeleter) GetPath() string {
+	name := gcePersistentDiskPluginName
+	return d.plugin.host.GetPodVolumeDir(d.podUID, util.EscapeQualifiedNameForDisk(name), d.volName)
+}
+
+func (d *gcePersistentDiskDeleter) Delete() error {
+	return d.manager.DeleteVolume(d)
+}
+
+type gcePersistentDiskProvisioner struct {
+	*gcePersistentDisk
+	options volume.VolumeOptions
+}
+
+var _ volume.Provisioner = &gcePersistentDiskProvisioner{}
+
+func (c *gcePersistentDiskProvisioner) Provision(pv *api.PersistentVolume) error {
+	volumeID, sizeGB, err := c.manager.CreateVolume(c)
+	if err != nil {
+		return err
+	}
+	pv.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName = volumeID
+	pv.Spec.Capacity = api.ResourceList{
+		api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
+	}
+	return nil
+}
+
+func (c *gcePersistentDiskProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) {
+	// Provide dummy api.PersistentVolume.Spec, it will be filled in
+	// gcePersistentDiskProvisioner.Provision()
+	return &api.PersistentVolume{
+		ObjectMeta: api.ObjectMeta{
+			GenerateName: "pv-gce-",
+			Labels:       map[string]string{},
+			Annotations: map[string]string{
+				"kubernetes.io/createdby": "gce-pd-dynamic-provisioner",
+			},
+		},
+		Spec: api.PersistentVolumeSpec{
+			PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
+			AccessModes:                   c.options.AccessModes,
+			Capacity: api.ResourceList{
+				api.ResourceName(api.ResourceStorage): c.options.Capacity,
+			},
+			PersistentVolumeSource: api.PersistentVolumeSource{
+				GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
+					PDName:    "dummy",
+					FSType:    "ext4",
+					Partition: 0,
+					ReadOnly:  false,
+				},
+			},
+		},
+	}, nil
+}
diff --git a/pkg/volume/gce_pd/gce_pd_test.go b/pkg/volume/gce_pd/gce_pd_test.go
index 0559c14b82f..4e5651e393b 100644
--- a/pkg/volume/gce_pd/gce_pd_test.go
+++ b/pkg/volume/gce_pd/gce_pd_test.go
@@ -17,12 +17,14 @@ limitations under the License.
 package gce_pd
 
 import (
+	"fmt"
 	"io/ioutil"
 	"os"
 	"path"
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
 	"k8s.io/kubernetes/pkg/types"
@@ -111,6 +113,17 @@ func (fake *fakePDManager) DetachDisk(c *gcePersistentDiskCleaner) error {
 	return nil
 }
 
+func (fake *fakePDManager) CreateVolume(c *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, err error) {
+	return "test-gce-volume-name", 100, nil
+}
+
+func (fake *fakePDManager) DeleteVolume(cd *gcePersistentDiskDeleter) error {
+	if cd.pdName != "test-gce-volume-name" {
+		return fmt.Errorf("Deleter got unexpected volume name: %s", cd.pdName)
+	}
+	return nil
+}
+
 func TestPlugin(t *testing.T) {
 	tmpDir, err := ioutil.TempDir(os.TempDir(), "gcepdTest")
 	if err != nil {
@@ -190,6 +203,47 @@ func TestPlugin(t *testing.T) {
 	if !fakeManager.detachCalled {
 		t.Errorf("Detach watch not called")
 	}
+
+	// Test Provisioner
+	cap := resource.MustParse("100Mi")
+	options := volume.VolumeOptions{
+		Capacity: cap,
+		AccessModes: []api.PersistentVolumeAccessMode{
+			api.ReadWriteOnce,
+		},
+		PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
+	}
+	provisioner, err := plug.(*gcePersistentDiskPlugin).newProvisionerInternal(options, &fakePDManager{})
+	persistentSpec, err := provisioner.NewPersistentVolumeTemplate()
+	if err != nil {
+		t.Errorf("NewPersistentVolumeTemplate() failed: %v", err)
+	}
+
+	// get 2nd Provisioner - persistent volume controller will do the same
+	provisioner, err = plug.(*gcePersistentDiskPlugin).newProvisionerInternal(options, &fakePDManager{})
+	err = provisioner.Provision(persistentSpec)
+	if err != nil {
+		t.Errorf("Provision() failed: %v", err)
+	}
+
+	if persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName != "test-gce-volume-name" {
+		t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName)
+	}
+	cap = persistentSpec.Spec.Capacity[api.ResourceStorage]
+	size := cap.Value()
+	if size != 100*1024*1024*1024 {
+		t.Errorf("Provision() returned unexpected volume size: %v", size)
+	}
+
+	// Test Deleter
+	volSpec := &volume.Spec{
+		PersistentVolume: persistentSpec,
+	}
+	deleter, err := plug.(*gcePersistentDiskPlugin).newDeleterInternal(volSpec, &fakePDManager{})
+	err = deleter.Delete()
+	if err != nil {
+		t.Errorf("Deleter() failed: %v", err)
+	}
 }
 
 func TestPersistentClaimReadOnlyFlag(t *testing.T) {
diff --git a/pkg/volume/gce_pd/gce_util.go b/pkg/volume/gce_pd/gce_util.go
index 7626394a5a6..3f184f78914 100644
--- a/pkg/volume/gce_pd/gce_util.go
+++ b/pkg/volume/gce_pd/gce_util.go
@@ -31,6 +31,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/exec"
 	"k8s.io/kubernetes/pkg/util/keymutex"
 	"k8s.io/kubernetes/pkg/util/sets"
+	"k8s.io/kubernetes/pkg/volume"
 )
 
 const (
@@ -112,6 +113,39 @@ func (util *GCEDiskUtil) DetachDisk(c *gcePersistentDiskCleaner) error {
 	return nil
 }
 
+func (util *GCEDiskUtil) DeleteVolume(d *gcePersistentDiskDeleter) error {
+	cloud, err := getCloudProvider()
+	if err != nil {
+		return err
+	}
+
+	if err = cloud.DeleteDisk(d.pdName); err != nil {
+		glog.V(2).Infof("Error deleting GCE PD volume %s: %v", d.pdName, err)
+		return err
+	}
+	glog.V(2).Infof("Successfully deleted GCE PD volume %s", d.pdName)
+	return nil
+}
+
+func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, err error) {
+	cloud, err := getCloudProvider()
+	if err != nil {
+		return "", 0, err
+	}
+
+	name := fmt.Sprintf("kube-dynamic-%s", util.NewUUID())
+	requestBytes := c.options.Capacity.Value()
+	// GCE works with gigabytes, convert to GiB with rounding up
+	requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024)
+	err = cloud.CreateDisk(name, int64(requestGB))
+	if err != nil {
+		glog.V(2).Infof("Error creating GCE PD volume: %v", err)
+		return "", 0, err
+	}
+	glog.V(2).Infof("Successfully created GCE PD volume %s", name)
+	return name, int(requestGB), nil
+}
+
 // Attaches the specified persistent disk device to node, verifies that it is attached, and retries if it fails.
 func attachDiskAndVerify(b *gcePersistentDiskBuilder, sdBeforeSet sets.String) (string, error) {
 	devicePaths := getDiskByIdPaths(b.gcePersistentDisk)