diff --git a/cmd/kubelet/app/plugins.go b/cmd/kubelet/app/plugins.go index 41e419c78b0..809f6540ee7 100644 --- a/cmd/kubelet/app/plugins.go +++ b/cmd/kubelet/app/plugins.go @@ -28,6 +28,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/volume/empty_dir" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume/gce_pd" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume/git_repo" + "github.com/GoogleCloudPlatform/kubernetes/pkg/volume/glusterfs" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume/host_path" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume/iscsi" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume/nfs" @@ -55,6 +56,8 @@ func ProbeVolumePlugins() []volume.VolumePlugin { allPlugins = append(allPlugins, nfs.ProbeVolumePlugins()...) allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...) allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...) + allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...) + return allPlugins } diff --git a/examples/examples_test.go b/examples/examples_test.go index e0c182032d0..e0d9cebc5cb 100644 --- a/examples/examples_test.go +++ b/examples/examples_test.go @@ -181,6 +181,9 @@ func TestExampleObjectSchemas(t *testing.T) { "../examples/iscsi/v1beta3": { "iscsi": &api.Pod{}, }, + "../examples/glusterfs/v1beta3": { + "glusterfs": &api.Pod{}, + }, } for path, expected := range cases { diff --git a/examples/glusterfs/README.md b/examples/glusterfs/README.md new file mode 100644 index 00000000000..e9d7a7dbc72 --- /dev/null +++ b/examples/glusterfs/README.md @@ -0,0 +1,47 @@ +## Glusterfs + +[Glusterfs](http://www.gluster.org) is an open source scale-out filesystem. These examples provide information about how to allow containers to use Glusterfs volumes. + +The example assumes that the Glusterfs client package is installed on all nodes. + +### Prerequisites + +Install the Glusterfs client package on the Kubernetes hosts. 
+ +### Create a POD + +The following *volume* spec illustrates a sample configuration. + +```js +{ + "name": "glusterfsvol", + "glusterfs": { + "endpoints": "glusterfs-cluster", + "path": "kube_vol", + "readOnly": true + } +} +``` + +The parameters are explained as follows. + +- **endpoints** is the endpoints name that represents a Gluster cluster configuration. *kubelet* is optimized to avoid mount storms; it will randomly pick one from the endpoints to mount. If this host is unresponsive, the next Gluster host in the endpoints is automatically selected. +- **path** is the Glusterfs volume name. +- **readOnly** is the boolean that sets the mountpoint readOnly or readWrite. + +Detailed POD and Gluster cluster endpoints examples can be found at [v1beta3/](v1beta3/) and [endpoints/](endpoints/). + +```shell +# create gluster cluster endpoints +$ kubectl create -f examples/glusterfs/endpoints/glusterfs-endpoints.json +# create a container using gluster volume +$ kubectl create -f examples/glusterfs/v1beta3/glusterfs.json +``` +Once that's up, you can list the pods and endpoints in the cluster, to verify that the master is running: + +```shell +$ kubectl get endpoints +$ kubectl get pods +``` + +If you ssh to that machine, you can run `docker ps` to see the actual pod and `mount` to see if the Glusterfs volume is mounted. 
\ No newline at end of file diff --git a/examples/glusterfs/endpoints/glusterfs-endpoints.json b/examples/glusterfs/endpoints/glusterfs-endpoints.json new file mode 100644 index 00000000000..886f7a8c875 --- /dev/null +++ b/examples/glusterfs/endpoints/glusterfs-endpoints.json @@ -0,0 +1,13 @@ +{ + "apiVersion": "v1beta1", + "id": "glusterfs-cluster", + "kind": "Endpoints", + "metadata": { + "name": "glusterfs-cluster" + }, + "Endpoints": [ + "10.16.154.81:0", + "10.16.154.82:0", + "10.16.154.83:0" + ] +} \ No newline at end of file diff --git a/examples/glusterfs/v1beta3/glusterfs.json b/examples/glusterfs/v1beta3/glusterfs.json new file mode 100644 index 00000000000..664a35dc0fa --- /dev/null +++ b/examples/glusterfs/v1beta3/glusterfs.json @@ -0,0 +1,32 @@ +{ + "apiVersion": "v1beta3", + "id": "glusterfs", + "kind": "Pod", + "metadata": { + "name": "glusterfs" + }, + "spec": { + "containers": [ + { + "name": "glusterfs", + "image": "kubernetes/pause", + "volumeMounts": [ + { + "mountPath": "/mnt/glusterfs", + "name": "glusterfsvol" + } + ] + } + ], + "volumes": [ + { + "name": "glusterfsvol", + "glusterfs": { + "endpoints": "glusterfs-cluster", + "path": "kube_vol", + "readOnly": true + } + } + ] + } +} \ No newline at end of file diff --git a/pkg/api/testing/fuzzer.go b/pkg/api/testing/fuzzer.go index fa5f4d56d9c..ae3df8de5ce 100644 --- a/pkg/api/testing/fuzzer.go +++ b/pkg/api/testing/fuzzer.go @@ -174,6 +174,7 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer { // Exactly one of the fields should be set. //FIXME: the fuzz can still end up nil. What if fuzz allowed me to say that? 
fuzzOneOf(c, &vs.HostPath, &vs.EmptyDir, &vs.GCEPersistentDisk, &vs.GitRepo, &vs.Secret, &vs.NFS, &vs.ISCSI) + fuzzOneOf(c, &vs.HostPath, &vs.EmptyDir, &vs.GCEPersistentDisk, &vs.GitRepo, &vs.Secret, &vs.NFS, &vs.ISCSI, &vs.Glusterfs) }, func(d *api.DNSPolicy, c fuzz.Continue) { policies := []api.DNSPolicy{api.DNSClusterFirst, api.DNSDefault} diff --git a/pkg/api/types.go b/pkg/api/types.go index c2b603ec247..2952bb29d2d 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -198,6 +198,8 @@ type VolumeSource struct { // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. ISCSI *ISCSIVolumeSource `json:"iscsi"` + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime + Glusterfs *GlusterfsVolumeSource `json:"glusterfs"` } // Similar to VolumeSource but meant for the administrator who creates PVs. @@ -210,6 +212,8 @@ type PersistentVolumeSource struct { // This is useful for development and testing only. // on-host storage is not supported in any way HostPath *HostPathVolumeSource `json:"hostPath"` + // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod + Glusterfs *GlusterfsVolumeSource `json:"glusterfs"` } type PersistentVolume struct { @@ -421,6 +425,19 @@ type NFSVolumeSource struct { ReadOnly bool `json:"readOnly,omitempty"` } +// GlusterfsVolumeSource represents a Glusterfs Mount that lasts the lifetime of a pod +type GlusterfsVolumeSource struct { + // Required: EndpointsName is the endpoint name that details Glusterfs topology + EndpointsName string `json:"endpoints"` + + // Required: Path is the Glusterfs volume path + Path string `json:"path"` + + // Optional: Defaults to false (read/write). 
ReadOnly here will force + // the Glusterfs to be mounted with read-only permissions + ReadOnly bool `json:"readOnly,omitempty"` +} + // ContainerPort represents a network port in a single container type ContainerPort struct { // Optional: If specified, this must be a DNS_LABEL. Each named port diff --git a/pkg/api/v1beta1/conversion.go b/pkg/api/v1beta1/conversion.go index 99abb4f72db..f7f20323375 100644 --- a/pkg/api/v1beta1/conversion.go +++ b/pkg/api/v1beta1/conversion.go @@ -1179,6 +1179,9 @@ func init() { if err := s.Convert(&in.NFS, &out.NFS, 0); err != nil { return err } + if err := s.Convert(&in.Glusterfs, &out.Glusterfs, 0); err != nil { + return err + } return nil }, func(in *VolumeSource, out *newer.VolumeSource, s conversion.Scope) error { @@ -1203,6 +1206,9 @@ func init() { if err := s.Convert(&in.NFS, &out.NFS, 0); err != nil { return err } + if err := s.Convert(&in.Glusterfs, &out.Glusterfs, 0); err != nil { + return err + } return nil }, diff --git a/pkg/api/v1beta1/types.go b/pkg/api/v1beta1/types.go index e6d4fd33174..8fbcb8e025b 100644 --- a/pkg/api/v1beta1/types.go +++ b/pkg/api/v1beta1/types.go @@ -114,6 +114,8 @@ type VolumeSource struct { // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. ISCSI *ISCSIVolumeSource `json:"iscsi" description:"iSCSI disk attached to host machine on demand"` + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime + Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume that will be mounted on the host machine "` } // Similar to VolumeSource but meant for the administrator who creates PVs. @@ -126,6 +128,8 @@ type PersistentVolumeSource struct { // This is useful for development and testing only. // on-host storage is not supported in any way. 
HostPath *HostPathVolumeSource `json:"hostPath" description:"a HostPath provisioned by a developer or tester; for develment use only"` + // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod + Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume resource provisioned by an admin"` } type PersistentVolume struct { @@ -1493,3 +1497,16 @@ type SecretList struct { Items []Secret `json:"items" description:"items is a list of secret objects"` } + +// GlusterfsVolumeSource represents a Glusterfs Mount that lasts the lifetime of a pod +type GlusterfsVolumeSource struct { + // Required: EndpointsName is the endpoint name that details Glusterfs topology + EndpointsName string `json:"endpoints" description:"gluster hosts endpoints name"` + + // Required: Path is the Glusterfs volume path + Path string `json:"path" description:"path to gluster volume"` + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the Glusterfs volume to be mounted with read-only permissions + ReadOnly bool `json:"readOnly,omitempty" description:"Glusterfs volume to be mounted with read-only permissions"` +} diff --git a/pkg/api/v1beta2/conversion.go b/pkg/api/v1beta2/conversion.go index e30869a3063..0faad5d5a9b 100644 --- a/pkg/api/v1beta2/conversion.go +++ b/pkg/api/v1beta2/conversion.go @@ -1106,6 +1106,9 @@ func init() { if err := s.Convert(&in.NFS, &out.NFS, 0); err != nil { return err } + if err := s.Convert(&in.Glusterfs, &out.Glusterfs, 0); err != nil { + return err + } return nil }, func(in *VolumeSource, out *newer.VolumeSource, s conversion.Scope) error { @@ -1130,6 +1133,9 @@ func init() { if err := s.Convert(&in.NFS, &out.NFS, 0); err != nil { return err } + if err := s.Convert(&in.Glusterfs, &out.Glusterfs, 0); err != nil { + return err + } return nil }, diff --git a/pkg/api/v1beta2/types.go b/pkg/api/v1beta2/types.go index 1cb0522e750..79226417156 100644 --- a/pkg/api/v1beta2/types.go +++ 
b/pkg/api/v1beta2/types.go @@ -83,6 +83,8 @@ type VolumeSource struct { // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. ISCSI *ISCSIVolumeSource `json:"iscsi" description:"iSCSI disk attached to host machine on demand"` + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime + Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume that will be mounted on the host machine "` } // Similar to VolumeSource but meant for the administrator who creates PVs. @@ -95,6 +97,8 @@ type PersistentVolumeSource struct { // This is useful for development and testing only. // on-host storage is not supported in any way. HostPath *HostPathVolumeSource `json:"hostPath" description:"a HostPath provisioned by a developer or tester; for develment use only"` + // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod + Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume resource provisioned by an admin"` } type PersistentVolume struct { @@ -307,6 +311,19 @@ type ISCSIVolumeSource struct { ReadOnly bool `json:"readOnly,omitempty" description:"read-only if true, read-write otherwise (false or unspecified)"` } +// GlusterfsVolumeSource represents a Glusterfs Mount that lasts the lifetime of a pod +type GlusterfsVolumeSource struct { + // Required: EndpointsName is the endpoint name that details Glusterfs topology + EndpointsName string `json:"endpoints" description:"gluster hosts endpoints name"` + + // Required: Path is the Glusterfs volume path + Path string `json:"path" description:"path to gluster volume"` + + // Optional: Defaults to false (read/write). 
ReadOnly here will force + // the Glusterfs volume to be mounted with read-only permissions + ReadOnly bool `json:"readOnly,omitempty" description:"glusterfs volume to be mounted with read-only permissions"` +} + // VolumeMount describes a mounting of a Volume within a container. // // https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/volumes.md diff --git a/pkg/api/v1beta3/types.go b/pkg/api/v1beta3/types.go index 23070a36477..9775901db21 100644 --- a/pkg/api/v1beta3/types.go +++ b/pkg/api/v1beta3/types.go @@ -215,6 +215,8 @@ type VolumeSource struct { // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. ISCSI *ISCSIVolumeSource `json:"iscsi" description:"iSCSI disk attached to host machine on demand"` + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime + Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume that will be mounted on the host machine "` } // Similar to VolumeSource but meant for the administrator who creates PVs. @@ -227,6 +229,8 @@ type PersistentVolumeSource struct { // This is useful for development and testing only. // on-host storage is not supported in any way. 
HostPath *HostPathVolumeSource `json:"hostPath" description:"a HostPath provisioned by a developer or tester; for develment use only"` + // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod + Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume resource provisioned by an admin"` } type PersistentVolume struct { @@ -343,6 +347,19 @@ type EmptyDirVolumeSource struct { Medium StorageType `json:"medium" description:"type of storage used to back the volume; must be an empty string (default) or Memory"` } +// GlusterfsVolumeSource represents a Glusterfs Mount that lasts the lifetime of a pod +type GlusterfsVolumeSource struct { + // Required: EndpointsName is the endpoint name that details Glusterfs topology + EndpointsName string `json:"endpoints" description:"gluster hosts endpoints name"` + + // Required: Path is the Glusterfs volume path + Path string `json:"path" description:"path to gluster volume"` + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the Glusterfs volume to be mounted with read-only permissions + ReadOnly bool `json:"readOnly,omitempty" description:"glusterfs volume to be mounted with read-only permissions"` +} + // StorageType defines ways that storage can be allocated to a volume. type StorageType string diff --git a/pkg/api/validation/validation.go b/pkg/api/validation/validation.go index 32ef1ea1f14..9563a7ce566 100644 --- a/pkg/api/validation/validation.go +++ b/pkg/api/validation/validation.go @@ -311,6 +311,10 @@ func validateSource(source *api.VolumeSource) errs.ValidationErrorList { numVolumes++ allErrs = append(allErrs, validateISCSIVolumeSource(source.ISCSI).Prefix("iscsi")...) } + if source.Glusterfs != nil { + numVolumes++ + allErrs = append(allErrs, validateGlusterfs(source.Glusterfs).Prefix("glusterfs")...) 
+ } if numVolumes != 1 { allErrs = append(allErrs, errs.NewFieldInvalid("", source, "exactly 1 volume type is required")) } @@ -386,6 +390,17 @@ func validateNFS(nfs *api.NFSVolumeSource) errs.ValidationErrorList { return allErrs } +func validateGlusterfs(glusterfs *api.GlusterfsVolumeSource) errs.ValidationErrorList { + allErrs := errs.ValidationErrorList{} + if glusterfs.EndpointsName == "" { + allErrs = append(allErrs, errs.NewFieldRequired("endpoints")) + } + if glusterfs.Path == "" { + allErrs = append(allErrs, errs.NewFieldRequired("path")) + } + return allErrs +} + func ValidatePersistentVolumeName(name string, prefix bool) (bool, string) { return nameIsDNSSubdomain(name, prefix) } diff --git a/pkg/api/validation/validation_test.go b/pkg/api/validation/validation_test.go index 8444cd78400..b866bbec22d 100644 --- a/pkg/api/validation/validation_test.go +++ b/pkg/api/validation/validation_test.go @@ -519,6 +519,7 @@ func TestValidateVolumes(t *testing.T) { {Name: "gitrepo", VolumeSource: api.VolumeSource{GitRepo: &api.GitRepoVolumeSource{"my-repo", "hashstring"}}}, {Name: "iscsidisk", VolumeSource: api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{"127.0.0.1", "iqn.2015-02.example.com:test", 1, "ext4", false}}}, {Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{"my-secret"}}}, + {Name: "glusterfs", VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{"host1", "path", false}}}, } names, errs := validateVolumes(successCase) if len(errs) != 0 { @@ -530,6 +531,8 @@ func TestValidateVolumes(t *testing.T) { emptyVS := api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}} emptyPortal := api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{"", "iqn.2015-02.example.com:test", 1, "ext4", false}} emptyIQN := api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{"127.0.0.1", "", 1, "ext4", false}} + emptyHosts := api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{"", "path", false}} + emptyPath := api.VolumeSource{Glusterfs: 
&api.GlusterfsVolumeSource{"host", "", false}} errorCases := map[string]struct { V []api.Volume T errors.ValidationErrorType @@ -541,6 +544,8 @@ func TestValidateVolumes(t *testing.T) { "name not unique": {[]api.Volume{{Name: "abc", VolumeSource: emptyVS}, {Name: "abc", VolumeSource: emptyVS}}, errors.ValidationErrorTypeDuplicate, "[1].name"}, "empty portal": {[]api.Volume{{Name: "badportal", VolumeSource: emptyPortal}}, errors.ValidationErrorTypeRequired, "[0].source.iscsi.targetPortal"}, "empty iqn": {[]api.Volume{{Name: "badiqn", VolumeSource: emptyIQN}}, errors.ValidationErrorTypeRequired, "[0].source.iscsi.iqn"}, + "empty hosts": {[]api.Volume{{Name: "badhost", VolumeSource: emptyHosts}}, errors.ValidationErrorTypeRequired, "[0].source.glusterfs.endpoints"}, + "empty path": {[]api.Volume{{Name: "badpath", VolumeSource: emptyPath}}, errors.ValidationErrorTypeRequired, "[0].source.glusterfs.path"}, } for k, v := range errorCases { _, errs := validateVolumes(v.V) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go new file mode 100644 index 00000000000..b711f5d4021 --- /dev/null +++ b/pkg/volume/glusterfs/glusterfs.go @@ -0,0 +1,219 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package glusterfs + +import ( + "math/rand" + "os" + "strings" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/types" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount" + "github.com/GoogleCloudPlatform/kubernetes/pkg/volume" + "github.com/golang/glog" +) + +// This is the primary entrypoint for volume plugins. +func ProbeVolumePlugins() []volume.VolumePlugin { + return []volume.VolumePlugin{&glusterfsPlugin{nil}} +} + +type glusterfsPlugin struct { + host volume.VolumeHost +} + +var _ volume.VolumePlugin = &glusterfsPlugin{} + +const ( + glusterfsPluginName = "kubernetes.io/glusterfs" +) + +func (plugin *glusterfsPlugin) Init(host volume.VolumeHost) { + plugin.host = host +} + +func (plugin *glusterfsPlugin) Name() string { + return glusterfsPluginName +} + +func (plugin *glusterfsPlugin) CanSupport(spec *api.Volume) bool { + if spec.VolumeSource.Glusterfs != nil { + return true + } + return false +} + +func (plugin *glusterfsPlugin) GetAccessModes() []api.AccessModeType { + return []api.AccessModeType{ + api.ReadWriteOnce, + api.ReadOnlyMany, + api.ReadWriteMany, + } +} + +func (plugin *glusterfsPlugin) NewBuilder(spec *api.Volume, podRef *api.ObjectReference) (volume.Builder, error) { + ep_name := spec.VolumeSource.Glusterfs.EndpointsName + ns := api.NamespaceDefault + ep, err := plugin.host.GetKubeClient().Endpoints(ns).Get(ep_name) + if err != nil { + glog.Errorf("Glusterfs: failed to get endpoints %s[%v]", ep_name, err) + return nil, err + } + glog.V(1).Infof("Glusterfs: endpoints %v", ep) + return plugin.newBuilderInternal(spec, ep, podRef, mount.New(), exec.New()) +} + +func (plugin *glusterfsPlugin) newBuilderInternal(spec *api.Volume, ep *api.Endpoints, podRef *api.ObjectReference, mounter mount.Interface, exe exec.Interface) (volume.Builder, error) { + return 
&glusterfs{ + volName: spec.Name, + hosts: ep, + path: spec.VolumeSource.Glusterfs.Path, + readonly: spec.VolumeSource.Glusterfs.ReadOnly, + mounter: mounter, + exe: exe, + podRef: podRef, + plugin: plugin, + }, nil +} + +func (plugin *glusterfsPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) { + return plugin.newCleanerInternal(volName, podUID, mount.New()) +} + +func (plugin *glusterfsPlugin) newCleanerInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) { + return &glusterfs{ + volName: volName, + mounter: mounter, + podRef: &api.ObjectReference{UID: podUID}, + plugin: plugin, + }, nil +} + +// Glusterfs volumes represent a bare host file or directory mount of an Glusterfs export. +type glusterfs struct { + volName string + podRef *api.ObjectReference + hosts *api.Endpoints + path string + readonly bool + mounter mount.Interface + exe exec.Interface + plugin *glusterfsPlugin +} + +// SetUp attaches the disk and bind mounts to the volume path. 
+func (glusterfsVolume *glusterfs) SetUp() error { + return glusterfsVolume.SetUpAt(glusterfsVolume.GetPath()) +} + +func (glusterfsVolume *glusterfs) SetUpAt(dir string) error { + mountpoint, err := glusterfsVolume.mounter.IsMountPoint(dir) + glog.V(4).Infof("Glusterfs: mount set up: %s %v %v", dir, mountpoint, err) + if err != nil && !os.IsNotExist(err) { + return err + } + if mountpoint { + return nil + } + path := glusterfsVolume.path + os.MkdirAll(dir, 0750) + err = glusterfsVolume.execMount(glusterfsVolume.hosts, path, dir, glusterfsVolume.readonly) + if err == nil { + return nil + } + + // cleanup upon failure + glusterfsVolume.cleanup(dir) + // return error + return err +} + +func (glusterfsVolume *glusterfs) GetPath() string { + name := glusterfsPluginName + return glusterfsVolume.plugin.host.GetPodVolumeDir(glusterfsVolume.podRef.UID, util.EscapeQualifiedNameForDisk(name), glusterfsVolume.volName) +} + +func (glusterfsVolume *glusterfs) TearDown() error { + return glusterfsVolume.TearDownAt(glusterfsVolume.GetPath()) +} + +func (glusterfsVolume *glusterfs) TearDownAt(dir string) error { + return glusterfsVolume.cleanup(dir) +} + +func (glusterfsVolume *glusterfs) cleanup(dir string) error { + mountpoint, err := glusterfsVolume.mounter.IsMountPoint(dir) + if err != nil { + glog.Errorf("Glusterfs: Error checking IsMountPoint: %v", err) + return err + } + if !mountpoint { + return os.RemoveAll(dir) + } + + if err := glusterfsVolume.mounter.Unmount(dir, 0); err != nil { + glog.Errorf("Glusterfs: Unmounting failed: %v", err) + return err + } + mountpoint, mntErr := glusterfsVolume.mounter.IsMountPoint(dir) + if mntErr != nil { + glog.Errorf("Glusterfs: IsMountpoint check failed: %v", mntErr) + return mntErr + } + if !mountpoint { + if err := os.RemoveAll(dir); err != nil { + return err + } + } + + return nil +} + +func (glusterfsVolume *glusterfs) execMount(hosts *api.Endpoints, path string, mountpoint string, readonly bool) error { + var errs error + var 
command exec.Cmd + var mountArgs []string + var opt []string + + // build option array + if readonly == true { + opt = []string{"-o", "ro"} + } else { + opt = []string{"-o", "rw"} + } + + l := len(hosts.Subsets) + // avoid mount storm, pick a host randomly + start := rand.Int() % l + // iterate all hosts until mount succeeds. + for i := start; i < start+l; i++ { + arg := []string{"-t", "glusterfs", hosts.Subsets[i%l].Addresses[0].IP + ":" + path, mountpoint} + mountArgs = append(arg, opt...) + glog.V(1).Infof("Glusterfs: mount cmd: mount %v", strings.Join(mountArgs, " ")) + command = glusterfsVolume.exe.Command("mount", mountArgs...) + + _, errs = command.CombinedOutput() + if errs == nil { + return nil + } + } + glog.Errorf("Glusterfs: mount failed: %v", errs) + return errs +} diff --git a/pkg/volume/glusterfs/glusterfs_test.go b/pkg/volume/glusterfs/glusterfs_test.go new file mode 100644 index 00000000000..32e9f1ef2f8 --- /dev/null +++ b/pkg/volume/glusterfs/glusterfs_test.go @@ -0,0 +1,134 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package glusterfs + +import ( + "os" + "testing" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/types" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount" + "github.com/GoogleCloudPlatform/kubernetes/pkg/volume" +) + +func TestCanSupport(t *testing.T) { + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("fake", nil, nil)) + plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs") + if err != nil { + t.Errorf("Can't find the plugin by name") + } + if plug.Name() != "kubernetes.io/glusterfs" { + t.Errorf("Wrong name: %s", plug.Name()) + } + if !plug.CanSupport(&api.Volume{VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{}}}) { + t.Errorf("Expected true") + } + if plug.CanSupport(&api.Volume{VolumeSource: api.VolumeSource{}}) { + t.Errorf("Expected false") + } +} + +func TestGetAccessModes(t *testing.T) { + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) + + plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/glusterfs") + if err != nil { + t.Errorf("Can't find the plugin by name") + } + if !contains(plug.GetAccessModes(), api.ReadWriteOnce) || !contains(plug.GetAccessModes(), api.ReadOnlyMany) || !contains(plug.GetAccessModes(), api.ReadWriteMany) { + t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany) + } +} + +func contains(modes []api.AccessModeType, mode api.AccessModeType) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +func TestPlugin(t *testing.T) { + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) + plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs") + if 
err != nil { + t.Errorf("Can't find the plugin by name") + } + spec := &api.Volume{ + Name: "vol1", + VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{"ep", "vol", false}}, + } + ep := &api.Endpoints{ObjectMeta: api.ObjectMeta{Name: "foo"}, Subsets: []api.EndpointSubset{{ + Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}}}}} + var fcmd exec.FakeCmd + fcmd = exec.FakeCmd{ + CombinedOutputScript: []exec.FakeCombinedOutputAction{ + // mount + func() ([]byte, error) { + return []byte{}, nil + }, + }, + } + fake := exec.FakeExec{ + CommandScript: []exec.FakeCommandAction{ + func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) }, + }, + } + builder, err := plug.(*glusterfsPlugin).newBuilderInternal(spec, ep, &api.ObjectReference{UID: types.UID("poduid")}, &mount.FakeMounter{}, &fake) + volumePath := builder.GetPath() + if err != nil { + t.Errorf("Failed to make a new Builder: %v", err) + } + if builder == nil { + t.Errorf("Got a nil Builder: %v") + } + path := builder.GetPath() + if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~glusterfs/vol1" { + t.Errorf("Got unexpected path: %s", path) + } + if err := builder.SetUp(); err != nil { + t.Errorf("Expected success, got: %v", err) + } + if _, err := os.Stat(volumePath); err != nil { + if os.IsNotExist(err) { + t.Errorf("SetUp() failed, volume path not created: %s", volumePath) + } else { + t.Errorf("SetUp() failed: %v", err) + } + } + cleaner, err := plug.(*glusterfsPlugin).newCleanerInternal("vol1", types.UID("poduid"), &mount.FakeMounter{}) + if err != nil { + t.Errorf("Failed to make a new Cleaner: %v", err) + } + if cleaner == nil { + t.Errorf("Got a nil Cleaner: %v") + } + if err := cleaner.TearDown(); err != nil { + t.Errorf("Expected success, got: %v", err) + } + if _, err := os.Stat(volumePath); err == nil { + t.Errorf("TearDown() failed, volume path still exists: %s", volumePath) + } else if !os.IsNotExist(err) { + t.Errorf("SetUp() failed: %v", err) 
+ } +}