implement glusterfs volume plugin

Signed-off-by: Huamin Chen <hchen@redhat.com>
Huamin Chen 2015-03-26 14:53:21 -04:00
parent d6851729d2
commit a278ceeb0a
16 changed files with 552 additions and 0 deletions

View File

@ -28,6 +28,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/empty_dir"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/gce_pd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/git_repo"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/glusterfs"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/host_path"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/iscsi"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/nfs"
@ -55,6 +56,8 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
allPlugins = append(allPlugins, nfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
return allPlugins
}

View File

@ -181,6 +181,9 @@ func TestExampleObjectSchemas(t *testing.T) {
"../examples/iscsi/v1beta3": {
"iscsi": &api.Pod{},
},
"../examples/glusterfs/v1beta3": {
"glusterfs": &api.Pod{},
},
}
for path, expected := range cases {

View File

@ -0,0 +1,47 @@
## Glusterfs
[Glusterfs](http://www.gluster.org) is an open source scale-out filesystem. These examples show how to allow containers to use Glusterfs volumes.
The examples assume that the Glusterfs client package is installed on all Kubernetes nodes.
### Prerequisites
Install the Glusterfs client package on the Kubernetes hosts.
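For example, on Red Hat-based or Debian-based distributions the client can typically be installed from the distribution packages (exact package names may vary by release):

```shell
# Red Hat / CentOS / Fedora: provides mount.glusterfs
$ yum install glusterfs-fuse
# Debian / Ubuntu
$ apt-get install glusterfs-client
```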
### Create a Pod
The following *volume* spec illustrates a sample configuration.
```js
{
"name": "glusterfsvol",
"glusterfs": {
"endpoints": "glusterfs-cluster",
"path": "kube_vol",
"readOnly": true
}
}
```
The parameters are explained as follows:

- **endpoints** is the name of the Endpoints object that represents a Gluster cluster configuration. *kubelet* is optimized to avoid mount storms: it randomly picks one host from the endpoints to mount, and if that host is unresponsive, the next Gluster host in the endpoints is automatically selected.
- **path** is the Glusterfs volume name.
- **readOnly** is a boolean that sets the mount point read-only or read-write.

Detailed pod and Gluster cluster endpoints examples can be found at [v1beta3/](v1beta3/) and [endpoints/](endpoints/). Each endpoint entry is an `ip:port` pair; the port value is required by the Endpoints schema, but the Glusterfs mount uses only the IP addresses.
```shell
# create gluster cluster endpoints
$ kubectl create -f examples/glusterfs/endpoints/glusterfs-endpoints.json
# create a container using gluster volume
$ kubectl create -f examples/glusterfs/v1beta3/glusterfs.json
```
Once the pod is created, you can list the pods and endpoints in the cluster to verify that they are up:
```shell
$ kubectl get endpoints
$ kubectl get pods
```
If you ssh into the node where the pod is scheduled, you can run `docker ps` to see the actual pod and `mount` to see if the Glusterfs volume is mounted.
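For example, a minimal check on that node (the `grep` filter is illustrative):

```shell
$ docker ps
$ mount | grep glusterfs
```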

View File

@ -0,0 +1,13 @@
{
"apiVersion": "v1beta1",
"id": "glusterfs-cluster",
"kind": "Endpoints",
"metadata": {
"name": "glusterfs-cluster"
},
"Endpoints": [
"10.16.154.81:0",
"10.16.154.82:0",
"10.16.154.83:0"
]
}

View File

@ -0,0 +1,32 @@
{
"apiVersion": "v1beta3",
"id": "glusterfs",
"kind": "Pod",
"metadata": {
"name": "glusterfs"
},
"spec": {
"containers": [
{
"name": "glusterfs",
"image": "kubernetes/pause",
"volumeMounts": [
{
"mountPath": "/mnt/glusterfs",
"name": "glusterfsvol"
}
]
}
],
"volumes": [
{
"name": "glusterfsvol",
"glusterfs": {
"endpoints": "glusterfs-cluster",
"path": "kube_vol",
"readOnly": true
}
}
]
}
}

View File

@ -174,6 +174,7 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
// Exactly one of the fields should be set.
//FIXME: the fuzz can still end up nil. What if fuzz allowed me to say that?
fuzzOneOf(c, &vs.HostPath, &vs.EmptyDir, &vs.GCEPersistentDisk, &vs.GitRepo, &vs.Secret, &vs.NFS, &vs.ISCSI)
fuzzOneOf(c, &vs.HostPath, &vs.EmptyDir, &vs.GCEPersistentDisk, &vs.GitRepo, &vs.Secret, &vs.NFS, &vs.ISCSI, &vs.Glusterfs)
},
func(d *api.DNSPolicy, c fuzz.Continue) {
policies := []api.DNSPolicy{api.DNSClusterFirst, api.DNSDefault}

View File

@ -198,6 +198,8 @@ type VolumeSource struct {
// ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
ISCSI *ISCSIVolumeSource `json:"iscsi"`
// Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime
Glusterfs *GlusterfsVolumeSource `json:"glusterfs"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -210,6 +212,8 @@ type PersistentVolumeSource struct {
// This is useful for development and testing only.
// on-host storage is not supported in any way
HostPath *HostPathVolumeSource `json:"hostPath"`
// Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod
Glusterfs *GlusterfsVolumeSource `json:"glusterfs"`
}
type PersistentVolume struct {
@ -421,6 +425,19 @@ type NFSVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty"`
}
// GlusterfsVolumeSource represents a Glusterfs Mount that lasts the lifetime of a pod
type GlusterfsVolumeSource struct {
// Required: EndpointsName is the endpoint name that details Glusterfs topology
EndpointsName string `json:"endpoints"`
// Required: Path is the Glusterfs volume path
Path string `json:"path"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the Glusterfs to be mounted with read-only permissions
ReadOnly bool `json:"readOnly,omitempty"`
}
// ContainerPort represents a network port in a single container
type ContainerPort struct {
// Optional: If specified, this must be a DNS_LABEL. Each named port

View File

@ -1179,6 +1179,9 @@ func init() {
if err := s.Convert(&in.NFS, &out.NFS, 0); err != nil {
return err
}
if err := s.Convert(&in.Glusterfs, &out.Glusterfs, 0); err != nil {
return err
}
return nil
},
func(in *VolumeSource, out *newer.VolumeSource, s conversion.Scope) error {
@ -1203,6 +1206,9 @@ func init() {
if err := s.Convert(&in.NFS, &out.NFS, 0); err != nil {
return err
}
if err := s.Convert(&in.Glusterfs, &out.Glusterfs, 0); err != nil {
return err
}
return nil
},

View File

@ -114,6 +114,8 @@ type VolumeSource struct {
// ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
ISCSI *ISCSIVolumeSource `json:"iscsi" description:"iSCSI disk attached to host machine on demand"`
// Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime
Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume that will be mounted on the host machine"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -126,6 +128,8 @@ type PersistentVolumeSource struct {
// This is useful for development and testing only.
// on-host storage is not supported in any way.
HostPath *HostPathVolumeSource `json:"hostPath" description:"a HostPath provisioned by a developer or tester; for development use only"`
// Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod
Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume resource provisioned by an admin"`
}
type PersistentVolume struct {
@ -1493,3 +1497,16 @@ type SecretList struct {
Items []Secret `json:"items" description:"items is a list of secret objects"`
}
// GlusterfsVolumeSource represents a Glusterfs Mount that lasts the lifetime of a pod
type GlusterfsVolumeSource struct {
// Required: EndpointsName is the endpoint name that details Glusterfs topology
EndpointsName string `json:"endpoints" description:"gluster hosts endpoints name"`
// Required: Path is the Glusterfs volume path
Path string `json:"path" description:"path to gluster volume"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the Glusterfs volume to be mounted with read-only permissions
ReadOnly bool `json:"readOnly,omitempty" description:"Glusterfs volume to be mounted with read-only permissions"`
}

View File

@ -1106,6 +1106,9 @@ func init() {
if err := s.Convert(&in.NFS, &out.NFS, 0); err != nil {
return err
}
if err := s.Convert(&in.Glusterfs, &out.Glusterfs, 0); err != nil {
return err
}
return nil
},
func(in *VolumeSource, out *newer.VolumeSource, s conversion.Scope) error {
@ -1130,6 +1133,9 @@ func init() {
if err := s.Convert(&in.NFS, &out.NFS, 0); err != nil {
return err
}
if err := s.Convert(&in.Glusterfs, &out.Glusterfs, 0); err != nil {
return err
}
return nil
},

View File

@ -83,6 +83,8 @@ type VolumeSource struct {
// ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
ISCSI *ISCSIVolumeSource `json:"iscsi" description:"iSCSI disk attached to host machine on demand"`
// Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime
Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume that will be mounted on the host machine"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -95,6 +97,8 @@ type PersistentVolumeSource struct {
// This is useful for development and testing only.
// on-host storage is not supported in any way.
HostPath *HostPathVolumeSource `json:"hostPath" description:"a HostPath provisioned by a developer or tester; for development use only"`
// Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod
Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume resource provisioned by an admin"`
}
type PersistentVolume struct {
@ -307,6 +311,19 @@ type ISCSIVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty" description:"read-only if true, read-write otherwise (false or unspecified)"`
}
// GlusterfsVolumeSource represents a Glusterfs Mount that lasts the lifetime of a pod
type GlusterfsVolumeSource struct {
// Required: EndpointsName is the endpoint name that details Glusterfs topology
EndpointsName string `json:"endpoints" description:"gluster hosts endpoints name"`
// Required: Path is the Glusterfs volume path
Path string `json:"path" description:"path to gluster volume"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the Glusterfs volume to be mounted with read-only permissions
ReadOnly bool `json:"readOnly,omitempty" description:"glusterfs volume to be mounted with read-only permissions"`
}
// VolumeMount describes a mounting of a Volume within a container.
//
// https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/volumes.md

View File

@ -215,6 +215,8 @@ type VolumeSource struct {
// ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
ISCSI *ISCSIVolumeSource `json:"iscsi" description:"iSCSI disk attached to host machine on demand"`
// Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime
Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume that will be mounted on the host machine"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -227,6 +229,8 @@ type PersistentVolumeSource struct {
// This is useful for development and testing only.
// on-host storage is not supported in any way.
HostPath *HostPathVolumeSource `json:"hostPath" description:"a HostPath provisioned by a developer or tester; for development use only"`
// Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod
Glusterfs *GlusterfsVolumeSource `json:"glusterfs" description:"Glusterfs volume resource provisioned by an admin"`
}
type PersistentVolume struct {
@ -343,6 +347,19 @@ type EmptyDirVolumeSource struct {
Medium StorageType `json:"medium" description:"type of storage used to back the volume; must be an empty string (default) or Memory"`
}
// GlusterfsVolumeSource represents a Glusterfs Mount that lasts the lifetime of a pod
type GlusterfsVolumeSource struct {
// Required: EndpointsName is the endpoint name that details Glusterfs topology
EndpointsName string `json:"endpoints" description:"gluster hosts endpoints name"`
// Required: Path is the Glusterfs volume path
Path string `json:"path" description:"path to gluster volume"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the Glusterfs volume to be mounted with read-only permissions
ReadOnly bool `json:"readOnly,omitempty" description:"glusterfs volume to be mounted with read-only permissions"`
}
// StorageType defines ways that storage can be allocated to a volume.
type StorageType string

View File

@ -311,6 +311,10 @@ func validateSource(source *api.VolumeSource) errs.ValidationErrorList {
numVolumes++
allErrs = append(allErrs, validateISCSIVolumeSource(source.ISCSI).Prefix("iscsi")...)
}
if source.Glusterfs != nil {
numVolumes++
allErrs = append(allErrs, validateGlusterfs(source.Glusterfs).Prefix("glusterfs")...)
}
if numVolumes != 1 {
allErrs = append(allErrs, errs.NewFieldInvalid("", source, "exactly 1 volume type is required"))
}
@ -386,6 +390,17 @@ func validateNFS(nfs *api.NFSVolumeSource) errs.ValidationErrorList {
return allErrs
}
func validateGlusterfs(glusterfs *api.GlusterfsVolumeSource) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
if glusterfs.EndpointsName == "" {
allErrs = append(allErrs, errs.NewFieldRequired("endpoints"))
}
if glusterfs.Path == "" {
allErrs = append(allErrs, errs.NewFieldRequired("path"))
}
return allErrs
}
func ValidatePersistentVolumeName(name string, prefix bool) (bool, string) {
return nameIsDNSSubdomain(name, prefix)
}

View File

@ -519,6 +519,7 @@ func TestValidateVolumes(t *testing.T) {
{Name: "gitrepo", VolumeSource: api.VolumeSource{GitRepo: &api.GitRepoVolumeSource{"my-repo", "hashstring"}}},
{Name: "iscsidisk", VolumeSource: api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{"127.0.0.1", "iqn.2015-02.example.com:test", 1, "ext4", false}}},
{Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{"my-secret"}}},
{Name: "glusterfs", VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{"host1", "path", false}}},
}
names, errs := validateVolumes(successCase)
if len(errs) != 0 {
@ -530,6 +531,8 @@ func TestValidateVolumes(t *testing.T) {
emptyVS := api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}
emptyPortal := api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{"", "iqn.2015-02.example.com:test", 1, "ext4", false}}
emptyIQN := api.VolumeSource{ISCSI: &api.ISCSIVolumeSource{"127.0.0.1", "", 1, "ext4", false}}
emptyHosts := api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{"", "path", false}}
emptyPath := api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{"host", "", false}}
errorCases := map[string]struct {
V []api.Volume
T errors.ValidationErrorType
@ -541,6 +544,8 @@ func TestValidateVolumes(t *testing.T) {
"name not unique": {[]api.Volume{{Name: "abc", VolumeSource: emptyVS}, {Name: "abc", VolumeSource: emptyVS}}, errors.ValidationErrorTypeDuplicate, "[1].name"},
"empty portal": {[]api.Volume{{Name: "badportal", VolumeSource: emptyPortal}}, errors.ValidationErrorTypeRequired, "[0].source.iscsi.targetPortal"},
"empty iqn": {[]api.Volume{{Name: "badiqn", VolumeSource: emptyIQN}}, errors.ValidationErrorTypeRequired, "[0].source.iscsi.iqn"},
"empty hosts": {[]api.Volume{{Name: "badhost", VolumeSource: emptyHosts}}, errors.ValidationErrorTypeRequired, "[0].source.glusterfs.endpoints"},
"empty path": {[]api.Volume{{Name: "badpath", VolumeSource: emptyPath}}, errors.ValidationErrorTypeRequired, "[0].source.glusterfs.path"},
}
for k, v := range errorCases {
_, errs := validateVolumes(v.V)

View File

@ -0,0 +1,219 @@
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"math/rand"
"os"
"strings"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
"github.com/golang/glog"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&glusterfsPlugin{nil}}
}
type glusterfsPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &glusterfsPlugin{}
const (
glusterfsPluginName = "kubernetes.io/glusterfs"
)
func (plugin *glusterfsPlugin) Init(host volume.VolumeHost) {
plugin.host = host
}
func (plugin *glusterfsPlugin) Name() string {
return glusterfsPluginName
}
func (plugin *glusterfsPlugin) CanSupport(spec *api.Volume) bool {
	return spec.VolumeSource.Glusterfs != nil
}
func (plugin *glusterfsPlugin) GetAccessModes() []api.AccessModeType {
return []api.AccessModeType{
api.ReadWriteOnce,
api.ReadOnlyMany,
api.ReadWriteMany,
}
}
func (plugin *glusterfsPlugin) NewBuilder(spec *api.Volume, podRef *api.ObjectReference) (volume.Builder, error) {
ep_name := spec.VolumeSource.Glusterfs.EndpointsName
ns := api.NamespaceDefault
ep, err := plugin.host.GetKubeClient().Endpoints(ns).Get(ep_name)
if err != nil {
glog.Errorf("Glusterfs: failed to get endpoints %s[%v]", ep_name, err)
return nil, err
}
glog.V(1).Infof("Glusterfs: endpoints %v", ep)
return plugin.newBuilderInternal(spec, ep, podRef, mount.New(), exec.New())
}
func (plugin *glusterfsPlugin) newBuilderInternal(spec *api.Volume, ep *api.Endpoints, podRef *api.ObjectReference, mounter mount.Interface, exe exec.Interface) (volume.Builder, error) {
return &glusterfs{
volName: spec.Name,
hosts: ep,
path: spec.VolumeSource.Glusterfs.Path,
readonly: spec.VolumeSource.Glusterfs.ReadOnly,
mounter: mounter,
exe: exe,
podRef: podRef,
plugin: plugin,
}, nil
}
func (plugin *glusterfsPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
return plugin.newCleanerInternal(volName, podUID, mount.New())
}
func (plugin *glusterfsPlugin) newCleanerInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
return &glusterfs{
volName: volName,
mounter: mounter,
podRef: &api.ObjectReference{UID: podUID},
plugin: plugin,
}, nil
}
// glusterfs volumes represent a mount of a Glusterfs export that lasts the lifetime of a pod.
type glusterfs struct {
volName string
podRef *api.ObjectReference
hosts *api.Endpoints
path string
readonly bool
mounter mount.Interface
exe exec.Interface
plugin *glusterfsPlugin
}
// SetUp mounts the Glusterfs volume at the pod's volume path.
func (glusterfsVolume *glusterfs) SetUp() error {
return glusterfsVolume.SetUpAt(glusterfsVolume.GetPath())
}
func (glusterfsVolume *glusterfs) SetUpAt(dir string) error {
mountpoint, err := glusterfsVolume.mounter.IsMountPoint(dir)
glog.V(4).Infof("Glusterfs: mount set up: %s %v %v", dir, mountpoint, err)
if err != nil && !os.IsNotExist(err) {
return err
}
if mountpoint {
return nil
}
	path := glusterfsVolume.path
	if err := os.MkdirAll(dir, 0750); err != nil {
		return err
	}
err = glusterfsVolume.execMount(glusterfsVolume.hosts, path, dir, glusterfsVolume.readonly)
if err == nil {
return nil
}
	// clean up upon failure and surface the mount error
	glusterfsVolume.cleanup(dir)
	return err
}
func (glusterfsVolume *glusterfs) GetPath() string {
name := glusterfsPluginName
return glusterfsVolume.plugin.host.GetPodVolumeDir(glusterfsVolume.podRef.UID, util.EscapeQualifiedNameForDisk(name), glusterfsVolume.volName)
}
func (glusterfsVolume *glusterfs) TearDown() error {
return glusterfsVolume.TearDownAt(glusterfsVolume.GetPath())
}
func (glusterfsVolume *glusterfs) TearDownAt(dir string) error {
return glusterfsVolume.cleanup(dir)
}
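// cleanup unmounts dir if it is still a mount point and then removes the directory; it is shared by TearDownAt and failed SetUpAt calls.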
func (glusterfsVolume *glusterfs) cleanup(dir string) error {
mountpoint, err := glusterfsVolume.mounter.IsMountPoint(dir)
if err != nil {
glog.Errorf("Glusterfs: Error checking IsMountPoint: %v", err)
return err
}
if !mountpoint {
return os.RemoveAll(dir)
}
if err := glusterfsVolume.mounter.Unmount(dir, 0); err != nil {
glog.Errorf("Glusterfs: Unmounting failed: %v", err)
return err
}
mountpoint, mntErr := glusterfsVolume.mounter.IsMountPoint(dir)
if mntErr != nil {
glog.Errorf("Glusterfs: IsMountpoint check failed: %v", mntErr)
return mntErr
}
if !mountpoint {
if err := os.RemoveAll(dir); err != nil {
return err
}
}
return nil
}
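// execMount tries the Gluster hosts in the endpoints one by one, starting at a random offset, until a mount succeeds.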
func (glusterfsVolume *glusterfs) execMount(hosts *api.Endpoints, path string, mountpoint string, readonly bool) error {
var errs error
var command exec.Cmd
var mountArgs []string
var opt []string
// build option array
	if readonly {
opt = []string{"-o", "ro"}
} else {
opt = []string{"-o", "rw"}
}
l := len(hosts.Subsets)
// avoid mount storm, pick a host randomly
	start := rand.Intn(l)
// iterate all hosts until mount succeeds.
for i := start; i < start+l; i++ {
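		// i%l wraps around the host list so every endpoint gets exactly one attempt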
arg := []string{"-t", "glusterfs", hosts.Subsets[i%l].Addresses[0].IP + ":" + path, mountpoint}
mountArgs = append(arg, opt...)
glog.V(1).Infof("Glusterfs: mount cmd: mount %v", strings.Join(mountArgs, " "))
command = glusterfsVolume.exe.Command("mount", mountArgs...)
_, errs = command.CombinedOutput()
if errs == nil {
return nil
}
}
glog.Errorf("Glusterfs: mount failed: %v", errs)
return errs
}

View File

@ -0,0 +1,134 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"os"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
)
func TestCanSupport(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("fake", nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.Name() != "kubernetes.io/glusterfs" {
t.Errorf("Wrong name: %s", plug.Name())
}
if !plug.CanSupport(&api.Volume{VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{}}}) {
t.Errorf("Expected true")
}
if plug.CanSupport(&api.Volume{VolumeSource: api.VolumeSource{}}) {
t.Errorf("Expected false")
}
}
func TestGetAccessModes(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/glusterfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !contains(plug.GetAccessModes(), api.ReadWriteOnce) || !contains(plug.GetAccessModes(), api.ReadOnlyMany) || !contains(plug.GetAccessModes(), api.ReadWriteMany) {
t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany)
}
}
func contains(modes []api.AccessModeType, mode api.AccessModeType) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
func TestPlugin(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{"ep", "vol", false}},
}
ep := &api.Endpoints{ObjectMeta: api.ObjectMeta{Name: "foo"}, Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}}}}}
	fcmd := exec.FakeCmd{
CombinedOutputScript: []exec.FakeCombinedOutputAction{
// mount
func() ([]byte, error) {
return []byte{}, nil
},
},
}
fake := exec.FakeExec{
CommandScript: []exec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
},
}
	builder, err := plug.(*glusterfsPlugin).newBuilderInternal(spec, ep, &api.ObjectReference{UID: types.UID("poduid")}, &mount.FakeMounter{}, &fake)
	if err != nil {
		t.Errorf("Failed to make a new Builder: %v", err)
	}
	if builder == nil {
		t.Fatalf("Got a nil Builder")
	}
	volumePath := builder.GetPath()
	if volumePath != "/tmp/fake/pods/poduid/volumes/kubernetes.io~glusterfs/vol1" {
		t.Errorf("Got unexpected path: %s", volumePath)
	}
if err := builder.SetUp(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
cleaner, err := plug.(*glusterfsPlugin).newCleanerInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Cleaner: %v", err)
}
if cleaner == nil {
t.Errorf("Got a nil Cleaner: %v")
}
if err := cleaner.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(volumePath); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
} else if !os.IsNotExist(err) {
t.Errorf("SetUp() failed: %v", err)
}
}