diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json
index 8240c9f9fa4..0588601d094 100644
--- a/api/swagger-spec/v1.json
+++ b/api/swagger-spec/v1.json
@@ -12074,6 +12074,10 @@
"$ref": "v1.ISCSIVolumeSource",
"description": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin."
},
+ "cinder": {
+ "$ref": "v1.CinderVolumeSource",
+ "description": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
+ },
"accessModes": {
"type": "array",
"items": {
@@ -12294,6 +12298,27 @@
}
}
},
+ "v1.CinderVolumeSource": {
+ "id": "v1.CinderVolumeSource",
+ "description": "CinderVolumeSource represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet.",
+ "required": [
+ "volumeID"
+ ],
+ "properties": {
+ "volumeID": {
+ "type": "string",
+ "description": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
+ },
+ "fsType": {
+ "type": "string",
+ "description": "Required: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Only ext3 and ext4 are allowed More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
+ },
+ "readOnly": {
+ "type": "boolean",
+ "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
+ }
+ }
+ },
"v1.PersistentVolumeStatus": {
"id": "v1.PersistentVolumeStatus",
"description": "PersistentVolumeStatus is the current status of a persistent volume.",
@@ -12488,6 +12513,10 @@
"rbd": {
"$ref": "v1.RBDVolumeSource",
"description": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md"
+ },
+ "cinder": {
+ "$ref": "v1.CinderVolumeSource",
+ "description": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
}
}
},
diff --git a/cmd/kubelet/app/plugins.go b/cmd/kubelet/app/plugins.go
index 3ec65c215f3..bdaf8c842ad 100644
--- a/cmd/kubelet/app/plugins.go
+++ b/cmd/kubelet/app/plugins.go
@@ -26,6 +26,7 @@ import (
// Volume plugins
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/aws_ebs"
+ "k8s.io/kubernetes/pkg/volume/cinder"
"k8s.io/kubernetes/pkg/volume/empty_dir"
"k8s.io/kubernetes/pkg/volume/gce_pd"
"k8s.io/kubernetes/pkg/volume/git_repo"
@@ -58,7 +59,7 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, persistent_claim.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
-
+ allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
return allPlugins
}
diff --git a/examples/mysql-cinder-pd/README.md b/examples/mysql-cinder-pd/README.md
new file mode 100644
index 00000000000..941fb6b245a
--- /dev/null
+++ b/examples/mysql-cinder-pd/README.md
@@ -0,0 +1,84 @@
+
+PLEASE NOTE: This document applies to the HEAD of the source tree
+
+If you are using a released version of Kubernetes, you should
+refer to the docs that go with that version.
+
+
+The latest 1.0.x release of this document can be found
+[here](http://releases.k8s.io/release-1.0/examples/mysql-cinder-pd/README.md).
+
+Documentation for other releases can be found at
+[releases.k8s.io](http://releases.k8s.io).
+
+# MySQL installation with the Cinder volume plugin
+
+Cinder is a Block Storage service for OpenStack. This example shows how a Cinder volume can be attached and mounted in a pod in Kubernetes.
+
+### Prerequisites
+
+Start the kubelet with the OpenStack cloud provider and a valid cloud config.
+Sample cloud_config:
+
+```
+[Global]
+auth-url=https://os-identity.vip.foo.bar.com:5443/v2.0
+username=user
+password=pass
+region=region1
+tenant-id=0c331a1df18571594d49fe68asa4e
+```
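+
+The kubelet picks this file up through its cloud-provider flags. A minimal sketch, assuming the config above is saved at `/etc/kubernetes/cloud_config` (an illustrative path):
+
+```
+kubelet --cloud-provider=openstack --cloud-config=/etc/kubernetes/cloud_config ...
+```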
+
+Currently the Cinder volume plugin works only on Linux hosts and supports ext3 and ext4 as filesystem types.
+Make sure the kubelet's host machine has the following executables:
+
+```
+/bin/lsblk -- to find out the fstype of the volume
+/sbin/mkfs.ext3 and /sbin/mkfs.ext4 -- to format the volume if required
+/usr/bin/udevadm -- to probe the attached volume so that a symlink is created under /dev/disk/by-id/ with a virtio- prefix
+```
+
+Ensure Cinder is installed and configured properly in the region in which the kubelet runs.
+
+### Example
+
+Create a Cinder volume, e.g.:
+
+`cinder create --display-name=test-repo 2`
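+
+If you need to look up the volume's ID afterwards, the standard Cinder CLI can show it, for example:
+
+```
+cinder show test-repo
+```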
+
+Use the ID of the Cinder volume you created in a pod [definition](mysql.yaml), then create the pod from that definition:
+
+`cluster/kubectl.sh create -f examples/mysql-cinder-pd/mysql.yaml`
+
+This should now:
+
+1. Attach the specified volume to the kubelet's host machine
+2. Format the volume if required (only if the volume specified is not already formatted to the fstype specified)
+3. Mount it on the kubelet's host machine
+4. Spin up a container with this volume mounted to the path specified in the pod definition
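+
+To verify, check that the pod reaches `Running` and inspect it (commands below are illustrative):
+
+```
+cluster/kubectl.sh get pods
+cluster/kubectl.sh describe pod mysql
+```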
+
diff --git a/examples/mysql-cinder-pd/mysql-service.yaml b/examples/mysql-cinder-pd/mysql-service.yaml
new file mode 100644
index 00000000000..6e2c019ac3e
--- /dev/null
+++ b/examples/mysql-cinder-pd/mysql-service.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ name: mysql
+ name: mysql
+spec:
+ ports:
+ # the port that this service should serve on
+ - port: 3306
+ # label keys and values that must match in order to receive traffic for this service
+ selector:
+ name: mysql
\ No newline at end of file
diff --git a/examples/mysql-cinder-pd/mysql.yaml b/examples/mysql-cinder-pd/mysql.yaml
new file mode 100644
index 00000000000..52e47a169bf
--- /dev/null
+++ b/examples/mysql-cinder-pd/mysql.yaml
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: mysql
+ labels:
+ name: mysql
+spec:
+ containers:
+ - resources:
+ limits:
+ cpu: 0.5
+ image: mysql
+ name: mysql
+ env:
+ - name: MYSQL_ROOT_PASSWORD
+ # change this
+ value: yourpassword
+ ports:
+ - containerPort: 3306
+ name: mysql
+ volumeMounts:
+ # name must match the volume name below
+ - name: mysql-persistent-storage
+ # mount path within the container
+ mountPath: /var/lib/mysql
+ volumes:
+ - name: mysql-persistent-storage
+ cinder:
+ volumeID: bd82f7e2-wece-4c01-a505-4acf60b07f4a
+ fsType: ext4
diff --git a/pkg/api/deep_copy_generated.go b/pkg/api/deep_copy_generated.go
index 5bb46937ee7..c6eb5df9fab 100644
--- a/pkg/api/deep_copy_generated.go
+++ b/pkg/api/deep_copy_generated.go
@@ -71,6 +71,13 @@ func deepCopy_api_Capabilities(in Capabilities, out *Capabilities, c *conversion
return nil
}
+func deepCopy_api_CinderVolumeSource(in CinderVolumeSource, out *CinderVolumeSource, c *conversion.Cloner) error {
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
func deepCopy_api_ComponentCondition(in ComponentCondition, out *ComponentCondition, c *conversion.Cloner) error {
out.Type = in.Type
out.Status = in.Status
@@ -1178,6 +1185,14 @@ func deepCopy_api_PersistentVolumeSource(in PersistentVolumeSource, out *Persist
} else {
out.ISCSI = nil
}
+ if in.Cinder != nil {
+ out.Cinder = new(CinderVolumeSource)
+ if err := deepCopy_api_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
return nil
}
@@ -2110,6 +2125,14 @@ func deepCopy_api_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion
} else {
out.RBD = nil
}
+ if in.Cinder != nil {
+ out.Cinder = new(CinderVolumeSource)
+ if err := deepCopy_api_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
return nil
}
@@ -2150,6 +2173,7 @@ func init() {
deepCopy_api_AWSElasticBlockStoreVolumeSource,
deepCopy_api_Binding,
deepCopy_api_Capabilities,
+ deepCopy_api_CinderVolumeSource,
deepCopy_api_ComponentCondition,
deepCopy_api_ComponentStatus,
deepCopy_api_ComponentStatusList,
diff --git a/pkg/api/types.go b/pkg/api/types.go
index c88f0c7ebb3..097d4a15f64 100644
--- a/pkg/api/types.go
+++ b/pkg/api/types.go
@@ -224,6 +224,8 @@ type VolumeSource struct {
PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime
RBD *RBDVolumeSource `json:"rbd,omitempty"`
+ // Cinder represents a cinder volume attached and mounted on the kubelet's host machine
+ Cinder *CinderVolumeSource `json:"cinder,omitempty"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@@ -248,6 +250,8 @@ type PersistentVolumeSource struct {
// ISCSIVolumeSource represents an ISCSI resource that is attached to a
// kubelet's host machine and then exposed to the pod.
ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty"`
+ // Cinder represents a cinder volume attached and mounted on the kubelet's host machine
+ Cinder *CinderVolumeSource `json:"cinder,omitempty"`
}
type PersistentVolumeClaimVolumeSource struct {
@@ -558,6 +562,21 @@ type RBDVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty"`
}
+// CinderVolumeSource represents a cinder volume resource in OpenStack.
+// A Cinder volume must exist and be formatted before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+type CinderVolumeSource struct {
+ // Unique id used to identify the volume in cinder
+ VolumeID string `json:"volumeID"`
+ // Required: Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Only ext3 and ext4 are allowed.
+ FSType string `json:"fsType,omitempty"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly bool `json:"readOnly,omitempty"`
+}
+
// ContainerPort represents a network port in a single container
type ContainerPort struct {
// Optional: If specified, this must be an IANA_SVC_NAME Each named port
diff --git a/pkg/api/v1/conversion_generated.go b/pkg/api/v1/conversion_generated.go
index 6a75a1a278c..ec278caf6cd 100644
--- a/pkg/api/v1/conversion_generated.go
+++ b/pkg/api/v1/conversion_generated.go
@@ -76,6 +76,16 @@ func convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *Capa
return nil
}
+func convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *CinderVolumeSource, s conversion.Scope) error {
+ if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
+ defaulting.(func(*api.CinderVolumeSource))(in)
+ }
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
func convert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *ComponentCondition, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.ComponentCondition))(in)
@@ -1355,6 +1365,14 @@ func convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.Per
} else {
out.ISCSI = nil
}
+ if in.Cinder != nil {
+ out.Cinder = new(CinderVolumeSource)
+ if err := convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
return nil
}
@@ -2337,6 +2355,14 @@ func convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *Volu
} else {
out.RBD = nil
}
+ if in.Cinder != nil {
+ out.Cinder = new(CinderVolumeSource)
+ if err := convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
return nil
}
@@ -2390,6 +2416,16 @@ func convert_v1_Capabilities_To_api_Capabilities(in *Capabilities, out *api.Capa
return nil
}
+func convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error {
+ if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
+ defaulting.(func(*CinderVolumeSource))(in)
+ }
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
func convert_v1_ComponentCondition_To_api_ComponentCondition(in *ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*ComponentCondition))(in)
@@ -3669,6 +3705,14 @@ func convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *Persist
} else {
out.ISCSI = nil
}
+ if in.Cinder != nil {
+ out.Cinder = new(api.CinderVolumeSource)
+ if err := convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
return nil
}
@@ -4651,6 +4695,14 @@ func convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.Volu
} else {
out.RBD = nil
}
+ if in.Cinder != nil {
+ out.Cinder = new(api.CinderVolumeSource)
+ if err := convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
return nil
}
@@ -4659,6 +4711,7 @@ func init() {
convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource,
convert_api_Binding_To_v1_Binding,
convert_api_Capabilities_To_v1_Capabilities,
+ convert_api_CinderVolumeSource_To_v1_CinderVolumeSource,
convert_api_ComponentCondition_To_v1_ComponentCondition,
convert_api_ComponentStatusList_To_v1_ComponentStatusList,
convert_api_ComponentStatus_To_v1_ComponentStatus,
@@ -4773,6 +4826,7 @@ func init() {
convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource,
convert_v1_Binding_To_api_Binding,
convert_v1_Capabilities_To_api_Capabilities,
+ convert_v1_CinderVolumeSource_To_api_CinderVolumeSource,
convert_v1_ComponentCondition_To_api_ComponentCondition,
convert_v1_ComponentStatusList_To_api_ComponentStatusList,
convert_v1_ComponentStatus_To_api_ComponentStatus,
diff --git a/pkg/api/v1/deep_copy_generated.go b/pkg/api/v1/deep_copy_generated.go
index e806c6654a8..24a9bf6d753 100644
--- a/pkg/api/v1/deep_copy_generated.go
+++ b/pkg/api/v1/deep_copy_generated.go
@@ -86,6 +86,13 @@ func deepCopy_v1_Capabilities(in Capabilities, out *Capabilities, c *conversion.
return nil
}
+func deepCopy_v1_CinderVolumeSource(in CinderVolumeSource, out *CinderVolumeSource, c *conversion.Cloner) error {
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
func deepCopy_v1_ComponentCondition(in ComponentCondition, out *ComponentCondition, c *conversion.Cloner) error {
out.Type = in.Type
out.Status = in.Status
@@ -1177,6 +1184,14 @@ func deepCopy_v1_PersistentVolumeSource(in PersistentVolumeSource, out *Persiste
} else {
out.ISCSI = nil
}
+ if in.Cinder != nil {
+ out.Cinder = new(CinderVolumeSource)
+ if err := deepCopy_v1_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
return nil
}
@@ -2115,6 +2130,14 @@ func deepCopy_v1_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion.
} else {
out.RBD = nil
}
+ if in.Cinder != nil {
+ out.Cinder = new(CinderVolumeSource)
+ if err := deepCopy_v1_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
return nil
}
@@ -2152,6 +2175,7 @@ func init() {
deepCopy_v1_AWSElasticBlockStoreVolumeSource,
deepCopy_v1_Binding,
deepCopy_v1_Capabilities,
+ deepCopy_v1_CinderVolumeSource,
deepCopy_v1_ComponentCondition,
deepCopy_v1_ComponentStatus,
deepCopy_v1_ComponentStatusList,
diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go
index 68ac8cc32d4..9719bf5269c 100644
--- a/pkg/api/v1/types.go
+++ b/pkg/api/v1/types.go
@@ -277,6 +277,9 @@ type VolumeSource struct {
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
// More info: http://releases.k8s.io/HEAD/examples/rbd/README.md
RBD *RBDVolumeSource `json:"rbd,omitempty"`
+ // Cinder represents a cinder volume attached and mounted on the kubelet's host machine.
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ Cinder *CinderVolumeSource `json:"cinder,omitempty"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
@@ -322,6 +325,9 @@ type PersistentVolumeSource struct {
// ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty"`
+ // Cinder represents a cinder volume attached and mounted on the kubelet's host machine.
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ Cinder *CinderVolumeSource `json:"cinder,omitempty"`
}
// PersistentVolume (PV) is a storage resource provisioned by an administrator.
@@ -570,6 +576,24 @@ type RBDVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty"`
}
+// CinderVolumeSource represents a cinder volume resource in OpenStack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+type CinderVolumeSource struct {
+ // volume id used to identify the volume in cinder.
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ VolumeID string `json:"volumeID"`
+ // Required: Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Only ext3 and ext4 are allowed.
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ FSType string `json:"fsType,omitempty"`
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
+ ReadOnly bool `json:"readOnly,omitempty"`
+}
+
const (
StorageMediumDefault StorageMedium = "" // use whatever the default is for the node
StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs)
diff --git a/pkg/api/v1/types_swagger_doc_generated.go b/pkg/api/v1/types_swagger_doc_generated.go
index 555c3cc52a9..c16300eacca 100644
--- a/pkg/api/v1/types_swagger_doc_generated.go
+++ b/pkg/api/v1/types_swagger_doc_generated.go
@@ -69,6 +69,17 @@ func (Capabilities) SwaggerDoc() map[string]string {
return map_Capabilities
}
+var map_CinderVolumeSource = map[string]string{
+ "": "CinderVolumeSource represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet.",
+ "volumeID": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "fsType": "Required: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Only ext3 and ext4 are allowed More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+ "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
+}
+
+func (CinderVolumeSource) SwaggerDoc() map[string]string {
+ return map_CinderVolumeSource
+}
+
var map_ComponentCondition = map[string]string{
"": "Information about the condition of a component.",
"type": "Type of condition for a component. Valid value: \"Healthy\"",
@@ -788,6 +799,7 @@ var map_PersistentVolumeSource = map[string]string{
"nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs",
"rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md",
"iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.",
+ "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
}
func (PersistentVolumeSource) SwaggerDoc() map[string]string {
@@ -1361,7 +1373,8 @@ var map_VolumeSource = map[string]string{
"iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/examples/iscsi/README.md",
"glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md",
"persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims",
- "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md",
+ "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md",
+ "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
}
func (VolumeSource) SwaggerDoc() map[string]string {
diff --git a/pkg/api/validation/validation.go b/pkg/api/validation/validation.go
index 88290c664c9..27151e92a80 100644
--- a/pkg/api/validation/validation.go
+++ b/pkg/api/validation/validation.go
@@ -375,6 +375,10 @@ func validateSource(source *api.VolumeSource) errs.ValidationErrorList {
numVolumes++
allErrs = append(allErrs, validateRBD(source.RBD).Prefix("rbd")...)
}
+ if source.Cinder != nil {
+ numVolumes++
+ allErrs = append(allErrs, validateCinderVolumeSource(source.Cinder).Prefix("cinder")...)
+ }
if numVolumes != 1 {
allErrs = append(allErrs, errs.NewFieldInvalid("", source, "exactly 1 volume type is required"))
}
@@ -498,6 +502,17 @@ func validateRBD(rbd *api.RBDVolumeSource) errs.ValidationErrorList {
return allErrs
}
+func validateCinderVolumeSource(cd *api.CinderVolumeSource) errs.ValidationErrorList {
+ allErrs := errs.ValidationErrorList{}
+ if cd.VolumeID == "" {
+ allErrs = append(allErrs, errs.NewFieldRequired("volumeID"))
+ }
+ if cd.FSType == "" || (cd.FSType != "ext3" && cd.FSType != "ext4") {
+ allErrs = append(allErrs, errs.NewFieldRequired("fsType required and should be of type ext3 or ext4"))
+ }
+ return allErrs
+}
+
func ValidatePersistentVolumeName(name string, prefix bool) (bool, string) {
return NameIsDNSSubdomain(name, prefix)
}
@@ -557,6 +572,10 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) errs.ValidationErrorList
numVolumes++
allErrs = append(allErrs, validateISCSIVolumeSource(pv.Spec.ISCSI).Prefix("iscsi")...)
}
+ if pv.Spec.Cinder != nil {
+ numVolumes++
+ allErrs = append(allErrs, validateCinderVolumeSource(pv.Spec.Cinder).Prefix("cinder")...)
+ }
if numVolumes != 1 {
allErrs = append(allErrs, errs.NewFieldInvalid("", pv.Spec.PersistentVolumeSource, "exactly 1 volume type is required"))
}
diff --git a/pkg/api/validation/validation_test.go b/pkg/api/validation/validation_test.go
index 0c0a5280983..d99028e9ffa 100644
--- a/pkg/api/validation/validation_test.go
+++ b/pkg/api/validation/validation_test.go
@@ -456,12 +456,13 @@ func TestValidateVolumes(t *testing.T) {
{Name: "secret", VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: "my-secret"}}},
{Name: "glusterfs", VolumeSource: api.VolumeSource{Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "host1", Path: "path", ReadOnly: false}}},
{Name: "rbd", VolumeSource: api.VolumeSource{RBD: &api.RBDVolumeSource{CephMonitors: []string{"foo"}, RBDImage: "bar", FSType: "ext4"}}},
+ {Name: "cinder", VolumeSource: api.VolumeSource{Cinder: &api.CinderVolumeSource{"29ea5088-4f60-4757-962e-dba678767887", "ext4", false}}},
}
names, errs := validateVolumes(successCase)
if len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
- if len(names) != len(successCase) || !names.HasAll("abc", "123", "abc-123", "empty", "gcepd", "gitrepo", "secret", "iscsidisk") {
+ if len(names) != len(successCase) || !names.HasAll("abc", "123", "abc-123", "empty", "gcepd", "gitrepo", "secret", "iscsidisk", "cinder") {
t.Errorf("wrong names result: %v", names)
}
emptyVS := api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}
diff --git a/pkg/cloudprovider/providers/openstack/openstack.go b/pkg/cloudprovider/providers/openstack/openstack.go
index 048c8174053..62d345e64a4 100644
--- a/pkg/cloudprovider/providers/openstack/openstack.go
+++ b/pkg/cloudprovider/providers/openstack/openstack.go
@@ -22,12 +22,16 @@ import (
"io"
"net"
"net/http"
+ ossys "os"
"regexp"
+ "strings"
"time"
"code.google.com/p/gcfg"
"github.com/rackspace/gophercloud"
"github.com/rackspace/gophercloud/openstack"
+ "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
+ "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach"
"github.com/rackspace/gophercloud/openstack/compute/v2/flavors"
"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
"github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members"
@@ -763,3 +767,157 @@ func (os *OpenStack) GetZone() (cloudprovider.Zone, error) {
func (os *OpenStack) Routes() (cloudprovider.Routes, bool) {
return nil, false
}
+
+// Attaches given cinder volume to the compute running kubelet
+func (os *OpenStack) AttachDisk(diskName string) (string, error) {
+ disk, err := os.getVolume(diskName)
+ if err != nil {
+ return "", err
+ }
+ cClient, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
+ Region: os.region,
+ })
+ if err != nil || cClient == nil {
+ glog.Errorf("Unable to initialize nova client for region: %s", os.region)
+ return "", err
+ }
+ compute_id, err := os.getComputeIDbyHostname(cClient)
+ if err != nil || len(compute_id) == 0 {
+ glog.Errorf("Unable to get minion's id by minion's hostname")
+ return "", err
+ }
+
+ if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil {
+ if compute_id == disk.Attachments[0]["server_id"] {
+ glog.V(4).Infof("Disk: %q is already attached to compute: %q", diskName, compute_id)
+ return disk.ID, nil
+ } else {
+ errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", diskName, disk.Attachments[0]["server_id"])
+ glog.Error(errMsg)
+ return "", errors.New(errMsg)
+ }
+ }
+ // add read only flag here if possible spothanis
+ _, err = volumeattach.Create(cClient, compute_id, &volumeattach.CreateOpts{
+ VolumeID: disk.ID,
+ }).Extract()
+ if err != nil {
+ glog.Errorf("Failed to attach %s volume to %s compute", diskName, compute_id)
+ return "", err
+ }
+ glog.V(2).Infof("Successfully attached %s volume to %s compute", diskName, compute_id)
+ return disk.ID, nil
+}
+
+// Detaches given cinder volume from the compute running kubelet
+func (os *OpenStack) DetachDisk(partialDiskId string) error {
+ disk, err := os.getVolume(partialDiskId)
+ if err != nil {
+ return err
+ }
+ cClient, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
+ Region: os.region,
+ })
+ if err != nil || cClient == nil {
+ glog.Errorf("Unable to initialize nova client for region: %s", os.region)
+ return err
+ }
+ compute_id, err := os.getComputeIDbyHostname(cClient)
+ if err != nil || len(compute_id) == 0 {
+ glog.Errorf("Unable to get compute id while detaching disk")
+ return err
+ }
+ if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil && compute_id == disk.Attachments[0]["server_id"] {
+ // This is a blocking call and affects kubelet's performance directly.
+ // We should consider kicking it out into a separate routine, if it turns out to be a problem.
+ err = volumeattach.Delete(cClient, compute_id, disk.ID).ExtractErr()
+ if err != nil {
+ glog.Errorf("Failed to delete volume %s from compute %s attached %v", disk.ID, compute_id, err)
+ return err
+ }
+ glog.V(2).Infof("Successfully detached volume: %s from compute: %s", disk.ID, compute_id)
+ } else {
+ errMsg := fmt.Sprintf("Disk: %s has no attachments or is not attached to compute: %s", disk.Name, compute_id)
+ glog.Error(errMsg)
+ return errors.New(errMsg)
+ }
+ return nil
+}
+
+// Takes a partial/full disk id or diskname
+func (os *OpenStack) getVolume(diskName string) (volumes.Volume, error) {
+ sClient, err := openstack.NewBlockStorageV1(os.provider, gophercloud.EndpointOpts{
+ Region: os.region,
+ })
+
+ var volume volumes.Volume
+ if err != nil || sClient == nil {
+ glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
+ return volume, err
+ }
+
+ found := false
+ err = volumes.List(sClient, nil).EachPage(func(page pagination.Page) (bool, error) {
+ vols, err := volumes.ExtractVolumes(page)
+ if err != nil {
+ glog.Errorf("Failed to extract volumes: %v", err)
+ return false, err
+ }
+ for _, v := range vols {
+ glog.V(4).Infof("%s %s %v", v.ID, v.Name, v.Attachments)
+ if v.Name == diskName || strings.Contains(v.ID, diskName) {
+ volume = v
+ found = true
+ // Stop paging; the disk has been found.
+ return false, nil
+ }
+ }
+ // Keep paging; the disk may be on a later page.
+ return true, nil
+ })
+ if err != nil {
+ glog.Errorf("Error occurred getting volume: %s", diskName)
+ return volume, err
+ }
+ if !found {
+ return volume, fmt.Errorf("Unable to find disk: %s in region %s", diskName, os.region)
+ }
+ return volume, nil
+}
+
+func (os *OpenStack) getComputeIDbyHostname(cClient *gophercloud.ServiceClient) (string, error) {
+
+ hostname, err := ossys.Hostname()
+
+ if err != nil {
+ glog.Errorf("Failed to get Minion's hostname: %v", err)
+ return "", err
+ }
+
+ i, ok := os.Instances()
+ if !ok {
+ glog.Errorf("Unable to get instances")
+ return "", errors.New("Unable to get instances")
+ }
+
+ srvs, err := i.List(".")
+ if err != nil {
+ glog.Errorf("Failed to list servers: %v", err)
+ return "", err
+ }
+
+ if len(srvs) == 0 {
+ glog.Errorf("Found no servers in the region")
+ return "", errors.New("Found no servers in the region")
+ }
+ glog.V(4).Infof("found servers: %v", srvs)
+
+ for _, srvname := range srvs {
+ server, err := getServerByName(cClient, srvname)
+ if err != nil {
+ return "", err
+ } else {
+ if (server.Metadata["hostname"] != nil && server.Metadata["hostname"] == hostname) || (len(server.Name) > 0 && server.Name == hostname) {
+ glog.V(4).Infof("found server: %s with host :%s", server.Name, hostname)
+ return server.ID, nil
+ }
+ }
+ }
+ return "", fmt.Errorf("No server found matching hostname: %s", hostname)
+}
diff --git a/pkg/controller/persistentvolume/persistentvolume_recycler_controller.go b/pkg/controller/persistentvolume/persistentvolume_recycler_controller.go
index 360a928c936..9e3846f3718 100644
--- a/pkg/controller/persistentvolume/persistentvolume_recycler_controller.go
+++ b/pkg/controller/persistentvolume/persistentvolume_recycler_controller.go
@@ -20,19 +20,19 @@ import (
"fmt"
"time"
+ "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/cache"
+ "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
- "k8s.io/kubernetes/pkg/volume"
- "k8s.io/kubernetes/pkg/watch"
-
- "github.com/golang/glog"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
+ "k8s.io/kubernetes/pkg/volume"
+ "k8s.io/kubernetes/pkg/watch"
)
// PersistentVolumeRecycler is a controller that watches for PersistentVolumes that are released from their claims.
@@ -229,3 +229,7 @@ func (f *PersistentVolumeRecycler) NewWrapperBuilder(spec *volume.Spec, pod *api
func (f *PersistentVolumeRecycler) NewWrapperCleaner(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
return nil, fmt.Errorf("NewWrapperCleaner not supported by PVClaimBinder's VolumeHost implementation")
}
+
+func (f *PersistentVolumeRecycler) GetCloudProvider() cloudprovider.Interface {
+ return nil
+}
diff --git a/pkg/expapi/deep_copy_generated.go b/pkg/expapi/deep_copy_generated.go
index 92cc7532ed5..f61479a258e 100644
--- a/pkg/expapi/deep_copy_generated.go
+++ b/pkg/expapi/deep_copy_generated.go
@@ -56,6 +56,13 @@ func deepCopy_api_Capabilities(in api.Capabilities, out *api.Capabilities, c *co
return nil
}
+func deepCopy_api_CinderVolumeSource(in api.CinderVolumeSource, out *api.CinderVolumeSource, c *conversion.Cloner) error {
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
func deepCopy_api_Container(in api.Container, out *api.Container, c *conversion.Cloner) error {
out.Name = in.Name
out.Image = in.Image
@@ -662,6 +669,14 @@ func deepCopy_api_VolumeSource(in api.VolumeSource, out *api.VolumeSource, c *co
} else {
out.RBD = nil
}
+ if in.Cinder != nil {
+ out.Cinder = new(api.CinderVolumeSource)
+ if err := deepCopy_api_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
return nil
}
@@ -1045,6 +1060,7 @@ func init() {
err := api.Scheme.AddGeneratedDeepCopyFuncs(
deepCopy_api_AWSElasticBlockStoreVolumeSource,
deepCopy_api_Capabilities,
+ deepCopy_api_CinderVolumeSource,
deepCopy_api_Container,
deepCopy_api_ContainerPort,
deepCopy_api_EmptyDirVolumeSource,
diff --git a/pkg/expapi/v1/conversion_generated.go b/pkg/expapi/v1/conversion_generated.go
index fe8a0a19b27..a6409c8b8e9 100644
--- a/pkg/expapi/v1/conversion_generated.go
+++ b/pkg/expapi/v1/conversion_generated.go
@@ -62,6 +62,16 @@ func convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *v1.C
return nil
}
+func convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error {
+ if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
+ defaulting.(func(*api.CinderVolumeSource))(in)
+ }
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
func convert_api_Container_To_v1_Container(in *api.Container, out *v1.Container, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*api.Container))(in)
@@ -704,6 +714,14 @@ func convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *v1.V
} else {
out.RBD = nil
}
+ if in.Cinder != nil {
+ out.Cinder = new(v1.CinderVolumeSource)
+ if err := convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
return nil
}
@@ -741,6 +759,16 @@ func convert_v1_Capabilities_To_api_Capabilities(in *v1.Capabilities, out *api.C
return nil
}
+func convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *v1.CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error {
+ if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
+ defaulting.(func(*v1.CinderVolumeSource))(in)
+ }
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
func convert_v1_Container_To_api_Container(in *v1.Container, out *api.Container, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*v1.Container))(in)
@@ -1383,6 +1411,14 @@ func convert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.V
} else {
out.RBD = nil
}
+ if in.Cinder != nil {
+ out.Cinder = new(api.CinderVolumeSource)
+ if err := convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in.Cinder, out.Cinder, s); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
return nil
}
@@ -2132,6 +2168,7 @@ func init() {
err := api.Scheme.AddGeneratedConversionFuncs(
convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource,
convert_api_Capabilities_To_v1_Capabilities,
+ convert_api_CinderVolumeSource_To_v1_CinderVolumeSource,
convert_api_ContainerPort_To_v1_ContainerPort,
convert_api_Container_To_v1_Container,
convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource,
@@ -2189,6 +2226,7 @@ func init() {
convert_v1_APIVersion_To_expapi_APIVersion,
convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource,
convert_v1_Capabilities_To_api_Capabilities,
+ convert_v1_CinderVolumeSource_To_api_CinderVolumeSource,
convert_v1_ContainerPort_To_api_ContainerPort,
convert_v1_Container_To_api_Container,
convert_v1_DaemonList_To_expapi_DaemonList,
diff --git a/pkg/expapi/v1/deep_copy_generated.go b/pkg/expapi/v1/deep_copy_generated.go
index 6b9e827097a..a73e1b458d2 100644
--- a/pkg/expapi/v1/deep_copy_generated.go
+++ b/pkg/expapi/v1/deep_copy_generated.go
@@ -73,6 +73,13 @@ func deepCopy_v1_Capabilities(in v1.Capabilities, out *v1.Capabilities, c *conve
return nil
}
+func deepCopy_v1_CinderVolumeSource(in v1.CinderVolumeSource, out *v1.CinderVolumeSource, c *conversion.Cloner) error {
+ out.VolumeID = in.VolumeID
+ out.FSType = in.FSType
+ out.ReadOnly = in.ReadOnly
+ return nil
+}
+
func deepCopy_v1_Container(in v1.Container, out *v1.Container, c *conversion.Cloner) error {
out.Name = in.Name
out.Image = in.Image
@@ -680,6 +687,14 @@ func deepCopy_v1_VolumeSource(in v1.VolumeSource, out *v1.VolumeSource, c *conve
} else {
out.RBD = nil
}
+ if in.Cinder != nil {
+ out.Cinder = new(v1.CinderVolumeSource)
+ if err := deepCopy_v1_CinderVolumeSource(*in.Cinder, out.Cinder, c); err != nil {
+ return err
+ }
+ } else {
+ out.Cinder = nil
+ }
return nil
}
@@ -1053,6 +1068,7 @@ func init() {
deepCopy_resource_Quantity,
deepCopy_v1_AWSElasticBlockStoreVolumeSource,
deepCopy_v1_Capabilities,
+ deepCopy_v1_CinderVolumeSource,
deepCopy_v1_Container,
deepCopy_v1_ContainerPort,
deepCopy_v1_EmptyDirVolumeSource,
diff --git a/pkg/kubelet/volumes.go b/pkg/kubelet/volumes.go
index da60a7c84ea..807381112fc 100644
--- a/pkg/kubelet/volumes.go
+++ b/pkg/kubelet/volumes.go
@@ -24,6 +24,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
+ "k8s.io/kubernetes/pkg/cloudprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
@@ -79,6 +80,10 @@ func (vh *volumeHost) NewWrapperCleaner(spec *volume.Spec, podUID types.UID, mou
return c, nil
}
+func (vh *volumeHost) GetCloudProvider() cloudprovider.Interface {
+ return vh.kubelet.cloud
+}
+
func (kl *Kubelet) newVolumeBuilderFromPlugins(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
if err != nil {
diff --git a/pkg/volume/cinder/cinder.go b/pkg/volume/cinder/cinder.go
new file mode 100644
index 00000000000..be998ff6cab
--- /dev/null
+++ b/pkg/volume/cinder/cinder.go
@@ -0,0 +1,281 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinder
+
+import (
+ "os"
+ "path"
+
+ "github.com/golang/glog"
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/types"
+ "k8s.io/kubernetes/pkg/util"
+ "k8s.io/kubernetes/pkg/util/exec"
+ "k8s.io/kubernetes/pkg/util/mount"
+ "k8s.io/kubernetes/pkg/volume"
+)
+
+// This is the primary entrypoint for volume plugins.
+func ProbeVolumePlugins() []volume.VolumePlugin {
+ return []volume.VolumePlugin{&cinderPlugin{nil}}
+}
+
+type cinderPlugin struct {
+ host volume.VolumeHost
+}
+
+var _ volume.VolumePlugin = &cinderPlugin{}
+
+const (
+ cinderVolumePluginName = "kubernetes.io/cinder"
+)
+
+func (plugin *cinderPlugin) Init(host volume.VolumeHost) {
+ plugin.host = host
+}
+
+func (plugin *cinderPlugin) Name() string {
+ return cinderVolumePluginName
+}
+
+func (plugin *cinderPlugin) CanSupport(spec *volume.Spec) bool {
+ return spec.PersistentVolumeSource.Cinder != nil || spec.VolumeSource.Cinder != nil
+}
+
+func (plugin *cinderPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
+ return []api.PersistentVolumeAccessMode{
+ api.ReadWriteOnce,
+ }
+}
+
+func (plugin *cinderPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
+ return plugin.newBuilderInternal(spec, pod.UID, &CinderDiskUtil{}, mounter)
+}
+
+func (plugin *cinderPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Builder, error) {
+ var cinder *api.CinderVolumeSource
+ if spec.VolumeSource.Cinder != nil {
+ cinder = spec.VolumeSource.Cinder
+ } else {
+ cinder = spec.PersistentVolumeSource.Cinder
+ }
+
+ pdName := cinder.VolumeID
+ fsType := cinder.FSType
+ readOnly := cinder.ReadOnly
+
+ return &cinderVolumeBuilder{
+ cinderVolume: &cinderVolume{
+ podUID: podUID,
+ volName: spec.Name,
+ pdName: pdName,
+ mounter: mounter,
+ manager: manager,
+ plugin: plugin,
+ },
+ fsType: fsType,
+ readOnly: readOnly,
+ blockDeviceMounter: &cinderSafeFormatAndMount{mounter, exec.New()}}, nil
+}
+
+func (plugin *cinderPlugin) NewCleaner(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
+ return plugin.newCleanerInternal(volName, podUID, &CinderDiskUtil{}, mounter)
+}
+
+func (plugin *cinderPlugin) newCleanerInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Cleaner, error) {
+ return &cinderVolumeCleaner{
+ &cinderVolume{
+ podUID: podUID,
+ volName: volName,
+ manager: manager,
+ mounter: mounter,
+ plugin: plugin,
+ }}, nil
+}
+
+// Abstract interface to PD operations.
+type cdManager interface {
+ // Attaches the disk to the kubelet's host machine.
+ AttachDisk(builder *cinderVolumeBuilder, globalPDPath string) error
+ // Detaches the disk from the kubelet's host machine.
+ DetachDisk(cleaner *cinderVolumeCleaner) error
+}
+
+var _ volume.Builder = &cinderVolumeBuilder{}
+
+type cinderVolumeBuilder struct {
+ *cinderVolume
+ fsType string
+ readOnly bool
+ blockDeviceMounter mount.Interface
+}
+
+// cinderPersistentDisk volumes are disk resources provided by Cinder
+// that are attached to the kubelet's host machine and exposed to the pod.
+type cinderVolume struct {
+ volName string
+ podUID types.UID
+ // Unique identifier of the volume, used to find the disk resource in the provider.
+ pdName string
+ // Filesystem type, optional.
+ fsType string
+ // Specifies the partition to mount
+ //partition string
+ // Specifies whether the disk will be attached as read-only.
+ readOnly bool
+ // Utility interface that provides API calls to the provider to attach/detach disks.
+ manager cdManager
+ // Mounter interface that provides system calls to mount the global path to the pod local path.
+ mounter mount.Interface
+ // diskMounter provides the interface that is used to mount the actual block device.
+ blockDeviceMounter mount.Interface
+ plugin *cinderPlugin
+}
+
+func detachDiskLogError(cd *cinderVolume) {
+ err := cd.manager.DetachDisk(&cinderVolumeCleaner{cd})
+ if err != nil {
+ glog.Warningf("Failed to detach disk: %v (%v)", cd, err)
+ }
+}
+
+func (b *cinderVolumeBuilder) SetUp() error {
+ return b.SetUpAt(b.GetPath())
+}
+
+// SetUp attaches the disk and bind mounts to the volume path.
+func (b *cinderVolumeBuilder) SetUpAt(dir string) error {
+ // TODO: handle failed mounts here.
+ notmnt, err := b.mounter.IsLikelyNotMountPoint(dir)
+ glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notmnt, err)
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ if !notmnt {
+ return nil
+ }
+ globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName)
+ if err := b.manager.AttachDisk(b, globalPDPath); err != nil {
+ return err
+ }
+
+ options := []string{"bind"}
+ if b.readOnly {
+ options = append(options, "ro")
+ }
+
+ if err := os.MkdirAll(dir, 0750); err != nil {
+ // TODO: we should really eject the attach/detach out into its own control loop.
+ detachDiskLogError(b.cinderVolume)
+ return err
+ }
+
+ // Perform a bind mount to the full path to allow duplicate mounts of the same PD.
+ err = b.mounter.Mount(globalPDPath, dir, "", options)
+ if err != nil {
+ notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
+ if mntErr != nil {
+ glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
+ return err
+ }
+ if !notmnt {
+ if mntErr = b.mounter.Unmount(dir); mntErr != nil {
+ glog.Errorf("Failed to unmount: %v", mntErr)
+ return err
+ }
+ notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
+ if mntErr != nil {
+ glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
+ return err
+ }
+ if !notmnt {
+ // This is very odd, we don't expect it. We'll try again next sync loop.
+ glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath())
+ return err
+ }
+ }
+ os.Remove(dir)
+ // TODO: we should really eject the attach/detach out into its own control loop.
+ detachDiskLogError(b.cinderVolume)
+ return err
+ }
+
+ return nil
+}
+
+func (b *cinderVolumeBuilder) IsReadOnly() bool {
+ return b.readOnly
+}
+
+func makeGlobalPDName(host volume.VolumeHost, devName string) string {
+ return path.Join(host.GetPluginDir(cinderVolumePluginName), "mounts", devName)
+}
+
+func (cd *cinderVolume) GetPath() string {
+ name := cinderVolumePluginName
+ return cd.plugin.host.GetPodVolumeDir(cd.podUID, util.EscapeQualifiedNameForDisk(name), cd.volName)
+}
+
+type cinderVolumeCleaner struct {
+ *cinderVolume
+}
+
+var _ volume.Cleaner = &cinderVolumeCleaner{}
+
+func (c *cinderVolumeCleaner) TearDown() error {
+ return c.TearDownAt(c.GetPath())
+}
+
+// Unmounts the bind mount, and detaches the disk only if the PD
+// resource was the last reference to that disk on the kubelet.
+func (c *cinderVolumeCleaner) TearDownAt(dir string) error {
+ notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
+ if err != nil {
+ return err
+ }
+ if notmnt {
+ return os.Remove(dir)
+ }
+ refs, err := mount.GetMountRefs(c.mounter, dir)
+ if err != nil {
+ return err
+ }
+ if err := c.mounter.Unmount(dir); err != nil {
+ return err
+ }
+ glog.Infof("successfully unmounted: %s\n", dir)
+
+ // If refCount is 1, then all bind mounts have been removed, and the
+ // remaining reference is the global mount. It is safe to detach.
+ if len(refs) == 1 {
+ c.pdName = path.Base(refs[0])
+ if err := c.manager.DetachDisk(c); err != nil {
+ return err
+ }
+ }
+ notmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
+ if mntErr != nil {
+ glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
+ return mntErr
+ }
+ if !notmnt {
+ if err := os.Remove(dir); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/pkg/volume/cinder/cinder_test.go b/pkg/volume/cinder/cinder_test.go
new file mode 100644
index 00000000000..f3389bc902f
--- /dev/null
+++ b/pkg/volume/cinder/cinder_test.go
@@ -0,0 +1,135 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinder
+
+import (
+ "os"
+ "testing"
+
+ "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/types"
+ "k8s.io/kubernetes/pkg/util/mount"
+ "k8s.io/kubernetes/pkg/volume"
+)
+
+func TestCanSupport(t *testing.T) {
+ plugMgr := volume.VolumePluginMgr{}
+ plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
+
+ plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
+ if err != nil {
+ t.Errorf("Can't find the plugin by name")
+ }
+ if plug.Name() != "kubernetes.io/cinder" {
+ t.Errorf("Wrong name: %s", plug.Name())
+ }
+ if !plug.CanSupport(&volume.Spec{
+ Name: "foo",
+ VolumeSource: api.VolumeSource{Cinder: &api.CinderVolumeSource{}}}) {
+ t.Errorf("Expected true")
+ }
+
+ if !plug.CanSupport(&volume.Spec{Name: "foo", PersistentVolumeSource: api.PersistentVolumeSource{Cinder: &api.CinderVolumeSource{}}}) {
+ t.Errorf("Expected true")
+ }
+}
+
+type fakePDManager struct{}
+
+func (fake *fakePDManager) AttachDisk(b *cinderVolumeBuilder, globalPDPath string) error {
+ globalPath := makeGlobalPDName(b.plugin.host, b.pdName)
+ err := os.MkdirAll(globalPath, 0750)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (fake *fakePDManager) DetachDisk(c *cinderVolumeCleaner) error {
+ globalPath := makeGlobalPDName(c.plugin.host, c.pdName)
+ err := os.RemoveAll(globalPath)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func TestPlugin(t *testing.T) {
+ plugMgr := volume.VolumePluginMgr{}
+ plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
+
+ plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
+ if err != nil {
+ t.Errorf("Can't find the plugin by name")
+ }
+ spec := &api.Volume{
+ Name: "vol1",
+ VolumeSource: api.VolumeSource{
+ Cinder: &api.CinderVolumeSource{
+ VolumeID: "pd",
+ FSType: "ext4",
+ },
+ },
+ }
+ builder, err := plug.(*cinderPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
+ if err != nil {
+ t.Errorf("Failed to make a new Builder: %v", err)
+ }
+ if builder == nil {
+ t.Errorf("Got a nil Builder: %v")
+ }
+
+ path := builder.GetPath()
+ if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~cinder/vol1" {
+ t.Errorf("Got unexpected path: %s", path)
+ }
+
+ if err := builder.SetUp(); err != nil {
+ t.Errorf("Expected success, got: %v", err)
+ }
+ if _, err := os.Stat(path); err != nil {
+ if os.IsNotExist(err) {
+ t.Errorf("SetUp() failed, volume path not created: %s", path)
+ } else {
+ t.Errorf("SetUp() failed: %v", err)
+ }
+ }
+
+ cleaner, err := plug.(*cinderPlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
+ if err != nil {
+ t.Errorf("Failed to make a new Cleaner: %v", err)
+ }
+ if cleaner == nil {
+ t.Errorf("Got a nil Cleaner: %v")
+ }
+
+ if err := cleaner.TearDown(); err != nil {
+ t.Errorf("Expected success, got: %v", err)
+ }
+ if _, err := os.Stat(path); err == nil {
+ t.Errorf("TearDown() failed, volume path still exists: %s", path)
+ } else if !os.IsNotExist(err) {
+ t.Errorf("SetUp() failed: %v", err)
+ }
+}
diff --git a/pkg/volume/cinder/cinder_util.go b/pkg/volume/cinder/cinder_util.go
new file mode 100644
index 00000000000..685333a436e
--- /dev/null
+++ b/pkg/volume/cinder/cinder_util.go
@@ -0,0 +1,212 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinder
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/golang/glog"
+ "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
+ "k8s.io/kubernetes/pkg/util/exec"
+ "k8s.io/kubernetes/pkg/util/mount"
+)
+
+type CinderDiskUtil struct{}
+
+// Attaches a disk specified by a CinderVolumeSource to the current kubelet.
+// Mounts the disk to its global path.
+func (util *CinderDiskUtil) AttachDisk(b *cinderVolumeBuilder, globalPDPath string) error {
+ options := []string{}
+ if b.readOnly {
+ options = append(options, "ro")
+ }
+ cloud := b.plugin.host.GetCloudProvider()
+ if cloud == nil {
+ glog.Errorf("Cloud provider not initialized properly")
+ return errors.New("Cloud provider not initialized properly")
+ }
+ diskid, err := cloud.(*openstack.OpenStack).AttachDisk(b.pdName)
+ if err != nil {
+ return err
+ }
+
+ var devicePath string
+ numTries := 0
+ for {
+ devicePath = makeDevicePath(diskid)
+ // probe the attached vol so that symlink in /dev/disk/by-id is created
+ probeAttachedVolume()
+
+ _, err := os.Stat(devicePath)
+ if err == nil {
+ break
+ }
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ numTries++
+ if numTries == 10 {
+ return errors.New("Could not attach disk: Timeout after 60s")
+ }
+ time.Sleep(time.Second * 6)
+ }
+
+ notmnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ if err := os.MkdirAll(globalPDPath, 0750); err != nil {
+ return err
+ }
+ notmnt = true
+ } else {
+ return err
+ }
+ }
+ if notmnt {
+ err = b.blockDeviceMounter.Mount(devicePath, globalPDPath, b.fsType, options)
+ if err != nil {
+ os.Remove(globalPDPath)
+ return err
+ }
+ glog.V(2).Infof("Safe mount successful: %q\n", devicePath)
+ }
+ return nil
+}
+
+func makeDevicePath(diskid string) string {
+ files, _ := ioutil.ReadDir("/dev/disk/by-id/")
+ for _, f := range files {
+ if strings.Contains(f.Name(), "virtio-") {
+ devid_prefix := f.Name()[len("virtio-"):len(f.Name())]
+ if strings.Contains(diskid, devid_prefix) {
+ glog.V(4).Infof("Found disk attached as %q; full devicepath: %s\n", f.Name(), path.Join("/dev/disk/by-id/", f.Name()))
+ return path.Join("/dev/disk/by-id/", f.Name())
+ }
+ }
+ }
+ glog.Warningf("Failed to find device for the diskid: %q\n", diskid)
+ return ""
+}
+
+// Unmounts the device and detaches the disk from the kubelet's host machine.
+func (util *CinderDiskUtil) DetachDisk(cd *cinderVolumeCleaner) error {
+ globalPDPath := makeGlobalPDName(cd.plugin.host, cd.pdName)
+ if err := cd.mounter.Unmount(globalPDPath); err != nil {
+ return err
+ }
+ if err := os.Remove(globalPDPath); err != nil {
+ return err
+ }
+ glog.V(2).Infof("Successfully unmounted main device: %s\n", globalPDPath)
+
+ cloud := cd.plugin.host.GetCloudProvider()
+ if cloud == nil {
+ glog.Errorf("Cloud provider not initialized properly")
+ return errors.New("Cloud provider not initialized properly")
+ }
+
+ if err := cloud.(*openstack.OpenStack).DetachDisk(cd.pdName); err != nil {
+ return err
+ }
+ glog.V(2).Infof("Successfully detached cinder volume %s", cd.pdName)
+ return nil
+}
+
+type cinderSafeFormatAndMount struct {
+ mount.Interface
+ runner exec.Interface
+}
+
+/*
+The functions below depend on the following executables; this will have to be ported to more generic implementations:
+/bin/lsblk
+/sbin/mkfs.ext3 or /sbin/mkfs.ext4
+/usr/bin/udevadm
+*/
+func (diskmounter *cinderSafeFormatAndMount) Mount(device string, target string, fstype string, options []string) error {
+ fmtRequired, err := isFormatRequired(device, fstype, diskmounter)
+ if err != nil {
+ glog.Warningf("Failed to determine if formating is required: %v\n", err)
+ //return err
+ }
+ if fmtRequired {
+ glog.V(2).Infof("Formatting of the vol required")
+ if _, err := formatVolume(device, fstype, diskmounter); err != nil {
+ glog.Warningf("Failed to format volume: %v\n", err)
+ return err
+ }
+ }
+ return diskmounter.Interface.Mount(device, target, fstype, options)
+}
+
+func isFormatRequired(devicePath string, fstype string, exec *cinderSafeFormatAndMount) (bool, error) {
+ args := []string{"-f", devicePath}
+ glog.V(4).Infof("exec-ing: /bin/lsblk %v\n", args)
+ cmd := exec.runner.Command("/bin/lsblk", args...)
+ dataOut, err := cmd.CombinedOutput()
+ if err != nil {
+ glog.Warningf("error running /bin/lsblk\n%s", string(dataOut))
+ return false, err
+ }
+ if len(dataOut) == 0 {
+ glog.Warningf("Failed to get any response from /bin/lsblk")
+ return false, errors.New("Failed to get response from /bin/lsblk")
+ }
+ // If lsblk already reports the requested fstype on the device, no formatting is needed.
+ if strings.Contains(string(dataOut), fstype) {
+ return false, nil
+ }
+ return true, nil
+}
+
+func formatVolume(devicePath string, fstype string, exec *cinderSafeFormatAndMount) (bool, error) {
+ if fstype != "ext4" && fstype != "ext3" {
+ glog.Warningf("Unsupported format type: %q\n", fstype)
+ return false, fmt.Errorf("Unsupported format type: %q", fstype)
+ }
+ args := []string{devicePath}
+ cmd := exec.runner.Command(fmt.Sprintf("/sbin/mkfs.%s", fstype), args...)
+ dataOut, err := cmd.CombinedOutput()
+ if err != nil {
+ glog.Warningf("error running /sbin/mkfs for fstype: %q\n%s", fstype, string(dataOut))
+ return false, err
+ }
+ glog.V(2).Infof("Successfully formatted device: %q with fstype %q; output:\n%q\n", devicePath, fstype, string(dataOut))
+ return true, nil
+}
+
+func probeAttachedVolume() error {
+ executor := exec.New()
+ args := []string{"trigger"}
+ cmd := executor.Command("/usr/bin/udevadm", args...)
+ _, err := cmd.CombinedOutput()
+ if err != nil {
+ glog.Errorf("error running udevadm trigger %v\n", err)
+ return err
+ }
+ glog.V(4).Infof("Successfully probed all attachments")
+ return nil
+}
diff --git a/pkg/volume/cinder/cinder_util_test.go b/pkg/volume/cinder/cinder_util_test.go
new file mode 100644
index 00000000000..44ea17c05a1
--- /dev/null
+++ b/pkg/volume/cinder/cinder_util_test.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cinder
+
+import (
+ "testing"
+
+ "k8s.io/kubernetes/pkg/util/exec"
+ "k8s.io/kubernetes/pkg/util/mount"
+)
+
+func TestSafeFormatAndMount(t *testing.T) {
+ tests := []struct {
+ fstype string
+ expectedArgs []string
+ err error
+ }{
+ {
+ fstype: "ext4",
+ expectedArgs: []string{"/dev/foo", "/mnt/bar"},
+ },
+ {
+ fstype: "ext3",
+ expectedArgs: []string{"/dev/foo/blah", "/mnt/bar/blah"},
+ },
+ }
+ for _, test := range tests {
+
+ var cmdOut string
+ var argsOut []string
+ fake := exec.FakeExec{
+ CommandScript: []exec.FakeCommandAction{
+ func(cmd string, args ...string) exec.Cmd {
+ cmdOut = cmd
+ argsOut = args
+ fake := exec.FakeCmd{
+ CombinedOutputScript: []exec.FakeCombinedOutputAction{
+ func() ([]byte, error) { return []byte{}, test.err },
+ },
+ }
+ return exec.InitFakeCmd(&fake, cmd, args...)
+ },
+ },
+ }
+
+ mounter := cinderSafeFormatAndMount{
+ &mount.FakeMounter{},
+ &fake,
+ }
+
+ err := mounter.Mount("/dev/foo", "/mnt/bar", test.fstype, nil)
+ if test.err == nil && err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ if test.err != nil {
+ if err == nil {
+ t.Errorf("unexpected non-error")
+ }
+ return
+ }
+ if cmdOut != "/bin/lsblk" {
+ t.Errorf("unexpected command: %s", cmdOut)
+ }
+ if len(argsOut) != len(test.expectedArgs) {
+ t.Errorf("unexpected args: %v, expected: %v", argsOut, test.expectedArgs)
+ }
+ }
+}
diff --git a/pkg/volume/cinder/doc.go b/pkg/volume/cinder/doc.go
new file mode 100644
index 00000000000..a3c5da052f7
--- /dev/null
+++ b/pkg/volume/cinder/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package cinder contains the internal representation of cinder volumes.
+package cinder
diff --git a/pkg/volume/plugins.go b/pkg/volume/plugins.go
index afd221c7ee0..02c1bd88882 100644
--- a/pkg/volume/plugins.go
+++ b/pkg/volume/plugins.go
@@ -24,6 +24,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
+ "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/errors"
@@ -121,6 +122,9 @@ type VolumeHost interface {
// the provided spec. See comments on NewWrapperBuilder for more
// context.
NewWrapperCleaner(spec *Spec, podUID types.UID, mounter mount.Interface) (Cleaner, error)
+
+ // Get the cloud provider from the kubelet.
+ GetCloudProvider() cloudprovider.Interface
}
// VolumePluginMgr tracks registered plugins.
diff --git a/pkg/volume/testing.go b/pkg/volume/testing.go
index f9406bff945..919fe4b873b 100644
--- a/pkg/volume/testing.go
+++ b/pkg/volume/testing.go
@@ -22,6 +22,7 @@ import (
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
+ "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/mount"
@@ -32,10 +33,11 @@ type fakeVolumeHost struct {
rootDir string
kubeClient client.Interface
pluginMgr VolumePluginMgr
+ cloud cloudprovider.Interface
}
func NewFakeVolumeHost(rootDir string, kubeClient client.Interface, plugins []VolumePlugin) *fakeVolumeHost {
- host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient}
+ host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient, cloud: nil}
host.pluginMgr.InitPlugins(plugins, host)
return host
}
@@ -56,6 +58,10 @@ func (f *fakeVolumeHost) GetKubeClient() client.Interface {
return f.kubeClient
}
+func (f *fakeVolumeHost) GetCloudProvider() cloudprovider.Interface {
+ return f.cloud
+}
+
func (f *fakeVolumeHost) NewWrapperBuilder(spec *Spec, pod *api.Pod, opts VolumeOptions, mounter mount.Interface) (Builder, error) {
plug, err := f.pluginMgr.FindPluginBySpec(spec)
if err != nil {