CephFS: Add subdir Option for Changing Mounted Root Directory

This commit is contained in:
Michael Schmidt 2016-02-01 22:01:19 +01:00
parent 171c2ecbe7
commit 1373e0139d
4 changed files with 18 additions and 3 deletions

View File

@ -45,6 +45,7 @@ Then get the keyring from the Ceph cluster and copy it to */etc/ceph/keyring*.
Once you have installed Ceph and a Kubernetes cluster, you can create a pod based on my examples [cephfs.yaml](cephfs.yaml) and [cephfs-with-secret.yaml](cephfs-with-secret.yaml). In the pod yaml, you need to provide the following information.
- *monitors*: Array of Ceph monitors.
- *path*: Used as the mounted root, rather than the full Ceph tree. If not provided, default */* is used.
- *user*: The RADOS user name. If not provided, default *admin* is used.
- *secretFile*: The path to the keyring file. If not provided, default */etc/ceph/user.secret* is used.
- *secretRef*: Reference to Ceph authentication secrets. If provided, *secret* overrides *secretFile*.

View File

@ -637,6 +637,8 @@ type CinderVolumeSource struct {
type CephFSVolumeSource struct { type CephFSVolumeSource struct {
// Required: Monitors is a collection of Ceph monitors // Required: Monitors is a collection of Ceph monitors
Monitors []string `json:"monitors"` Monitors []string `json:"monitors"`
// Optional: Used as the mounted root, rather than the full Ceph tree, default is /
Path string `json:"path,omitempty"`
// Optional: User is the rados user name, default is admin // Optional: User is the rados user name, default is admin
User string `json:"user,omitempty"` User string `json:"user,omitempty"`
// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret

View File

@ -590,6 +590,8 @@ type CephFSVolumeSource struct {
// Required: Monitors is a collection of Ceph monitors // Required: Monitors is a collection of Ceph monitors
// More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it
Monitors []string `json:"monitors"` Monitors []string `json:"monitors"`
// Optional: Used as the mounted root, rather than the full Ceph tree, default is /
Path string `json:"path,omitempty"`
// Optional: User is the rados user name, default is admin // Optional: User is the rados user name, default is admin
// More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it
User string `json:"user,omitempty"` User string `json:"user,omitempty"`

View File

@ -19,12 +19,13 @@ package cephfs
import ( import (
"fmt" "fmt"
"os" "os"
"strings"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings" utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
) )
@ -92,6 +93,13 @@ func (plugin *cephfsPlugin) newBuilderInternal(spec *volume.Spec, podUID types.U
if id == "" { if id == "" {
id = "admin" id = "admin"
} }
path := cephvs.Path
if path == "" {
path = "/"
}
if !strings.HasPrefix(path, "/") {
path = "/" + path
}
secret_file := cephvs.SecretFile secret_file := cephvs.SecretFile
if secret_file == "" { if secret_file == "" {
secret_file = "/etc/ceph/" + id + ".secret" secret_file = "/etc/ceph/" + id + ".secret"
@ -102,6 +110,7 @@ func (plugin *cephfsPlugin) newBuilderInternal(spec *volume.Spec, podUID types.U
podUID: podUID, podUID: podUID,
volName: spec.Name(), volName: spec.Name(),
mon: cephvs.Monitors, mon: cephvs.Monitors,
path: path,
secret: secret, secret: secret,
id: id, id: id,
secret_file: secret_file, secret_file: secret_file,
@ -138,6 +147,7 @@ type cephfs struct {
volName string volName string
podUID types.UID podUID types.UID
mon []string mon []string
path string
id string id string
secret string secret string
secret_file string secret_file string
@ -208,7 +218,7 @@ func (cephfsVolume *cephfsCleaner) TearDownAt(dir string) error {
// GatePath creates global mount path // GatePath creates global mount path
func (cephfsVolume *cephfs) GetPath() string { func (cephfsVolume *cephfs) GetPath() string {
name := cephfsPluginName name := cephfsPluginName
return cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, strings.EscapeQualifiedNameForDisk(name), cephfsVolume.volName) return cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cephfsVolume.volName)
} }
func (cephfsVolume *cephfs) cleanup(dir string) error { func (cephfsVolume *cephfs) cleanup(dir string) error {
@ -261,7 +271,7 @@ func (cephfsVolume *cephfs) execMount(mountpoint string) error {
for i = 0; i < l-1; i++ { for i = 0; i < l-1; i++ {
src += hosts[i] + "," src += hosts[i] + ","
} }
src += hosts[i] + ":/" src += hosts[i] + ":" + cephfsVolume.path
if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", opt); err != nil { if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", opt); err != nil {
return fmt.Errorf("CephFS: mount failed: %v", err) return fmt.Errorf("CephFS: mount failed: %v", err)