CephFS: Add subdir Option for Changing Mounted Root Directory
commit 1373e0139d
parent 171c2ecbe7
@@ -45,6 +45,7 @@ Then get the keyring from the Ceph cluster and copy it to */etc/ceph/keyring*.
 Once you have installed Ceph and a Kubernetes cluster, you can create a pod based on my examples [cephfs.yaml](cephfs.yaml) and [cephfs-with-secret.yaml](cephfs-with-secret.yaml). In the pod YAML you need to provide the following information:
 - *monitors*: Array of Ceph monitors.
+- *path*: Used as the mounted root, rather than the full Ceph tree. If not provided, the default */* is used.
 - *user*: The RADOS user name. If not provided, the default *admin* is used.
 - *secretFile*: The path to the keyring file. If not provided, the default */etc/ceph/user.secret* is used.
 - *secretRef*: Reference to Ceph authentication secrets. If provided, *secret* overrides *secretFile*.
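For orientation, here is a minimal sketch of the same fields expressed against the in-tree Go type that this commit extends (see the type hunks below). It assumes the era-appropriate `k8s.io/kubernetes/pkg/api` package is importable; the monitor addresses, subdirectory, and keyring path are placeholders, not values from the upstream example YAML.

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

func main() {
	// Placeholder values; substitute your cluster's monitors and desired subdirectory.
	src := api.CephFSVolumeSource{
		Monitors:   []string{"mon1.example.com:6789", "mon2.example.com:6789"},
		Path:       "/kubernetes",            // new in this commit; treated as "/" when empty
		User:       "admin",                  // defaults to "admin" when empty
		SecretFile: "/etc/ceph/admin.secret", // defaults to "/etc/ceph/<user>.secret" when empty
	}
	fmt.Printf("%+v\n", src)
}
```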
@@ -637,6 +637,8 @@ type CinderVolumeSource struct {
 type CephFSVolumeSource struct {
     // Required: Monitors is a collection of Ceph monitors
     Monitors []string `json:"monitors"`
+    // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+    Path string `json:"path,omitempty"`
     // Optional: User is the rados user name, default is admin
     User string `json:"user,omitempty"`
     // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
@@ -590,6 +590,8 @@ type CephFSVolumeSource struct {
     // Required: Monitors is a collection of Ceph monitors
     // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it
     Monitors []string `json:"monitors"`
+    // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+    Path string `json:"path,omitempty"`
     // Optional: User is the rados user name, default is admin
     // More info: http://releases.k8s.io/HEAD/examples/cephfs/README.md#how-to-use-it
     User string `json:"user,omitempty"`
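Both type hunks add the same optional `Path` field, once to the internal API struct and once to the versioned one. Its `path,omitempty` tag means an unset path simply drops out of the serialized object; the plugin, not the API, supplies the */* default. A self-contained sketch with a local stand-in struct that copies the tags above:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// cephFSVolumeSource is a local stand-in that copies the JSON tags from the
// hunks above; it is not the real API type.
type cephFSVolumeSource struct {
	Monitors []string `json:"monitors"`
	Path     string   `json:"path,omitempty"`
	User     string   `json:"user,omitempty"`
}

func main() {
	withPath := cephFSVolumeSource{Monitors: []string{"mon1:6789"}, Path: "/kubernetes"}
	withoutPath := cephFSVolumeSource{Monitors: []string{"mon1:6789"}}

	a, _ := json.Marshal(withPath)
	b, _ := json.Marshal(withoutPath)

	fmt.Println(string(a)) // {"monitors":["mon1:6789"],"path":"/kubernetes"}
	fmt.Println(string(b)) // {"monitors":["mon1:6789"]} -- "path" omitted; the plugin defaults it to "/"
}
```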
@@ -19,12 +19,13 @@ package cephfs
 import (
     "fmt"
     "os"
+    "strings"
 
     "github.com/golang/glog"
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/types"
     "k8s.io/kubernetes/pkg/util/mount"
-    "k8s.io/kubernetes/pkg/util/strings"
+    utilstrings "k8s.io/kubernetes/pkg/util/strings"
     "k8s.io/kubernetes/pkg/volume"
 )
@@ -92,6 +93,13 @@ func (plugin *cephfsPlugin) newBuilderInternal(spec *volume.Spec, podUID types.U
     if id == "" {
         id = "admin"
     }
+    path := cephvs.Path
+    if path == "" {
+        path = "/"
+    }
+    if !strings.HasPrefix(path, "/") {
+        path = "/" + path
+    }
     secret_file := cephvs.SecretFile
     if secret_file == "" {
         secret_file = "/etc/ceph/" + id + ".secret"
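The block added above defaults an empty path to */* and forces a leading slash, so a relative value like *kubernetes* still mounts */kubernetes*. A self-contained sketch of that defaulting logic; `normalizePath` is an illustrative name, the plugin inlines this code:

```go
package main

import (
	"fmt"
	"strings"
)

// normalizePath mirrors the defaulting added in newBuilderInternal:
// an empty path becomes "/", and a relative path gains a leading "/".
// (Illustrative helper; the plugin inlines this logic.)
func normalizePath(path string) string {
	if path == "" {
		path = "/"
	}
	if !strings.HasPrefix(path, "/") {
		path = "/" + path
	}
	return path
}

func main() {
	for _, p := range []string{"", "kubernetes", "/kubernetes/data"} {
		fmt.Printf("%q -> %q\n", p, normalizePath(p))
	}
}
```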
@@ -102,6 +110,7 @@ func (plugin *cephfsPlugin) newBuilderInternal(spec *volume.Spec, podUID types.U
         podUID:      podUID,
         volName:     spec.Name(),
         mon:         cephvs.Monitors,
+        path:        path,
         secret:      secret,
         id:          id,
         secret_file: secret_file,
@@ -138,6 +147,7 @@ type cephfs struct {
     volName     string
     podUID      types.UID
     mon         []string
+    path        string
     id          string
     secret      string
     secret_file string
@@ -208,7 +218,7 @@ func (cephfsVolume *cephfsCleaner) TearDownAt(dir string) error {
 // GetPath creates global mount path
 func (cephfsVolume *cephfs) GetPath() string {
     name := cephfsPluginName
-    return cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, strings.EscapeQualifiedNameForDisk(name), cephfsVolume.volName)
+    return cephfsVolume.plugin.host.GetPodVolumeDir(cephfsVolume.podUID, utilstrings.EscapeQualifiedNameForDisk(name), cephfsVolume.volName)
 }
 
 func (cephfsVolume *cephfs) cleanup(dir string) error {
@@ -261,7 +271,7 @@ func (cephfsVolume *cephfs) execMount(mountpoint string) error {
     for i = 0; i < l-1; i++ {
         src += hosts[i] + ","
     }
-    src += hosts[i] + ":/"
+    src += hosts[i] + ":" + cephfsVolume.path
 
     if err := cephfsVolume.mounter.Mount(src, mountpoint, "ceph", opt); err != nil {
         return fmt.Errorf("CephFS: mount failed: %v", err)
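With this change `execMount` appends the configured path instead of the hard-coded `:/` when assembling the mount source from the monitor list. A self-contained sketch of that string construction; `buildMountSource` is an illustrative name, the plugin builds the string inline:

```go
package main

import "fmt"

// buildMountSource joins the monitor addresses with commas and appends ":" plus
// the mounted root, mirroring the loop in execMount.
// (Illustrative helper; the plugin builds this string inline.)
func buildMountSource(monitors []string, path string) string {
	src := ""
	i := 0
	for ; i < len(monitors)-1; i++ {
		src += monitors[i] + ","
	}
	return src + monitors[i] + ":" + path
}

func main() {
	mons := []string{"mon1:6789", "mon2:6789", "mon3:6789"}
	// Before this commit the suffix was the hard-coded ":/"; now it is the
	// configured path, e.g. "mon1:6789,mon2:6789,mon3:6789:/kubernetes".
	fmt.Println(buildMountSource(mons, "/kubernetes"))
}
```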