refactor rbd volume to separate builder and cleaner

jiangyaoguo 2015-07-24 17:20:42 +08:00
parent 5bd82ffe6d
commit 72e899e1b5
4 changed files with 105 additions and 92 deletions

View File

@@ -33,14 +33,14 @@ import (
 type diskManager interface {
     MakeGlobalPDName(disk rbd) string
     // Attaches the disk to the kubelet's host machine.
-    AttachDisk(disk rbd) error
+    AttachDisk(disk rbdBuilder) error
     // Detaches the disk from the kubelet's host machine.
-    DetachDisk(disk rbd, mntPath string) error
+    DetachDisk(disk rbdCleaner, mntPath string) error
 }

 // utility to mount a disk based filesystem
-func diskSetUp(manager diskManager, disk rbd, volPath string, mounter mount.Interface) error {
-    globalPDPath := manager.MakeGlobalPDName(disk)
+func diskSetUp(manager diskManager, b rbdBuilder, volPath string, mounter mount.Interface) error {
+    globalPDPath := manager.MakeGlobalPDName(*b.rbd)
     // TODO: handle failed mounts here.
     mountpoint, err := mounter.IsMountPoint(volPath)
@@ -51,7 +51,7 @@ func diskSetUp(manager diskManager, disk rbd, volPath string, mounter mount.Interface) error {
     if mountpoint {
         return nil
     }
-    if err := manager.AttachDisk(disk); err != nil {
+    if err := manager.AttachDisk(b); err != nil {
         glog.Errorf("failed to attach disk")
         return err
     }
@@ -62,7 +62,7 @@ func diskSetUp(manager diskManager, disk rbd, volPath string, mounter mount.Interface) error {
     }
     // Perform a bind mount to the full path to allow duplicate mounts of the same disk.
     options := []string{"bind"}
-    if disk.ReadOnly {
+    if b.ReadOnly {
         options = append(options, "ro")
     }
     err = mounter.Mount(globalPDPath, volPath, "", options)
@@ -74,7 +74,7 @@ func diskSetUp(manager diskManager, disk rbd, volPath string, mounter mount.Interface) error {
 }

 // utility to tear down a disk based filesystem
-func diskTearDown(manager diskManager, disk rbd, volPath string, mounter mount.Interface) error {
+func diskTearDown(manager diskManager, c rbdCleaner, volPath string, mounter mount.Interface) error {
     mountpoint, err := mounter.IsMountPoint(volPath)
     if err != nil {
         glog.Errorf("cannot validate mountpoint %s", volPath)
@@ -97,7 +97,7 @@ func diskTearDown(manager diskManager, disk rbd, volPath string, mounter mount.Interface) error {
     // remaining reference is the global mount. It is safe to detach.
     if len(refs) == 1 {
         mntPath := refs[0]
-        if err := manager.DetachDisk(disk, mntPath); err != nil {
+        if err := manager.DetachDisk(c, mntPath); err != nil {
             glog.Errorf("failed to detach disk from %s", mntPath)
             return err
         }
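The change above narrows what each helper needs: AttachDisk now takes the full rbdBuilder, which carries the attach-only configuration, while DetachDisk only needs the rbdCleaner. A minimal, self-contained Go sketch of the pattern, using simplified hypothetical types rather than the real kubelet ones:

// Minimal sketch, not the kubelet code: simplified types to show why the
// helpers take the concrete builder/cleaner types instead of the shared struct.
package main

import "fmt"

// rbd holds the state both setup and teardown need.
type rbd struct {
    image    string
    readOnly bool
}

// rbdBuilder adds attach-only configuration on top of the shared struct.
type rbdBuilder struct {
    *rbd
    mon []string // monitors/credentials are only needed to attach
}

// rbdCleaner needs nothing beyond the shared struct.
type rbdCleaner struct {
    *rbd
}

type diskManager interface {
    attachDisk(b rbdBuilder) error
    detachDisk(c rbdCleaner, mntPath string) error
}

func diskSetUp(m diskManager, b rbdBuilder) error {
    if b.readOnly {
        fmt.Println("will bind-mount read-only")
    }
    return m.attachDisk(b)
}

func diskTearDown(m diskManager, c rbdCleaner, mntPath string) error {
    return m.detachDisk(c, mntPath)
}

// fakeManager is a stand-in for the real rbd/Ceph interaction.
type fakeManager struct{}

func (fakeManager) attachDisk(b rbdBuilder) error {
    fmt.Println("attach", b.image, "via monitors", b.mon)
    return nil
}

func (fakeManager) detachDisk(c rbdCleaner, mntPath string) error {
    fmt.Println("detach", c.image, "from", mntPath)
    return nil
}

func main() {
    shared := &rbd{image: "foo", readOnly: true}
    _ = diskSetUp(fakeManager{}, rbdBuilder{rbd: shared, mon: []string{"10.0.0.1"}})
    _ = diskTearDown(fakeManager{}, rbdCleaner{rbd: shared}, "/mnt/foo")
}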

View File

@@ -120,20 +120,22 @@ func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID,
         keyring = "/etc/ceph/keyring"
     }
-    return &rbd{
-        podUID: podUID,
-        volName: spec.Name,
-        Mon: source.CephMonitors,
-        Image: source.RBDImage,
-        Pool: pool,
-        Id: id,
-        Keyring: keyring,
-        Secret: secret,
-        fsType: source.FSType,
-        ReadOnly: source.ReadOnly,
-        manager: manager,
-        mounter: mounter,
-        plugin: plugin,
+    return &rbdBuilder{
+        rbd: &rbd{
+            podUID: podUID,
+            volName: spec.Name,
+            Image: source.RBDImage,
+            Pool: pool,
+            ReadOnly: source.ReadOnly,
+            manager: manager,
+            mounter: mounter,
+            plugin: plugin,
+        },
+        Mon: source.CephMonitors,
+        Id: id,
+        Keyring: keyring,
+        Secret: secret,
+        fsType: source.FSType,
     }, nil
 }
@@ -143,26 +145,20 @@ func (plugin *rbdPlugin) NewCleaner(volName string, podUID types.UID, mounter mo
 }

 func (plugin *rbdPlugin) newCleanerInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Cleaner, error) {
-    return &rbd{
+    return &rbdCleaner{&rbd{
         podUID: podUID,
         volName: volName,
         manager: manager,
         mounter: mounter,
         plugin: plugin,
-    }, nil
+    }}, nil
 }

 type rbd struct {
     volName string
     podUID types.UID
-    // capitalized so they can be exported in persistRBD()
-    Mon []string
     Pool string
-    Id string
     Image string
-    Keyring string
-    Secret string
-    fsType string
     ReadOnly bool
     plugin *rbdPlugin
     mounter mount.Interface
@@ -176,37 +172,55 @@ func (rbd *rbd) GetPath() string {
     return rbd.plugin.host.GetPodVolumeDir(rbd.podUID, util.EscapeQualifiedNameForDisk(name), rbd.volName)
 }

-func (rbd *rbd) SetUp() error {
-    return rbd.SetUpAt(rbd.GetPath())
+type rbdBuilder struct {
+    *rbd
+    // capitalized so they can be exported in persistRBD()
+    Mon []string
+    Id string
+    Keyring string
+    Secret string
+    fsType string
 }

-func (rbd *rbd) SetUpAt(dir string) error {
+var _ volume.Builder = &rbdBuilder{}
+
+func (b *rbdBuilder) SetUp() error {
+    return b.SetUpAt(b.GetPath())
+}
+
+func (b *rbdBuilder) SetUpAt(dir string) error {
     // diskSetUp checks mountpoints and prevent repeated calls
-    err := diskSetUp(rbd.manager, *rbd, dir, rbd.mounter)
+    err := diskSetUp(b.manager, *b, dir, b.mounter)
     if err != nil {
         glog.Errorf("rbd: failed to setup")
         return err
     }
-    globalPDPath := rbd.manager.MakeGlobalPDName(*rbd)
+    globalPDPath := b.manager.MakeGlobalPDName(*b.rbd)
     // make mountpoint rw/ro work as expected
     //FIXME revisit pkg/util/mount and ensure rw/ro is implemented as expected
     mode := "rw"
-    if rbd.ReadOnly {
+    if b.ReadOnly {
         mode = "ro"
     }
-    rbd.plugin.execCommand("mount", []string{"-o", "remount," + mode, globalPDPath, dir})
+    b.plugin.execCommand("mount", []string{"-o", "remount," + mode, globalPDPath, dir})
     return nil
 }

-// Unmounts the bind mount, and detaches the disk only if the disk
-// resource was the last reference to that disk on the kubelet.
-func (rbd *rbd) TearDown() error {
-    return rbd.TearDownAt(rbd.GetPath())
+type rbdCleaner struct {
+    *rbd
 }

-func (rbd *rbd) TearDownAt(dir string) error {
-    return diskTearDown(rbd.manager, *rbd, dir, rbd.mounter)
+var _ volume.Cleaner = &rbdCleaner{}
+
+// Unmounts the bind mount, and detaches the disk only if the disk
+// resource was the last reference to that disk on the kubelet.
+func (c *rbdCleaner) TearDown() error {
+    return c.TearDownAt(c.GetPath())
+}
+
+func (c *rbdCleaner) TearDownAt(dir string) error {
+    return diskTearDown(c.manager, *c, dir, c.mounter)
 }

 func (plugin *rbdPlugin) execCommand(command string, args []string) ([]byte, error) {
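The rbd.go change splits the old all-in-one rbd struct into a shared core plus two thin wrappers that embed it, with `var _ volume.Builder = &rbdBuilder{}` and `var _ volume.Cleaner = &rbdCleaner{}` acting as compile-time checks. A standalone sketch of how embedding promotes the shared methods and how the blank-identifier assertions catch a missing method; the interface and field names here are illustrative, not the real kubelet volume interfaces:

// Minimal sketch (hypothetical names) of the embedding + compile-time
// assertion pattern: both wrappers embed *rbd, so shared methods such as
// GetPath are promoted to each wrapper, and the `var _ Iface = &T{}` lines
// fail the build if a required method is missing.
package main

import "fmt"

type Builder interface{ SetUp() error }
type Cleaner interface{ TearDown() error }

type rbd struct{ volName string }

// shared method, promoted to both wrappers through embedding
func (r *rbd) GetPath() string { return "/plugins/rbd/" + r.volName }

type rbdBuilder struct{ *rbd }
type rbdCleaner struct{ *rbd }

func (b *rbdBuilder) SetUp() error    { fmt.Println("set up", b.GetPath()); return nil }
func (c *rbdCleaner) TearDown() error { fmt.Println("tear down", c.GetPath()); return nil }

// compile-time checks: removing SetUp or TearDown above breaks the build here
var _ Builder = &rbdBuilder{}
var _ Cleaner = &rbdCleaner{}

func main() {
    shared := &rbd{volName: "vol1"}
    var b Builder = &rbdBuilder{rbd: shared}
    var c Cleaner = &rbdCleaner{rbd: shared}
    _ = b.SetUp()
    _ = c.TearDown()
}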

View File

@@ -47,8 +47,8 @@ type fakeDiskManager struct{}
 func (fake *fakeDiskManager) MakeGlobalPDName(disk rbd) string {
     return "/tmp/fake_rbd_path"
 }
-func (fake *fakeDiskManager) AttachDisk(disk rbd) error {
-    globalPath := disk.manager.MakeGlobalPDName(disk)
+func (fake *fakeDiskManager) AttachDisk(b rbdBuilder) error {
+    globalPath := b.manager.MakeGlobalPDName(*b.rbd)
     err := os.MkdirAll(globalPath, 0750)
     if err != nil {
         return err
@@ -56,8 +56,8 @@ func (fake *fakeDiskManager) AttachDisk(disk rbd) error {
     return nil
 }
-func (fake *fakeDiskManager) DetachDisk(disk rbd, mntPath string) error {
-    globalPath := disk.manager.MakeGlobalPDName(disk)
+func (fake *fakeDiskManager) DetachDisk(c rbdCleaner, mntPath string) error {
+    globalPath := c.manager.MakeGlobalPDName(*c.rbd)
     err := os.RemoveAll(globalPath)
     if err != nil {
         return err

View File

@@ -63,32 +63,32 @@ func (util *RBDUtil) MakeGlobalPDName(rbd rbd) string {
     return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
 }

-func (util *RBDUtil) rbdLock(rbd rbd, lock bool) error {
+func (util *RBDUtil) rbdLock(b rbdBuilder, lock bool) error {
     var err error
     var output, locker string
     var cmd []byte
     var secret_opt []string
-    if rbd.Secret != "" {
-        secret_opt = []string{"--key=" + rbd.Secret}
+    if b.Secret != "" {
+        secret_opt = []string{"--key=" + b.Secret}
     } else {
-        secret_opt = []string{"-k", rbd.Keyring}
+        secret_opt = []string{"-k", b.Keyring}
     }
     // construct lock id using host name and a magic prefix
     lock_id := "kubelet_lock_magic_" + node.GetHostname("")
-    l := len(rbd.Mon)
+    l := len(b.Mon)
     // avoid mount storm, pick a host randomly
     start := rand.Int() % l
     // iterate all hosts until mount succeeds.
     for i := start; i < start+l; i++ {
-        mon := rbd.Mon[i%l]
+        mon := b.Mon[i%l]
         // cmd "rbd lock list" serves two purposes:
         // for fencing, check if lock already held for this host
         // this edge case happens if host crashes in the middle of acquiring lock and mounting rbd
         // for defencing, get the locker name, something like "client.1234"
-        cmd, err = rbd.plugin.execCommand("rbd",
-            append([]string{"lock", "list", rbd.Image, "--pool", rbd.Pool, "--id", rbd.Id, "-m", mon}, secret_opt...))
+        cmd, err = b.plugin.execCommand("rbd",
+            append([]string{"lock", "list", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon}, secret_opt...))
         output = string(cmd)
         if err != nil {
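The loop above implements the "avoid mount storm" comment: start at a random monitor and try each one exactly once, wrapping around modulo the list length. A small self-contained sketch of that rotation; tryMon and runOnAnyMon are made-up stand-ins for the real rbd CLI invocation:

// Minimal sketch of the random-start rotation over monitors (assumed
// helper names, not the kubelet code).
package main

import (
    "errors"
    "fmt"
    "math/rand"
)

// tryMon stands in for a real `rbd ... -m <mon>` call.
func tryMon(mon string) error {
    if mon == "10.0.0.2" { // pretend only this monitor is reachable
        return nil
    }
    return errors.New("unreachable")
}

func runOnAnyMon(mons []string) error {
    l := len(mons)
    start := rand.Int() % l // random start spreads load across monitors
    var err error
    for i := start; i < start+l; i++ {
        mon := mons[i%l] // wrap around so every monitor is tried once
        if err = tryMon(mon); err == nil {
            fmt.Println("succeeded via", mon)
            return nil
        }
    }
    return err // last error if every monitor failed
}

func main() {
    _ = runOnAnyMon([]string{"10.0.0.1", "10.0.0.2", "10.0.0.3"})
}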
@@ -103,8 +103,8 @@ func (util *RBDUtil) rbdLock(rbd rbd, lock bool) error {
                 return nil
             }
             // hold a lock: rbd lock add
-            cmd, err = rbd.plugin.execCommand("rbd",
-                append([]string{"lock", "add", rbd.Image, lock_id, "--pool", rbd.Pool, "--id", rbd.Id, "-m", mon}, secret_opt...))
+            cmd, err = b.plugin.execCommand("rbd",
+                append([]string{"lock", "add", b.Image, lock_id, "--pool", b.Pool, "--id", b.Id, "-m", mon}, secret_opt...))
         } else {
             // defencing, find locker name
             ind := strings.LastIndex(output, lock_id) - 1
@@ -115,8 +115,8 @@ func (util *RBDUtil) rbdLock(rbd rbd, lock bool) error {
                 }
             }
             // remove a lock: rbd lock remove
-            cmd, err = rbd.plugin.execCommand("rbd",
-                append([]string{"lock", "remove", rbd.Image, lock_id, locker, "--pool", rbd.Pool, "--id", rbd.Id, "-m", mon}, secret_opt...))
+            cmd, err = b.plugin.execCommand("rbd",
+                append([]string{"lock", "remove", b.Image, lock_id, locker, "--pool", b.Pool, "--id", b.Id, "-m", mon}, secret_opt...))
         }

         if err == nil {
@@ -127,7 +127,7 @@ func (util *RBDUtil) rbdLock(rbd rbd, lock bool) error {
     return err
 }

-func (util *RBDUtil) persistRBD(rbd rbd, mnt string) error {
+func (util *RBDUtil) persistRBD(rbd rbdBuilder, mnt string) error {
     file := path.Join(mnt, "rbd.json")
     fp, err := os.Create(file)
     if err != nil {
@@ -159,47 +159,47 @@ func (util *RBDUtil) loadRBD(rbd *rbd, mnt string) error {
     return nil
 }

-func (util *RBDUtil) fencing(rbd rbd) error {
+func (util *RBDUtil) fencing(b rbdBuilder) error {
     // no need to fence readOnly
-    if rbd.ReadOnly {
+    if b.ReadOnly {
         return nil
     }
-    return util.rbdLock(rbd, true)
+    return util.rbdLock(b, true)
 }

-func (util *RBDUtil) defencing(rbd rbd) error {
+func (util *RBDUtil) defencing(c rbdCleaner) error {
     // no need to fence readOnly
-    if rbd.ReadOnly {
+    if c.ReadOnly {
         return nil
     }
-    return util.rbdLock(rbd, false)
+    return util.rbdLock(rbdBuilder{rbd: c.rbd}, false)
 }

-func (util *RBDUtil) AttachDisk(rbd rbd) error {
+func (util *RBDUtil) AttachDisk(b rbdBuilder) error {
     var err error
-    devicePath := strings.Join([]string{"/dev/rbd", rbd.Pool, rbd.Image}, "/")
+    devicePath := strings.Join([]string{"/dev/rbd", b.Pool, b.Image}, "/")
     exist := waitForPathToExist(devicePath, 1)
     if !exist {
         // modprobe
-        _, err = rbd.plugin.execCommand("modprobe", []string{"rbd"})
+        _, err = b.plugin.execCommand("modprobe", []string{"rbd"})
         if err != nil {
             return fmt.Errorf("rbd: failed to modprobe rbd error:%v", err)
         }
         // rbd map
-        l := len(rbd.Mon)
+        l := len(b.Mon)
         // avoid mount storm, pick a host randomly
         start := rand.Int() % l
         // iterate all hosts until mount succeeds.
         for i := start; i < start+l; i++ {
-            mon := rbd.Mon[i%l]
+            mon := b.Mon[i%l]
             glog.V(1).Infof("rbd: map mon %s", mon)
-            if rbd.Secret != "" {
-                _, err = rbd.plugin.execCommand("rbd",
-                    []string{"map", rbd.Image, "--pool", rbd.Pool, "--id", rbd.Id, "-m", mon, "--key=" + rbd.Secret})
+            if b.Secret != "" {
+                _, err = b.plugin.execCommand("rbd",
+                    []string{"map", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon, "--key=" + b.Secret})
             } else {
-                _, err = rbd.plugin.execCommand("rbd",
-                    []string{"map", rbd.Image, "--pool", rbd.Pool, "--id", rbd.Id, "-m", mon, "-k", rbd.Keyring})
+                _, err = b.plugin.execCommand("rbd",
+                    []string{"map", b.Image, "--pool", b.Pool, "--id", b.Id, "-m", mon, "-k", b.Keyring})
             }
             if err == nil {
                 break
@@ -214,8 +214,8 @@ func (util *RBDUtil) AttachDisk(rbd rbd) error {
         return errors.New("Could not map image: Timeout after 10s")
     }
     // mount it
-    globalPDPath := rbd.manager.MakeGlobalPDName(rbd)
-    mountpoint, err := rbd.mounter.IsMountPoint(globalPDPath)
+    globalPDPath := b.manager.MakeGlobalPDName(*b.rbd)
+    mountpoint, err := b.mounter.IsMountPoint(globalPDPath)
     // in the first time, the path shouldn't exist and IsMountPoint is expected to get NotExist
     if err != nil && !os.IsNotExist(err) {
         return fmt.Errorf("rbd: %s failed to check mountpoint", globalPDPath)
@@ -229,43 +229,42 @@ func (util *RBDUtil) AttachDisk(rbd rbd) error {
     }
     // fence off other mappers
-    if err := util.fencing(rbd); err != nil {
-        return fmt.Errorf("rbd: image %s is locked by other nodes", rbd.Image)
+    if err := util.fencing(b); err != nil {
+        return fmt.Errorf("rbd: image %s is locked by other nodes", b.Image)
     }
     // rbd lock remove needs ceph and image config
     // but kubelet doesn't get them from apiserver during teardown
     // so persit rbd config so upon disk detach, rbd lock can be removed
     // since rbd json is persisted in the same local directory that is used as rbd mountpoint later,
     // the json file remains invisible during rbd mount and thus won't be removed accidentally.
-    util.persistRBD(rbd, globalPDPath)
+    util.persistRBD(b, globalPDPath)

-    if err = rbd.mounter.Mount(devicePath, globalPDPath, rbd.fsType, nil); err != nil {
-        err = fmt.Errorf("rbd: failed to mount rbd volume %s [%s] to %s, error %v", devicePath, rbd.fsType, globalPDPath, err)
+    if err = b.mounter.Mount(devicePath, globalPDPath, b.fsType, nil); err != nil {
+        err = fmt.Errorf("rbd: failed to mount rbd volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err)
     }
     return err
 }

-func (util *RBDUtil) DetachDisk(rbd rbd, mntPath string) error {
-    device, cnt, err := mount.GetDeviceNameFromMount(rbd.mounter, mntPath)
+func (util *RBDUtil) DetachDisk(c rbdCleaner, mntPath string) error {
+    device, cnt, err := mount.GetDeviceNameFromMount(c.mounter, mntPath)
     if err != nil {
         return fmt.Errorf("rbd detach disk: failed to get device from mnt: %s\nError: %v", mntPath, err)
     }
-    if err = rbd.mounter.Unmount(mntPath); err != nil {
+    if err = c.mounter.Unmount(mntPath); err != nil {
         return fmt.Errorf("rbd detach disk: failed to umount: %s\nError: %v", mntPath, err)
     }
     // if device is no longer used, see if can unmap
     if cnt <= 1 {
         // rbd unmap
-        _, err = rbd.plugin.execCommand("rbd", []string{"unmap", device})
+        _, err = c.plugin.execCommand("rbd", []string{"unmap", device})
         if err != nil {
             return fmt.Errorf("rbd: failed to unmap device %s:Error: %v", device, err)
         }
         // load ceph and image/pool info to remove fencing
-        if err := util.loadRBD(&rbd, mntPath); err == nil {
+        if err := util.loadRBD(c.rbd, mntPath); err == nil {
             // remove rbd lock
-            util.defencing(rbd)
+            util.defencing(c)
         }
         glog.Infof("rbd: successfully unmap device %s", device)
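The comments above explain why the rbd config is persisted next to the mountpoint: teardown has no apiserver data, so loadRBD reads rbd.json back before the lock is removed. A simplified, standalone sketch of that persist/load round trip; the field names and JSON layout here are assumptions, not the exact persistRBD/loadRBD schema:

// Minimal sketch of persisting attach-time config to rbd.json in the global
// mount dir so a later teardown can read it back (assumed field names).
package main

import (
    "encoding/json"
    "fmt"
    "os"
    "path"
)

type rbdInfo struct {
    Mon     []string `json:"mon"`
    Image   string   `json:"image"`
    Pool    string   `json:"pool"`
    Id      string   `json:"id"`
    Keyring string   `json:"keyring"`
}

// persistRBD writes the config into the (not yet mounted-over) global dir.
func persistRBD(info rbdInfo, mnt string) error {
    data, err := json.Marshal(info)
    if err != nil {
        return err
    }
    return os.WriteFile(path.Join(mnt, "rbd.json"), data, 0600)
}

// loadRBD reads the config back at detach time, when the apiserver spec is gone.
func loadRBD(mnt string) (rbdInfo, error) {
    var info rbdInfo
    data, err := os.ReadFile(path.Join(mnt, "rbd.json"))
    if err != nil {
        return info, err
    }
    err = json.Unmarshal(data, &info)
    return info, err
}

func main() {
    mnt, _ := os.MkdirTemp("", "rbd")
    defer os.RemoveAll(mnt)
    _ = persistRBD(rbdInfo{Image: "foo", Pool: "kube", Id: "admin"}, mnt)
    info, _ := loadRBD(mnt)
    fmt.Printf("%+v\n", info)
}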