Merge pull request #14044 from rootfs/rbd_enhancement

Multiple rbd volume enhancements
Merged by Alex Robinson · 2015-10-05 17:36:11 -07:00 · commit 608244fbb0
3 changed files with 58 additions and 17 deletions

examples/rbd/README.md

@@ -35,7 +35,7 @@ Documentation for other releases can be found at

 Install Ceph on the Kubernetes host. For example, on Fedora 21

-    # yum -y install ceph
+    # yum -y install ceph-common

 If you don't have a Ceph cluster, you can set up a [containerized Ceph cluster](https://github.com/rootfs/docker-ceph)

@@ -54,7 +54,14 @@ Once you have installed Ceph and new Kubernetes, you can create a pod based on m

 # Use Ceph Authentication Secret

-If Ceph authentication secret is provided, the secret should be first be base64 encoded, then encoded string is placed in a secret yaml. An example yaml is provided [here](secret/ceph-secret.yaml). Then post the secret through ```kubectl``` in the following command.
+If Ceph authentication secret is provided, the secret should first be *base64 encoded*, and the encoded string is then placed in a secret yaml. For example, to get Ceph user `kube`'s base64 encoded secret, run the following command:
+
+```console
+# grep key /etc/ceph/ceph.client.kube.keyring |awk '{printf "%s", $NF}'|base64
+QVFBTWdYaFZ3QkNlRGhBQTlubFBhRnlmVVNhdEdENGRyRldEdlE9PQ==
+```
+
+An example yaml is provided [here](secret/ceph-secret.yaml). Then post the secret through ```kubectl``` in the following command.

 ```console
 # kubectl create -f examples/rbd/secret/ceph-secret.yaml
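The grep/awk/base64 pipeline above can also be done programmatically. Below is a minimal sketch in Go (not part of this PR) that extracts the `key = ...` value from a Ceph keyring and prints it base64 encoded; the keyring path is an assumption matching the `kube` user used above.

```go
// keyring_b64.go - sketch: print a Ceph keyring's key, base64 encoded,
// ready to paste into a secret yaml. Mirrors the grep/awk/base64 pipeline.
package main

import (
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"log"
	"strings"
)

func main() {
	// assumed path; adjust to your Ceph user's keyring
	data, err := ioutil.ReadFile("/etc/ceph/ceph.client.kube.keyring")
	if err != nil {
		log.Fatal(err)
	}
	for _, line := range strings.Split(string(data), "\n") {
		line = strings.TrimSpace(line)
		if strings.HasPrefix(line, "key") {
			fields := strings.Fields(line)
			key := fields[len(fields)-1] // same as awk's $NF
			fmt.Println(base64.StdEncoding.EncodeToString([]byte(key)))
			return
		}
	}
	log.Fatal("no key found in keyring")
}
```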

pkg/volume/rbd/rbd.go

@@ -131,7 +131,7 @@ func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID,
 			Pool:     pool,
 			ReadOnly: readOnly,
 			manager:  manager,
-			mounter:  mounter,
+			mounter:  &mount.SafeFormatAndMount{mounter, exec.New()},
 			plugin:   plugin,
 		},
 		Mon: source.CephMonitors,
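This change wraps the plain mounter in `mount.SafeFormatAndMount`, pairing it with an exec runner so the rbd device can be formatted before it is mounted if it does not yet carry a filesystem. A rough sketch of that format-then-mount pattern (illustrative only, not the library's actual implementation), assuming the `pkg/util/exec` and `pkg/util/mount` interfaces used in the diff:

```go
// Sketch: probe the device, create a filesystem if none is found, then mount.
func formatAndMountSketch(runner exec.Interface, mounter mount.Interface,
	source, target, fstype string, options []string) error {
	// empty blkid output (or a probe error) suggests no filesystem yet
	out, err := runner.Command("blkid", source).CombinedOutput()
	if err != nil || len(out) == 0 {
		// device looks unformatted: create the filesystem before mounting
		if _, err := runner.Command("mkfs."+fstype, source).CombinedOutput(); err != nil {
			return err
		}
	}
	return mounter.Mount(source, target, fstype, options)
}
```

Supplying `&mount.SafeFormatAndMount{mounter, exec.New()}` at builder construction time keeps the rest of the attach path unchanged.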

pkg/volume/rbd/rbd_util.go

@@ -25,31 +25,63 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"io/ioutil"
 	"math/rand"
 	"os"
 	"path"
+	"regexp"
 	"strings"
 	"time"

 	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/util/exec"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/util/node"
 	"k8s.io/kubernetes/pkg/volume"
 )

-// stat a path, if not exists, retry maxRetries times
-func waitForPathToExist(devicePath string, maxRetries int) bool {
-	for i := 0; i < maxRetries; i++ {
-		_, err := os.Stat(devicePath)
-		if err == nil {
-			return true
-		}
-		if err != nil && !os.IsNotExist(err) {
-			return false
-		}
+// search /sys/bus for rbd device that matches given pool and image
+func getDevFromImageAndPool(pool, image string) (string, bool) {
+	// /sys/bus/rbd/devices/X/name and /sys/bus/rbd/devices/X/pool
+	sys_path := "/sys/bus/rbd/devices"
+	if dirs, err := ioutil.ReadDir(sys_path); err == nil {
+		for _, f := range dirs {
+			// pool and name format:
+			// see rbd_pool_show() and rbd_name_show() at
+			// https://github.com/torvalds/linux/blob/master/drivers/block/rbd.c
+			name := f.Name()
+			// first match pool, then match name
+			po := path.Join(sys_path, name, "pool")
+			img := path.Join(sys_path, name, "name")
+			exe := exec.New()
+			out, err := exe.Command("cat", po, img).CombinedOutput()
+			if err != nil {
+				continue
+			}
+			matched, err := regexp.MatchString("^"+pool+"\n"+image+"\n$", string(out))
+			if err != nil || !matched {
+				continue
+			}
+			// found a match, check if device exists
+			devicePath := "/dev/rbd" + name
+			if _, err := os.Lstat(devicePath); err == nil {
+				return devicePath, true
+			}
+		}
+	}
+	return "", false
+}
+
+// stat a path, if not exists, retry maxRetries times
+func waitForPath(pool, image string, maxRetries int) (string, bool) {
+	for i := 0; i < maxRetries; i++ {
+		devicePath, found := getDevFromImageAndPool(pool, image)
+		if found {
+			return devicePath, true
+		}
 		time.Sleep(time.Second)
 	}
-	return false
+	return "", false
 }

 // make a directory like /var/lib/kubelet/plugins/kubernetes.io/pod/rbd/pool-image-image
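The new lookup shells out to `cat` to read each device's `pool` and `name` entries and matches them with a regexp built from the raw pool and image strings. A sketch of the same sysfs scan done in-process with `ioutil.ReadFile` and exact string comparison (an illustrative alternative, not the merged code) would avoid the extra processes and the risk of regex metacharacters in pool or image names:

```go
// Sketch: resolve pool/image to /dev/rbdX by reading sysfs directly.
func getDevFromImageAndPoolNoExec(pool, image string) (string, bool) {
	const sysPath = "/sys/bus/rbd/devices"
	dirs, err := ioutil.ReadDir(sysPath)
	if err != nil {
		return "", false
	}
	for _, f := range dirs {
		name := f.Name() // the device index X in /dev/rbdX
		po, err1 := ioutil.ReadFile(path.Join(sysPath, name, "pool"))
		img, err2 := ioutil.ReadFile(path.Join(sysPath, name, "name"))
		if err1 != nil || err2 != nil {
			continue
		}
		// compare exactly after trimming the trailing newline
		if strings.TrimSpace(string(po)) == pool && strings.TrimSpace(string(img)) == image {
			devicePath := "/dev/rbd" + name
			if _, err := os.Lstat(devicePath); err == nil {
				return devicePath, true
			}
		}
	}
	return "", false
}
```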
@@ -178,9 +210,9 @@ func (util *RBDUtil) defencing(c rbdCleaner) error {

 func (util *RBDUtil) AttachDisk(b rbdBuilder) error {
 	var err error

-	devicePath := strings.Join([]string{"/dev/rbd", b.Pool, b.Image}, "/")
-	exist := waitForPathToExist(devicePath, 1)
-	if !exist {
+	devicePath, found := waitForPath(b.Pool, b.Image, 1)
+	if !found {
 		// modprobe
 		_, err = b.plugin.execCommand("modprobe", []string{"rbd"})
 		if err != nil {
@@ -209,8 +241,8 @@ func (util *RBDUtil) AttachDisk(b rbdBuilder) error {
 		if err != nil {
 			return err
 		}
-		exist = waitForPathToExist(devicePath, 10)
-		if !exist {
+		devicePath, found = waitForPath(b.Pool, b.Image, 10)
+		if !found {
 			return errors.New("Could not map image: Timeout after 10s")
 		}
 		// mount it
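Taken together, the two hunks above give AttachDisk this shape: probe once for an existing mapping, load the kernel module and map the image if none is found, then poll for up to ten seconds for the device node to appear. A condensed sketch, with hypothetical helpers `modprobeRBD` and `rbdMap` standing in for the plugin's `execCommand` calls:

```go
// Sketch of the attach flow; modprobeRBD and rbdMap are hypothetical.
func attachSketch(b rbdBuilder) (string, error) {
	// fast path: the image may already be mapped on this host
	if dev, found := waitForPath(b.Pool, b.Image, 1); found {
		return dev, nil
	}
	if err := modprobeRBD(b); err != nil { // modprobe rbd
		return "", err
	}
	if err := rbdMap(b); err != nil { // rbd map, as in the hunk above
		return "", err
	}
	// the kernel creates /dev/rbdX asynchronously; poll for up to 10s
	if dev, found := waitForPath(b.Pool, b.Image, 10); found {
		return dev, nil
	}
	return "", errors.New("Could not map image: Timeout after 10s")
}
```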
@@ -230,6 +262,8 @@ func (util *RBDUtil) AttachDisk(b rbdBuilder) error {
 	// fence off other mappers
 	if err := util.fencing(b); err != nil {
+		// rbd unmap before exit
+		b.plugin.execCommand("rbd", []string{"unmap", devicePath})
 		return fmt.Errorf("rbd: image %s is locked by other nodes", b.Image)
 	}
 	// rbd lock remove needs ceph and image config
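The two added lines make the fencing failure path undo the `rbd map`, so a pod that loses the lock race does not leak a mapped device on the node. The same rollback could be expressed with a deferred cleanup; a hypothetical refactor sketch, not part of this PR:

```go
// Sketch: unmap on any error that occurs after a successful map,
// using a named return value and defer.
func (util *RBDUtil) fenceAndMount(b rbdBuilder, devicePath string) (err error) {
	defer func() {
		if err != nil {
			// rbd unmap before exit, same rollback as above
			b.plugin.execCommand("rbd", []string{"unmap", devicePath})
		}
	}()
	if err = util.fencing(b); err != nil {
		return fmt.Errorf("rbd: image %s is locked by other nodes", b.Image)
	}
	// mount and lock bookkeeping would follow here
	return nil
}
```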