From 0c3b2f44a47836501957c19643ec5850cc04b348 Mon Sep 17 00:00:00 2001
From: Huamin Chen
Date: Tue, 23 Aug 2016 13:22:35 -0400
Subject: [PATCH] review feedbacks

Signed-off-by: Huamin Chen
---
 .../persistent-volume-provisioning/README.md |  21 ++--
 .../rbd/rbd-storage-class.yaml               |   4 +-
 pkg/volume/rbd/rbd.go                        | 108 ++++++++++--------
 pkg/volume/rbd/rbd_test.go                   |   8 ++
 pkg/volume/rbd/rbd_util.go                   |  70 +++++++++---
 pkg/volume/util/util.go                      |  18 +++
 6 files changed, 151 insertions(+), 78 deletions(-)

diff --git a/examples/experimental/persistent-volume-provisioning/README.md b/examples/experimental/persistent-volume-provisioning/README.md
index 1457df7f0c1..818cc111697 100644
--- a/examples/experimental/persistent-volume-provisioning/README.md
+++ b/examples/experimental/persistent-volume-provisioning/README.md
@@ -131,21 +131,21 @@ parameters:
 provisioner: kubernetes.io/rbd
 parameters:
   monitors: 10.16.153.105:6789
-  adminID: kube
+  adminId: kube
   adminSecretName: ceph-secret
   adminSecretNamespace: kube-system
   pool: kube
   userId: kube
-  secretName: ceph-secret-user
+  userSecretName: ceph-secret-user
 ```

-* `monitors`: Ceph monitors, comma delimited
-* `adminID`: Ceph client ID that is capable of creating images in the pool. Default is "admin"
-* `adminSecret`: Secret Name for `adminID`
-* `adminSecretNamespace`: The namespace for `adminSecret`. Default is "default"
-* `pool`: Ceph RBD pool. Default is "rbd"
-* `userId`: Ceph client ID that is used to map the RBD image. Default is the same as `adminID`
-* `secretName`: The name of Ceph Secret. It must exist in the same namespace as PVCs.
+* `monitors`: Ceph monitors, comma delimited. It is required.
+* `adminId`: Ceph client ID that is capable of creating images in the pool. Default is "admin".
+* `adminSecretName`: Secret name for `adminId`. It is required.
+* `adminSecretNamespace`: The namespace for `adminSecretName`. Default is "default".
+* `pool`: Ceph RBD pool. Default is "rbd".
+* `userId`: Ceph client ID that is used to map the RBD image. Default is the same as `adminId`.
+* `userSecretName`: The name of the Ceph Secret for `userId` to map the RBD image. It must exist in the same namespace as the PVCs. It is required.

 ### User provisioning requests
@@ -179,6 +179,7 @@ In the future, the storage class may remain in an annotation or become a field o
 ### Sample output

 #### GCE
+
 This example uses GCE but any provisioner would follow the same flow.

 First we note there are no Persistent Volumes in the cluster. After creating a storage class and a claim including that storage class, we see a new PV is created
@@ -231,6 +232,7 @@ Before creating PVC in user's namespace (e.g. myns), make sure the Ceph user's S
 ```
 $ kubectl create -f examples/experimental/persistent-volume-provisioning/rbd/ceph-secret-user.yaml --namespace=myns
 ```
+
 Now create a PVC in user's namespace (e.g. myns):

 ```
@@ -238,6 +240,7 @@ $ kubectl create -f examples/experimental/persistent-volume-provisioning/claim1.
 ```

 Check the PV and PVC are created:
+
 ```
 $ kubectl describe pvc --namespace=myns
 Name:          claim1
diff --git a/examples/experimental/persistent-volume-provisioning/rbd/rbd-storage-class.yaml b/examples/experimental/persistent-volume-provisioning/rbd/rbd-storage-class.yaml
index 26e3679df22..9656c2999d0 100644
--- a/examples/experimental/persistent-volume-provisioning/rbd/rbd-storage-class.yaml
+++ b/examples/experimental/persistent-volume-provisioning/rbd/rbd-storage-class.yaml
@@ -5,10 +5,10 @@ metadata:
 provisioner: kubernetes.io/rbd
 parameters:
   monitors: 10.16.153.105:6789
-  adminID: admin
+  adminId: admin
   adminSecretName: ceph-secret-admin
   adminSecretNamespace: "kube-system"
   pool: kube
   userId: kube
-  secretName: ceph-secret-user
+  userSecretName: ceph-secret-user

diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go
index b32947f5b37..70a21534b08 100644
--- a/pkg/volume/rbd/rbd.go
+++ b/pkg/volume/rbd/rbd.go
@@ -23,12 +23,14 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/exec"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/util/strings"
 	"k8s.io/kubernetes/pkg/util/uuid"
 	"k8s.io/kubernetes/pkg/volume"
+	volutil "k8s.io/kubernetes/pkg/volume/util"
 )

 // This is the primary entrypoint for volume plugins.
@@ -51,6 +53,7 @@ const (
 	annCephAdminID              = "rbd.kubernetes.io/admin"
 	annCephAdminSecretName      = "rbd.kubernetes.io/adminsecretname"
 	annCephAdminSecretNameSpace = "rbd.kubernetes.io/adminsecretnamespace"
+	secretKeyName               = "key" // key name used in secret
 )

 func (plugin *rbdPlugin) Init(host volume.VolumeHost) error {
@@ -99,8 +102,8 @@ func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.Vo
 	source, _ := plugin.getRBDVolumeSource(spec)

 	if source.SecretRef != nil {
-		if secret, err = plugin.getSecret(pod.Namespace, source.SecretRef.Name); err != nil {
-			glog.Errorf("Couldn't get secret %v/%v", pod.Namespace, source.SecretRef)
+		if secret, err = parseSecret(pod.Namespace, source.SecretRef.Name, plugin.host.GetKubeClient()); err != nil {
+			glog.Errorf("Couldn't get secret from %v/%v", pod.Namespace, source.SecretRef)
 			return nil, err
 		}
 	}
@@ -109,25 +112,6 @@ func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.Vo
 	return plugin.newMounterInternal(spec, pod.UID, &RBDUtil{}, plugin.host.GetMounter(), secret)
 }

-func (plugin *rbdPlugin) getSecret(namespace, secretName string) (string, error) {
-	secret := ""
-	kubeClient := plugin.host.GetKubeClient()
-	if kubeClient == nil {
-		return "", fmt.Errorf("Cannot get kube client")
-	}
-
-	secrets, err := kubeClient.Core().Secrets(namespace).Get(secretName)
-	if err != nil {
-		return "", err
-	}
-	for name, data := range secrets.Data {
-		secret = string(data)
-		glog.V(4).Infof("ceph secret [%q/%q] info: %s/%s", namespace, secretName, name, secret)
-	}
-	return secret, nil
-
-}
-
 func (plugin *rbdPlugin) getRBDVolumeSource(spec *volume.Spec) (*api.RBDVolumeSource, bool) {
 	// rbd volumes used directly in a pod have a ReadOnly flag set by the pod author.
 	// rbd volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
@@ -199,14 +183,15 @@ func (plugin *rbdPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
 	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD == nil {
 		return nil, fmt.Errorf("spec.PersistentVolumeSource.Spec.RBD is nil")
 	}
-	admin, adminSecretName, adminSecretNamespace, err := selectorToParam(spec.PersistentVolume)
+	admin, adminSecretName, adminSecretNamespace, err := annotationsToParam(spec.PersistentVolume)
 	if err != nil {
-		return nil, fmt.Errorf("cannot find Ceph credentials to delete rbd PV")
+		return nil, fmt.Errorf("cannot find Ceph credentials to delete rbd PV: %v", err)
 	}
-	secret := ""
-	if secret, err = plugin.getSecret(adminSecretNamespace, adminSecretName); err != nil {
+
+	secret, err := parseSecret(adminSecretNamespace, adminSecretName, plugin.host.GetKubeClient())
+	if err != nil {
 		// log error but don't return yet
-		glog.Errorf("failed to get admin secret from [%q/%q]", adminSecretNamespace, adminSecretName)
+		glog.Errorf("failed to get admin secret from [%q/%q]: %v", adminSecretNamespace, adminSecretName, err)
 	}
 	return plugin.newDeleterInternal(spec, admin, secret, &RBDUtil{})
 }
@@ -221,9 +206,9 @@ func (plugin *rbdPlugin) newDeleterInternal(spec *volume.Spec, admin, secret str
 			manager: manager,
 			plugin:  plugin,
 		},
-		Mon:    spec.PersistentVolume.Spec.RBD.CephMonitors,
-		Id:     admin,
-		Secret: secret,
+		Mon:         spec.PersistentVolume.Spec.RBD.CephMonitors,
+		adminId:     admin,
+		adminSecret: secret,
 	}}, nil
 }

@@ -259,7 +244,6 @@ func (r *rbdVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 	adminSecretName := ""
 	adminSecretNamespace := "default"
 	secretName := ""
-	userId := ""
 	secret := ""

 	for k, v := range r.options.Parameters {
@@ -270,16 +254,16 @@ func (r *rbdVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 				r.Mon = append(r.Mon, m)
 			}
 		case "adminid":
-			r.Id = v
+			r.adminId = v
 		case "adminsecretname":
 			adminSecretName = v
 		case "adminsecretnamespace":
 			adminSecretNamespace = v
 		case "userid":
-			userId = v
+			r.Id = v
 		case "pool":
 			r.Pool = v
-		case "secretname":
+		case "usersecretname":
 			secretName = v
 		default:
 			return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, r.plugin.GetPluginName())
@@ -289,25 +273,25 @@ func (r *rbdVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 	if adminSecretName == "" {
 		return nil, fmt.Errorf("missing Ceph admin secret name")
 	}
-	if secret, err = r.plugin.getSecret(adminSecretNamespace, adminSecretName); err != nil {
+	if secret, err = parseSecret(adminSecretNamespace, adminSecretName, r.plugin.host.GetKubeClient()); err != nil {
 		// log error but don't return yet
 		glog.Errorf("failed to get admin secret from [%q/%q]", adminSecretNamespace, adminSecretName)
 	}
-	r.Secret = secret
+	r.adminSecret = secret
 	if len(r.Mon) < 1 {
 		return nil, fmt.Errorf("missing Ceph monitors")
 	}
 	if secretName == "" {
-		return nil, fmt.Errorf("missing secret name")
+		return nil, fmt.Errorf("missing user secret name")
 	}
-	if r.Id == "" {
-		r.Id = "admin"
+	if r.adminId == "" {
+		r.adminId = "admin"
 	}
 	if r.Pool == "" {
 		r.Pool = "rbd"
 	}
-	if userId == "" {
-		userId = r.Id
+	if r.Id == "" {
+		r.Id = r.adminId
 	}

 	// create random image name
@@ -318,10 +302,11 @@ func (r *rbdVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 		glog.Errorf("rbd: create volume failed, err: %v", err)
 		return nil, fmt.Errorf("rbd: create volume failed, err: %v", err)
 	}
+
glog.Infof("successfully created rbd image %q", image) pv := new(api.PersistentVolume) rbd.SecretRef = new(api.LocalObjectReference) rbd.SecretRef.Name = secretName - rbd.RadosUser = userId + rbd.RadosUser = r.Id pv.Spec.PersistentVolumeSource.RBD = rbd pv.Spec.PersistentVolumeReclaimPolicy = r.options.PersistentVolumeReclaimPolicy pv.Spec.AccessModes = r.options.AccessModes @@ -329,7 +314,7 @@ func (r *rbdVolumeProvisioner) Provision() (*api.PersistentVolume, error) { api.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf("%dMi", sizeMB)), } // place parameters in pv selector - paramToSelector(r.Id, adminSecretNamespace, adminSecretName, pv) + paramToAnnotations(r.adminId, adminSecretNamespace, adminSecretName, pv) return pv, nil } @@ -346,7 +331,7 @@ func (r *rbdVolumeDeleter) Delete() error { return r.manager.DeleteImage(r) } -func paramToSelector(admin, adminSecretNamespace, adminSecretName string, pv *api.PersistentVolume) { +func paramToAnnotations(admin, adminSecretNamespace, adminSecretName string, pv *api.PersistentVolume) { if pv.Annotations == nil { pv.Annotations = make(map[string]string) } @@ -355,9 +340,9 @@ func paramToSelector(admin, adminSecretNamespace, adminSecretName string, pv *ap pv.Annotations[annCephAdminSecretNameSpace] = adminSecretNamespace } -func selectorToParam(pv *api.PersistentVolume) (string, string, string, error) { +func annotationsToParam(pv *api.PersistentVolume) (string, string, string, error) { if pv.Annotations == nil { - return "", "", "", fmt.Errorf("PV has no annotation, cannot get Ceph admin cedentials") + return "", "", "", fmt.Errorf("PV has no annotation, cannot get Ceph admin credentials") } var admin, adminSecretName, adminSecretNamespace string found := false @@ -398,11 +383,13 @@ func (rbd *rbd) GetPath() string { type rbdMounter struct { *rbd // capitalized so they can be exported in persistRBD() - Mon []string - Id string - Keyring string - Secret string - fsType string + Mon []string + Id string + Keyring string + Secret string + fsType string + adminSecret string + adminId string } var _ volume.Mounter = &rbdMounter{} @@ -461,3 +448,24 @@ func getVolumeSource( return nil, false, fmt.Errorf("Spec does not reference a RBD volume type") } + +// parseSecretMap locates the secret by key name. 
+func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) {
+	secretMap, err := volutil.GetSecret(namespace, secretName, kubeClient)
+	if err != nil {
+		glog.Errorf("failed to get secret from [%q/%q]", namespace, secretName)
+		return "", fmt.Errorf("failed to get secret from [%q/%q]", namespace, secretName)
+	}
+	if len(secretMap) == 0 {
+		return "", fmt.Errorf("empty secret map")
+	}
+	secret := ""
+	for k, v := range secretMap {
+		if k == secretKeyName {
+			return v, nil
+		}
+		secret = v
+	}
+	// If not found, the last secret in the map wins as done before
+	return secret, nil
+}
diff --git a/pkg/volume/rbd/rbd_test.go b/pkg/volume/rbd/rbd_test.go
index a1bbfea4a3f..b7f28b545a5 100644
--- a/pkg/volume/rbd/rbd_test.go
+++ b/pkg/volume/rbd/rbd_test.go
@@ -87,6 +87,14 @@ func (fake *fakeDiskManager) DetachDisk(c rbdUnmounter, mntPath string) error {
 	return nil
 }

+func (fake *fakeDiskManager) CreateImage(provisioner *rbdVolumeProvisioner) (r *api.RBDVolumeSource, volumeSizeGB int, err error) {
+	return nil, 0, fmt.Errorf("not implemented")
+}
+
+func (fake *fakeDiskManager) DeleteImage(deleter *rbdVolumeDeleter) error {
+	return fmt.Errorf("not implemented")
+}
+
 func doTestPlugin(t *testing.T, spec *volume.Spec) {
 	tmpDir, err := utiltesting.MkTmpdir("rbd_test")
 	if err != nil {
diff --git a/pkg/volume/rbd/rbd_util.go b/pkg/volume/rbd/rbd_util.go
index 2fa2745fd07..866c12c8027 100644
--- a/pkg/volume/rbd/rbd_util.go
+++ b/pkg/volume/rbd/rbd_util.go
@@ -41,6 +41,10 @@ import (
 	"k8s.io/kubernetes/pkg/volume"
 )

+const (
+	imageWatcherStr = "watcher="
+)
+
 // search /sys/bus for rbd device that matches given pool and image
 func getDevFromImageAndPool(pool, image string) (string, bool) {
 	// /sys/bus/rbd/devices/X/name and /sys/bus/rbd/devices/X/pool
@@ -312,8 +316,7 @@ func (util *RBDUtil) DetachDisk(c rbdUnmounter, mntPath string) error {
 func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *api.RBDVolumeSource, size int, err error) {
 	volSizeBytes := p.options.Capacity.Value()
 	// convert to MB that rbd defaults on
-	const mb = 1024 * 1024
-	sz := int((volSizeBytes + mb - 1) / mb)
+	sz := int(volume.RoundUpSize(volSizeBytes, 1024*1024))
 	volSz := fmt.Sprintf("%d", sz)
 	// rbd create
 	l := len(p.rbdMounter.Mon)
@@ -322,24 +325,15 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *api.RBDVolumeSourc
 	// iterate all monitors until create succeeds.
 	for i := start; i < start+l; i++ {
 		mon := p.Mon[i%l]
-		glog.V(4).Infof("rbd: create %s size %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, mon, p.rbdMounter.Pool, p.rbdMounter.Id, p.rbdMounter.Secret)
+		glog.V(4).Infof("rbd: create %s size %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, volSz, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
 		var output []byte
 		output, err = p.rbdMounter.plugin.execCommand("rbd",
-			[]string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.Id, "-m", mon, "--key=" + p.rbdMounter.Secret})
+			[]string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", "1"})
 		if err == nil {
 			break
 		} else {
-			glog.V(4).Infof("failed to create rbd image, output %v", string(output))
+			glog.Warningf("failed to create rbd image, output %v", string(output))
 		}
-		// if failed, fall back to image format 1
-		output, err = p.rbdMounter.plugin.execCommand("rbd",
-			[]string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.Id, "-m", mon, "--key=" + p.rbdMounter.Secret, "--image-format", "1"})
-		if err == nil {
-			break
-		} else {
-			glog.V(4).Infof("failed to create rbd image, output %v", string(output))
-		}
-
 	}

 	if err != nil {
@@ -355,8 +349,15 @@ func (util *RBDUtil) CreateImage(p *rbdVolumeProvisioner) (r *api.RBDVolumeSourc
 }

 func (util *RBDUtil) DeleteImage(p *rbdVolumeDeleter) error {
-	var err error
 	var output []byte
+	found, err := util.rbdStatus(p.rbdMounter)
+	if err != nil {
+		return err
+	}
+	if found {
+		glog.Info("rbd is still being used ", p.rbdMounter.Image)
+		return fmt.Errorf("rbd %s is still being used", p.rbdMounter.Image)
+	}
 	// rbd rm
 	l := len(p.rbdMounter.Mon)
 	// pick a mon randomly
 	start := rand.Int() % l
 	// iterate all monitors until rm succeeds.
 	for i := start; i < start+l; i++ {
 		mon := p.rbdMounter.Mon[i%l]
-		glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.Id, p.rbdMounter.Secret)
+		glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminId, p.rbdMounter.adminSecret)
 		output, err = p.plugin.execCommand("rbd",
-			[]string{"rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.Id, "-m", mon, "--key=" + p.rbdMounter.Secret})
+			[]string{"rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminId, "-m", mon, "--key=" + p.rbdMounter.adminSecret})
 		if err == nil {
 			return nil
 		} else {
@@ -375,3 +376,38 @@ func (util *RBDUtil) DeleteImage(p *rbdVolumeDeleter) error {
 	}
 	return err
 }
+
+// run rbd status command to check if there is a watcher on the image
+func (util *RBDUtil) rbdStatus(b *rbdMounter) (bool, error) {
+	var err error
+	var output string
+	var cmd []byte
+
+	l := len(b.Mon)
+	start := rand.Int() % l
+	// iterate all hosts until the rbd status command succeeds.
+	for i := start; i < start+l; i++ {
+		mon := b.Mon[i%l]
+		// cmd "rbd status" lists the rbd client watchers with the following output:
+		// Watchers:
+		//  watcher=10.16.153.105:0/710245699 client.14163 cookie=1
+		glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", b.Image, mon, b.Pool, b.adminId, b.adminSecret)
+		cmd, err = b.plugin.execCommand("rbd",
+			[]string{"status", b.Image, "--pool", b.Pool, "-m", mon, "--id", b.adminId, "--key=" + b.adminSecret})
+		output = string(cmd)
+
+		if err != nil {
+			// ignore the error code, just check the output for the watcher string
+			glog.Warningf("failed to execute rbd status on mon %s", mon)
+		}
+
+		if strings.Contains(output, imageWatcherStr) {
+			glog.V(4).Infof("rbd: watchers on %s: %s", b.Image, output)
+			return true, nil
+		} else {
+			glog.Warningf("rbd: no watchers on %s", b.Image)
+			return false, nil
+		}
+	}
+	return false, nil
+}
diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go
index 9859506a14a..c607cf6d99e 100644
--- a/pkg/volume/util/util.go
+++ b/pkg/volume/util/util.go
@@ -22,6 +22,7 @@ import (
 	"path"

 	"github.com/golang/glog"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/util/mount"
 )

@@ -108,3 +109,20 @@ func PathExists(path string) (bool, error) {
 		return false, err
 	}
 }
+
+// GetSecret locates the secret by name and namespace and returns its data as a map
+func GetSecret(namespace, secretName string, kubeClient clientset.Interface) (map[string]string, error) {
+	secret := make(map[string]string)
+	if kubeClient == nil {
+		return secret, fmt.Errorf("Cannot get kube client")
+	}
+
+	secrets, err := kubeClient.Core().Secrets(namespace).Get(secretName)
+	if err != nil {
+		return secret, err
+	}
+	for name, data := range secrets.Data {
+		secret[name] = string(data)
+	}
+	return secret, nil
+}
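
The `adminSecretName`/`adminSecretNamespace` and `userSecretName` parameters, the `secretKeyName = "key"` constant, and `volutil.GetSecret` all assume that the referenced Secrets carry the Ceph key under a `key` entry. A rough sketch of what those two Secrets could look like is below; the names, namespaces, and base64 values are illustrative placeholders, not part of this patch:

```yaml
apiVersion: v1
kind: Secret
metadata:
  # referenced by adminSecretName / adminSecretNamespace in the storage class
  name: ceph-secret-admin
  namespace: kube-system
type: "kubernetes.io/rbd"
data:
  # base64-encoded Ceph admin key (placeholder value)
  key: QVFCMTZWMVZvRjVtRXhBQTVrQ1FzN2JCajhWVUxSdzI2Qzg0SEE9PQ==
---
apiVersion: v1
kind: Secret
metadata:
  # referenced by userSecretName; must live in the same namespace as the PVCs
  name: ceph-secret-user
  namespace: myns
type: "kubernetes.io/rbd"
data:
  # base64-encoded key for the Ceph client that maps the image (placeholder value)
  key: QVFBTWdYaFZ3QkNlRGhBQTlubFBhRnlmVVNhdEdENGRyRldEdlE9PQ==
```

With Secrets shaped like this, `parseSecret` picks up the admin key via the `key` entry when creating and deleting images, and the kubelet resolves the user Secret from the claim's namespace when mapping the image on the node.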