The CephRBD volume plugin and its CSI migration support were removed in this release

carlory 2024-04-26 11:38:30 +08:00
parent 57b406a18a
commit c8e91b9bc2
24 changed files with 8 additions and 4305 deletions
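
For context, a PersistentVolume that relied on the removed in-tree plugin declared an RBD source along the lines of the sketch below (illustrative only; the monitor address, pool, and image are made up). The RBDPersistentVolumeSource API type itself remains in k8s.io/api; what this commit removes is the kubernetes.io/rbd plugin that mounted it, together with the related CSI-migration feature gates.

package rbdexample

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePV sketches a PersistentVolume backed by the in-tree RBD source that
// the removed plugin used to serve. All field values are hypothetical.
func examplePV() *v1.PersistentVolume {
	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "rbd-pv"},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				RBD: &v1.RBDPersistentVolumeSource{
					CephMonitors: []string{"10.0.0.1:6789"},
					RBDPool:      "kube",
					RBDImage:     "example-image",
					FSType:       "ext4",
				},
			},
		},
	}
}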

View File

@ -24,7 +24,6 @@ import (
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csimigration"
"k8s.io/kubernetes/pkg/volume/portworx"
"k8s.io/kubernetes/pkg/volume/rbd"
)
type probeFn func() []volume.VolumePlugin
@ -57,7 +56,6 @@ type pluginInfo struct {
func appendAttachableLegacyProviderVolumes(logger klog.Logger, allPlugins []volume.VolumePlugin, featureGate featuregate.FeatureGate) ([]volume.VolumePlugin, error) {
pluginMigrationStatus := make(map[string]pluginInfo)
pluginMigrationStatus[plugins.PortworxVolumePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationPortworx, pluginUnregisterFeature: features.InTreePluginPortworxUnregister, pluginProbeFunction: portworx.ProbeVolumePlugins}
pluginMigrationStatus[plugins.RBDVolumePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationRBD, pluginUnregisterFeature: features.InTreePluginRBDUnregister, pluginProbeFunction: rbd.ProbeVolumePlugins}
var err error
for pluginName, pluginInfo := range pluginMigrationStatus {
allPlugins, err = appendPluginBasedOnFeatureFlags(logger, allPlugins, pluginName, featureGate, pluginInfo)
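
appendPluginBasedOnFeatureFlags itself is not part of this diff. A rough, hypothetical sketch of the gating it performs (reusing the pluginInfo struct and imports from the file above; not the actual implementation) is that an in-tree plugin is only probed and appended while its unregister feature gate is still disabled:

// Hypothetical sketch, not the real helper: skip the in-tree plugin entirely
// once its unregister feature gate is enabled, otherwise probe and append it.
func appendPluginSketch(allPlugins []volume.VolumePlugin, featureGate featuregate.FeatureGate, info pluginInfo) []volume.VolumePlugin {
	if featureGate.Enabled(info.pluginUnregisterFeature) {
		// The in-tree plugin is unregistered; the CSI driver serves these volumes.
		return allPlugins
	}
	return append(allPlugins, info.pluginProbeFunction()...)
}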

View File

@ -24,7 +24,6 @@ import (
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/csimigration"
"k8s.io/kubernetes/pkg/volume/portworx"
"k8s.io/kubernetes/pkg/volume/rbd"
)
type probeFn func() []volume.VolumePlugin
@ -57,7 +56,6 @@ type pluginInfo struct {
func appendLegacyProviderVolumes(allPlugins []volume.VolumePlugin, featureGate featuregate.FeatureGate) ([]volume.VolumePlugin, error) {
pluginMigrationStatus := make(map[string]pluginInfo)
pluginMigrationStatus[plugins.PortworxVolumePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationPortworx, pluginUnregisterFeature: features.InTreePluginPortworxUnregister, pluginProbeFunction: portworx.ProbeVolumePlugins}
pluginMigrationStatus[plugins.RBDVolumePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationRBD, pluginUnregisterFeature: features.InTreePluginRBDUnregister, pluginProbeFunction: rbd.ProbeVolumePlugins}
var err error
for pluginName, pluginInfo := range pluginMigrationStatus {
allPlugins, err = appendPluginBasedOnFeatureFlags(allPlugins, pluginName, featureGate, pluginInfo)

View File

@ -141,13 +141,6 @@ const (
// Enables the Portworx in-tree driver to Portworx migration feature.
CSIMigrationPortworx featuregate.Feature = "CSIMigrationPortworx"
// owner: @humblec
// alpha: v1.23
// deprecated: v1.28
//
// Enables the RBD in-tree driver to RBD CSI Driver migration feature.
CSIMigrationRBD featuregate.Feature = "CSIMigrationRBD"
// owner: @fengzixu
// alpha: v1.21
//
@ -305,13 +298,6 @@ const (
// Disables the Portworx in-tree driver.
InTreePluginPortworxUnregister featuregate.Feature = "InTreePluginPortworxUnregister"
// owner: @humblec
// alpha: v1.23
// deprecated: v1.28
//
// Disables the RBD in-tree driver.
InTreePluginRBDUnregister featuregate.Feature = "InTreePluginRBDUnregister"
// owner: @divyenpatel
// alpha: v1.21
//
@ -996,8 +982,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
CSIMigrationPortworx: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires Portworx CSI driver)
CSIMigrationRBD: {Default: false, PreRelease: featuregate.Deprecated}, // deprecated in 1.28, remove in 1.31
CSIVolumeHealth: {Default: false, PreRelease: featuregate.Alpha},
CloudControllerManagerWebhook: {Default: false, PreRelease: featuregate.Alpha},
@ -1044,8 +1028,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
InTreePluginPortworxUnregister: {Default: false, PreRelease: featuregate.Alpha},
InTreePluginRBDUnregister: {Default: false, PreRelease: featuregate.Deprecated}, // deprecated in 1.28, remove in 1.31
InTreePluginvSphereUnregister: {Default: false, PreRelease: featuregate.Alpha},
JobBackoffLimitPerIndex: {Default: true, PreRelease: featuregate.Beta},

View File

@ -55,7 +55,6 @@ import (
"k8s.io/kubernetes/pkg/volume/nfs"
"k8s.io/kubernetes/pkg/volume/portworx"
"k8s.io/kubernetes/pkg/volume/projected"
"k8s.io/kubernetes/pkg/volume/rbd"
"k8s.io/kubernetes/pkg/volume/secret"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
@ -76,7 +75,6 @@ func volumePlugins() []volume.VolumePlugin {
allPlugins = append(allPlugins, nfs.ProbeVolumePlugins(volume.VolumeConfig{})...)
allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, cephfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, downwardapi.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)

View File

@ -51,10 +51,6 @@ func isCSIMigrationOn(csiNode *storagev1.CSINode, pluginName string) bool {
return true
case csilibplugins.CinderInTreePluginName:
return true
case csilibplugins.RBDVolumePluginName:
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationRBD) {
return false
}
default:
return false
}

View File

@ -1100,8 +1100,6 @@ func isCSIMigrationOnForPlugin(pluginName string) bool {
return true
case csiplugins.PortworxVolumePluginName:
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationPortworx)
case csiplugins.RBDVolumePluginName:
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationRBD)
}
return false
}

View File

@ -250,9 +250,6 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
csitranslationplugins.PortworxVolumePluginName: func() bool {
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationPortworx)
},
csitranslationplugins.RBDVolumePluginName: func() bool {
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationRBD)
},
}
// Initializing the label management channels
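
The entries removed above (such as the csitranslationplugins.RBDVolumePluginName closure) are feature-gate checks keyed by in-tree plugin name. A minimal sketch of how such a closure map is typically consulted (the helper name below is hypothetical):

// isMigratedSketch looks up the in-tree plugin name and lazily evaluates its
// feature-gate check; absent entries are treated as "not migrated".
func isMigratedSketch(migratedPlugins map[string]func() bool, inTreePluginName string) bool {
	check, ok := migratedPlugins[inTreePluginName]
	return ok && check()
}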

View File

@ -74,8 +74,6 @@ func (pm PluginManager) IsMigrationCompleteForPlugin(pluginName string) bool {
return pm.featureGate.Enabled(features.InTreePluginvSphereUnregister)
case csilibplugins.PortworxVolumePluginName:
return pm.featureGate.Enabled(features.InTreePluginPortworxUnregister)
case csilibplugins.RBDVolumePluginName:
return pm.featureGate.Enabled(features.InTreePluginRBDUnregister)
default:
return false
}
@ -102,8 +100,6 @@ func (pm PluginManager) IsMigrationEnabledForPlugin(pluginName string) bool {
return true
case csilibplugins.PortworxVolumePluginName:
return pm.featureGate.Enabled(features.CSIMigrationPortworx)
case csilibplugins.RBDVolumePluginName:
return pm.featureGate.Enabled(features.CSIMigrationRBD)
default:
return false
}
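
Broadly, the two methods touched here answer different questions: IsMigrationEnabledForPlugin reports whether operations for a plugin are translated to CSI, while IsMigrationCompleteForPlugin reports whether the in-tree plugin has been unregistered entirely. A hedged usage sketch, assuming a valid PluginManager value from this package:

// describeMigration summarizes a plugin's migration state using only the two
// query methods defined in this file.
func describeMigration(pm PluginManager, pluginName string) string {
	switch {
	case pm.IsMigrationCompleteForPlugin(pluginName):
		return "in-tree plugin unregistered; the CSI driver is required"
	case pm.IsMigrationEnabledForPlugin(pluginName):
		return "in-tree plugin registered, but operations are routed to CSI"
	default:
		return "in-tree plugin handles the volume"
	}
}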

View File

@ -39,8 +39,8 @@ func TestIsMigratable(t *testing.T) {
spec *volume.Spec
}{
{
name: "RBD PV source with CSIMigrationGCE enabled",
pluginFeature: features.CSIMigrationRBD,
name: "Portworx PV source with CSIMigrationPortworx enabled",
pluginFeature: features.CSIMigrationPortworx,
pluginFeatureEnabled: true,
isMigratable: true,
csiMigrationEnabled: true,
@ -48,8 +48,8 @@ func TestIsMigratable(t *testing.T) {
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
RBDImage: "test-disk",
PortworxVolume: &v1.PortworxVolumeSource{
VolumeID: "test-volume",
},
},
},
@ -57,8 +57,8 @@ func TestIsMigratable(t *testing.T) {
},
},
{
name: "RBD PD PV Source with CSIMigrationGCE disabled",
pluginFeature: features.CSIMigrationRBD,
name: "Portworx PD PV Source with CSIMigrationPortworx disabled",
pluginFeature: features.CSIMigrationPortworx,
pluginFeatureEnabled: false,
isMigratable: false,
csiMigrationEnabled: true,
@ -66,8 +66,8 @@ func TestIsMigratable(t *testing.T) {
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
RBDImage: "test-disk",
PortworxVolume: &v1.PortworxVolumeSource{
VolumeID: "test-volume",
},
},
},

View File

@ -1,15 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- jsafrane
- humblec
reviewers:
- sjenning
- saad-ali
- jsafrane
- jingxu97
- msau42
- cofyc
- humblec
emeritus_approvers:
- rootfs

View File

@ -1,269 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
"os"
"time"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
)
// NewAttacher implements AttachableVolumePlugin.NewAttacher.
func (plugin *rbdPlugin) NewAttacher() (volume.Attacher, error) {
return plugin.newAttacherInternal(&rbdUtil{})
}
// NewDeviceMounter implements DeviceMountableVolumePlugin.NewDeviceMounter
func (plugin *rbdPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
func (plugin *rbdPlugin) newAttacherInternal(manager diskManager) (volume.Attacher, error) {
return &rbdAttacher{
plugin: plugin,
manager: manager,
mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
}, nil
}
// NewDetacher implements AttachableVolumePlugin.NewDetacher.
func (plugin *rbdPlugin) NewDetacher() (volume.Detacher, error) {
return plugin.newDetacherInternal(&rbdUtil{})
}
// NewDeviceUnmounter implements DeviceMountableVolumePlugin.NewDeviceUnmounter
func (plugin *rbdPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
func (plugin *rbdPlugin) newDetacherInternal(manager diskManager) (volume.Detacher, error) {
return &rbdDetacher{
plugin: plugin,
manager: manager,
mounter: volutil.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
}, nil
}
// GetDeviceMountRefs implements AttachableVolumePlugin.GetDeviceMountRefs.
func (plugin *rbdPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mounter.GetMountRefs(deviceMountPath)
}
func (plugin *rbdPlugin) CanAttach(spec *volume.Spec) (bool, error) {
return true, nil
}
func (plugin *rbdPlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
return true, nil
}
// rbdAttacher implements volume.Attacher interface.
type rbdAttacher struct {
plugin *rbdPlugin
mounter *mount.SafeFormatAndMount
manager diskManager
}
var _ volume.Attacher = &rbdAttacher{}
var _ volume.DeviceMounter = &rbdAttacher{}
// Attach implements Attacher.Attach.
// We do not lock the image here, because doing so would require
// kube-controller-manager to run the external `rbd` utility, and there is no
// need to: the AttachDetach controller will not try to attach RWO volumes that
// are already attached to other nodes.
func (attacher *rbdAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
return "", nil
}
// VolumesAreAttached implements Attacher.VolumesAreAttached.
// There is no way to confirm whether the volume is attached from outside the
// kubelet node, so this method must always return true, like the iSCSI and FC
// plugins.
func (attacher *rbdAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
for _, spec := range specs {
volumesAttachedCheck[spec] = true
}
return volumesAttachedCheck, nil
}
// WaitForAttach implements Attacher.WaitForAttach. It's called by kubelet to
// attach volume onto the node.
// This method is idempotent, callers are responsible for retrying on failure.
func (attacher *rbdAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) {
klog.V(4).Infof("rbd: waiting for attach volume (name: %s) for pod (name: %s, uid: %s)", spec.Name(), pod.Name, pod.UID)
mounter, err := attacher.plugin.createMounterFromVolumeSpecAndPod(spec, pod)
if err != nil {
klog.Warningf("failed to create mounter: %v", spec)
return "", err
}
realDevicePath, err := attacher.manager.AttachDisk(*mounter)
if err != nil {
return "", err
}
klog.V(3).Infof("rbd: successfully wait for attach volume (spec: %s, pool: %s, image: %s) at %s", spec.Name(), mounter.Pool, mounter.Image, realDevicePath)
return realDevicePath, nil
}
// GetDeviceMountPath implements Attacher.GetDeviceMountPath.
func (attacher *rbdAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
img, err := getVolumeSourceImage(spec)
if err != nil {
return "", err
}
pool, err := getVolumeSourcePool(spec)
if err != nil {
return "", err
}
return makePDNameInternal(attacher.plugin.host, pool, img), nil
}
// MountDevice implements Attacher.MountDevice. It is called by the kubelet to
// mount device at the given mount path.
// This method is idempotent, callers are responsible for retrying on failure.
func (attacher *rbdAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, mountArgs volume.DeviceMounterArgs) error {
klog.V(4).Infof("rbd: mouting device %s to %s", devicePath, deviceMountPath)
notMnt, err := attacher.mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
if !notMnt {
return nil
}
fstype, err := getVolumeSourceFSType(spec)
if err != nil {
return err
}
ro, err := getVolumeSourceReadOnly(spec)
if err != nil {
return err
}
options := []string{}
if ro {
options = append(options, "ro")
}
if mountArgs.SELinuxLabel != "" {
options = volutil.AddSELinuxMountOption(options, mountArgs.SELinuxLabel)
}
mountOptions := volutil.MountOptionFromSpec(spec, options...)
err = attacher.mounter.FormatAndMount(devicePath, deviceMountPath, fstype, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return fmt.Errorf("rbd: failed to mount device %s at %s (fstype: %s), error %v", devicePath, deviceMountPath, fstype, err)
}
klog.V(3).Infof("rbd: successfully mount device %s at %s (fstype: %s)", devicePath, deviceMountPath, fstype)
return nil
}
// rbdDetacher implements volume.Detacher interface.
type rbdDetacher struct {
plugin *rbdPlugin
manager diskManager
mounter *mount.SafeFormatAndMount
}
var _ volume.Detacher = &rbdDetacher{}
var _ volume.DeviceUnmounter = &rbdDetacher{}
// UnmountDevice implements Detacher.UnmountDevice. It unmounts the global
// mount of the RBD image. This is called once all bind mounts have been
// unmounted.
// Internally, it does four things:
// - Unmount device from deviceMountPath.
// - Detach device from the node.
// - Remove lock if found. (No need to check volume readonly or not, because
// device is not on the node anymore, it's safe to remove lock.)
// - Remove the deviceMountPath at last.
//
// This method is idempotent, callers are responsible for retrying on failure.
func (detacher *rbdDetacher) UnmountDevice(deviceMountPath string) error {
if pathExists, pathErr := mount.PathExists(deviceMountPath); pathErr != nil {
return fmt.Errorf("error checking if path exists: %v", pathErr)
} else if !pathExists {
klog.Warningf("Warning: Unmount skipped because path does not exist: %v", deviceMountPath)
return nil
}
devicePath, _, err := mount.GetDeviceNameFromMount(detacher.mounter, deviceMountPath)
if err != nil {
return err
}
// Unmount the device from the device mount point.
notMnt, err := detacher.mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
return err
}
if !notMnt {
klog.V(4).Infof("rbd: unmouting device mountpoint %s", deviceMountPath)
if err = detacher.mounter.Unmount(deviceMountPath); err != nil {
return err
}
klog.V(3).Infof("rbd: successfully unmount device mountpath %s", deviceMountPath)
}
// Get devicePath from deviceMountPath if devicePath is empty
if devicePath == "" {
rbdImageInfo, err := getRbdImageInfo(deviceMountPath)
if err != nil {
return err
}
found := false
devicePath, found = getRbdDevFromImageAndPool(rbdImageInfo.pool, rbdImageInfo.name)
if !found {
klog.Warningf("rbd: can't found devicePath for %v. Device is already unmounted, Image %v, Pool %v", deviceMountPath, rbdImageInfo.pool, rbdImageInfo.name)
}
}
if devicePath != "" {
klog.V(4).Infof("rbd: detaching device %s", devicePath)
err = detacher.manager.DetachDisk(detacher.plugin, deviceMountPath, devicePath)
if err != nil {
return err
}
klog.V(3).Infof("rbd: successfully detach device %s", devicePath)
}
err = os.Remove(deviceMountPath)
if err != nil {
return err
}
klog.V(3).Infof("rbd: successfully remove device mount point %s", deviceMountPath)
return nil
}
// Detach implements Detacher.Detach.
func (detacher *rbdDetacher) Detach(volumeName string, nodeName types.NodeName) error {
return nil
}
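
For orientation, kubelet drove the removed attacher roughly in the order below. This is a hedged sketch using only methods defined in this file (it assumes the file's existing imports, including time; error handling is abbreviated, and note that NewDeviceMounter simply returns the attacher):

// attachAndMountSketch illustrates the call order kubelet uses against this
// plugin: Attach is a no-op for RBD, WaitForAttach maps the image to a local
// device, and MountDevice formats and mounts that device at the global path.
func attachAndMountSketch(attacher volume.Attacher, dm volume.DeviceMounter, spec *volume.Spec, pod *v1.Pod, node types.NodeName) error {
	devicePath, err := attacher.Attach(spec, node) // returns "" for RBD
	if err != nil {
		return err
	}
	devicePath, err = attacher.WaitForAttach(spec, devicePath, pod, 10*time.Second)
	if err != nil {
		return err
	}
	deviceMountPath, err := dm.GetDeviceMountPath(spec)
	if err != nil {
		return err
	}
	return dm.MountDevice(spec, devicePath, deviceMountPath, volume.DeviceMounterArgs{})
}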

View File

@ -1,135 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// The diskManager interface and the diskSetUp/diskTearDown functions abstract commonly used procedures for setting up a block volume.
// The rbd volume implements diskManager, calls diskSetUp when creating a volume, and calls diskTearDown inside the volume unmounter.
// TODO: consolidate, refactor, and share diskManager among iSCSI, GCE PD, and RBD
//
package rbd
import (
"fmt"
"os"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// Abstract interface to disk operations.
type diskManager interface {
// MakeGlobalPDName creates global persistent disk path.
MakeGlobalPDName(disk rbd) string
// MakeGlobalVDPDName creates global block disk path.
MakeGlobalVDPDName(disk rbd) string
// Attaches the disk to the kubelet's host machine.
// If it successfully attaches, the path to the device
// is returned. Otherwise, an error will be returned.
AttachDisk(disk rbdMounter) (string, error)
// Detaches the disk from the kubelet's host machine.
DetachDisk(plugin *rbdPlugin, deviceMountPath string, device string) error
// Detaches the block disk from the kubelet's host machine.
DetachBlockDisk(disk rbdDiskUnmapper, mntPath string) error
// Creates a rbd image.
CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, volumeSizeGB int, err error)
// Deletes a rbd image.
DeleteImage(deleter *rbdVolumeDeleter) error
// Expands a rbd image
ExpandImage(expander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
}
// utility to mount a disk based filesystem
func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount.Interface, fsGroup *int64, fsGroupChangePolicy *v1.PodFSGroupChangePolicy) error {
globalPDPath := manager.MakeGlobalPDName(*b.rbd)
notMnt, err := mounter.IsLikelyNotMountPoint(globalPDPath)
if err != nil && !os.IsNotExist(err) {
klog.Errorf("cannot validate mountpoint: %s", globalPDPath)
return err
}
if notMnt {
return fmt.Errorf("no device is mounted at %s", globalPDPath)
}
notMnt, err = mounter.IsLikelyNotMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
klog.Errorf("cannot validate mountpoint: %s", volPath)
return err
}
if !notMnt {
return nil
}
if err := os.MkdirAll(volPath, 0750); err != nil {
klog.Errorf("failed to mkdir:%s", volPath)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same disk.
options := []string{"bind"}
if (&b).GetAttributes().ReadOnly {
options = append(options, "ro")
}
mountOptions := util.JoinMountOptions(b.mountOptions, options)
err = mounter.Mount(globalPDPath, volPath, "", mountOptions)
if err != nil {
klog.Errorf("failed to bind mount:%s", globalPDPath)
return err
}
klog.V(3).Infof("rbd: successfully bind mount %s to %s with options %v", globalPDPath, volPath, mountOptions)
if !b.ReadOnly {
volume.SetVolumeOwnership(&b, volPath, fsGroup, fsGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil))
}
return nil
}
// utility to tear down a disk based filesystem
func diskTearDown(manager diskManager, c rbdUnmounter, volPath string, mounter mount.Interface) error {
notMnt, err := mounter.IsLikelyNotMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
klog.Errorf("cannot validate mountpoint: %s", volPath)
return err
}
if notMnt {
klog.V(3).Infof("volume path %s is not a mountpoint, deleting", volPath)
return os.Remove(volPath)
}
// Unmount the bind-mount inside this pod.
if err := mounter.Unmount(volPath); err != nil {
klog.Errorf("failed to unmount %s", volPath)
return err
}
notMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath)
if mntErr != nil && !os.IsNotExist(mntErr) {
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return mntErr
}
if notMnt {
if err := os.Remove(volPath); err != nil {
klog.V(2).Infof("Error removing mountpoint %s: %v", volPath, err)
return err
}
}
return nil
}
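
The bind mount performed by diskSetUp links a per-image global mount to a per-pod volume path. The concrete layout, matching the expectations in rbd_test.go later in this commit, can be sketched as follows (assuming path/filepath is imported; kubeletRoot and the other arguments are placeholders):

// rbdMountPaths returns the shared device mount path and the per-pod bind
// mount target used by diskSetUp/diskTearDown.
func rbdMountPaths(kubeletRoot, pool, image, podUID, volName string) (globalPDPath, podVolPath string) {
	globalPDPath = filepath.Join(kubeletRoot, "plugins/kubernetes.io/rbd/mounts", pool+"-image-"+image)
	podVolPath = filepath.Join(kubeletRoot, "pods", podUID, "volumes/kubernetes.io~rbd", volName)
	return globalPDPath, podVolPath
}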

View File

@ -1,19 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package rbd contains the internal representation of Rados Block Store (Ceph)
// volumes.
package rbd // import "k8s.io/kubernetes/pkg/volume/rbd"

File diff suppressed because it is too large

View File

@ -1,764 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"testing"
"time"
"k8s.io/mount-utils"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
const (
testVolName = "vol-1234"
testRBDImage = "volume-a4b47414-a675-47dc-a9cc-c223f13439b0"
testRBDPool = "volumes"
testGlobalPath = "plugins/kubernetes.io/rbd/volumeDevices/volumes-image-volume-a4b47414-a675-47dc-a9cc-c223f13439b0"
)
func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
// make our test path for fake GlobalMapPath
// /tmp symbolized our pluginDir
// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/rbd/volumeDevices/pdVol1
tmpVDir, err := utiltesting.MkTmpdir("rbdBlockTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
//deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)
//Bad Path
badspec, err := getVolumeSpecFromGlobalMapPath("", testVolName)
if badspec != nil || err == nil {
t.Fatalf("Expected not to get spec from GlobalMapPath but did")
}
// Good Path
spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath, testVolName)
if spec == nil || err != nil {
t.Fatalf("Failed to get spec from GlobalMapPath: %v", err)
}
if spec.PersistentVolume.Name != testVolName {
t.Errorf("Invalid spec name for GlobalMapPath spec: %s", spec.PersistentVolume.Name)
}
if spec.PersistentVolume.Spec.RBD.RBDPool != testRBDPool {
t.Errorf("Invalid RBDPool from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.RBD.RBDPool)
}
if spec.PersistentVolume.Spec.RBD.RBDImage != testRBDImage {
t.Errorf("Invalid RBDImage from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.RBD.RBDImage)
}
block := v1.PersistentVolumeBlock
specMode := spec.PersistentVolume.Spec.VolumeMode
if specMode == nil {
t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v - %v", specMode, block)
}
if *specMode != block {
t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v - %v", *specMode, block)
}
}
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Fatal("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/rbd" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if plug.CanSupport(&volume.Spec{}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{RBD: &v1.RBDVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{}}}) {
t.Errorf("Expected false")
}
if plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}) {
t.Errorf("Expected false")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{RBD: &v1.RBDPersistentVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
type fakeDiskManager struct {
// Make sure we can run tests in parallel.
mutex sync.RWMutex
// Key format: "<pool>/<image>"
rbdImageLocks map[string]bool
rbdMapIndex int
rbdDevices map[string]bool
}
func newFakeDiskManager() *fakeDiskManager {
return &fakeDiskManager{
rbdImageLocks: make(map[string]bool),
rbdMapIndex: 0,
rbdDevices: make(map[string]bool),
}
}
func (fake *fakeDiskManager) MakeGlobalPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func (fake *fakeDiskManager) MakeGlobalVDPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func (fake *fakeDiskManager) DetachDisk(r *rbdPlugin, deviceMountPath string, device string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
ok := fake.rbdDevices[device]
if !ok {
return fmt.Errorf("rbd: failed to detach device %s, it does not exist", device)
}
delete(fake.rbdDevices, device)
return nil
}
func (fake *fakeDiskManager) DetachBlockDisk(r rbdDiskUnmapper, device string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
ok := fake.rbdDevices[device]
if !ok {
return fmt.Errorf("rbd: failed to detach device %s, it does not exist", device)
}
delete(fake.rbdDevices, device)
return nil
}
func (fake *fakeDiskManager) CreateImage(provisioner *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, volumeSizeGB int, err error) {
return nil, 0, fmt.Errorf("not implemented")
}
func (fake *fakeDiskManager) DeleteImage(deleter *rbdVolumeDeleter) error {
return fmt.Errorf("not implemented")
}
func (fake *fakeDiskManager) Fencing(r rbdMounter, nodeName string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
key := fmt.Sprintf("%s/%s", r.Pool, r.Image)
isLocked, ok := fake.rbdImageLocks[key]
if ok && isLocked {
// not expected in testing
return fmt.Errorf("%s is already locked", key)
}
fake.rbdImageLocks[key] = true
return nil
}
func (fake *fakeDiskManager) Defencing(r rbdMounter, nodeName string) error {
fake.mutex.Lock()
defer fake.mutex.Unlock()
key := fmt.Sprintf("%s/%s", r.Pool, r.Image)
isLocked, ok := fake.rbdImageLocks[key]
if !ok || !isLocked {
// not expected in testing
return fmt.Errorf("%s is not locked", key)
}
delete(fake.rbdImageLocks, key)
return nil
}
func (fake *fakeDiskManager) IsLocked(r rbdMounter, nodeName string) (bool, error) {
fake.mutex.RLock()
defer fake.mutex.RUnlock()
key := fmt.Sprintf("%s/%s", r.Pool, r.Image)
isLocked, ok := fake.rbdImageLocks[key]
return ok && isLocked, nil
}
func (fake *fakeDiskManager) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
return resource.Quantity{}, fmt.Errorf("not implemented")
}
// checkMounterLog checks that fakeMounter has the expected number of log entries and that the last action equals expectedAction.
func checkMounterLog(t *testing.T, fakeMounter *mount.FakeMounter, expected int, expectedAction mount.FakeAction) {
log := fakeMounter.GetLog()
if len(log) != expected {
t.Fatalf("fakeMounter should have %d logs, actual: %d", expected, len(log))
}
lastIndex := len(log) - 1
lastAction := log[lastIndex]
if !reflect.DeepEqual(expectedAction, lastAction) {
t.Fatalf("fakeMounter.Log[%d] should be %#v, not: %#v", lastIndex, expectedAction, lastAction)
}
}
func doTestPlugin(t *testing.T, c *testcase) {
fakeVolumeHost := volumetest.NewFakeKubeletVolumeHost(t, c.root, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Fatal("Can't find the plugin by name")
}
fakeMounter := fakeVolumeHost.GetMounter(plug.GetPluginName()).(*mount.FakeMounter)
fakeNodeName := types.NodeName("localhost")
fdm := newFakeDiskManager()
// attacher
attacher, err := plug.(*rbdPlugin).newAttacherInternal(fdm)
if err != nil {
t.Errorf("Failed to make a new Attacher: %v", err)
}
deviceAttachPath, err := attacher.Attach(c.spec, fakeNodeName)
if err != nil {
t.Fatal(err)
}
devicePath, err := attacher.WaitForAttach(c.spec, deviceAttachPath, c.pod, time.Second*10)
if err != nil {
t.Fatal(err)
}
if devicePath != c.expectedDevicePath {
t.Errorf("Unexpected path, expected %q, not: %q", c.expectedDevicePath, devicePath)
}
deviceMountPath, err := attacher.GetDeviceMountPath(c.spec)
if err != nil {
t.Fatal(err)
}
if deviceMountPath != c.expectedDeviceMountPath {
t.Errorf("Unexpected mount path, expected %q, not: %q", c.expectedDeviceMountPath, deviceMountPath)
}
err = attacher.MountDevice(c.spec, devicePath, deviceMountPath, volume.DeviceMounterArgs{})
if err != nil {
t.Fatal(err)
}
if _, err := os.Stat(deviceMountPath); err != nil {
if os.IsNotExist(err) {
t.Errorf("Attacher.MountDevice() failed, device mount path not created: %s", deviceMountPath)
} else {
t.Errorf("Attacher.MountDevice() failed: %v", err)
}
}
loggedSource, err := getLoggedSource(devicePath)
if err != nil {
t.Fatal(err)
}
checkMounterLog(t, fakeMounter, 1, mount.FakeAction{Action: "mount", Target: c.expectedDeviceMountPath, Source: loggedSource, FSType: "ext4"})
// mounter
mounter, err := plug.(*rbdPlugin).newMounterInternal(c.spec, c.pod.UID, fdm, "secrets")
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Error("Got a nil Mounter")
}
path := mounter.GetPath()
if path != c.expectedPodMountPath {
t.Errorf("Unexpected path, expected %q, got: %q", c.expectedPodMountPath, path)
}
if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
checkMounterLog(t, fakeMounter, 2, mount.FakeAction{Action: "mount", Target: c.expectedPodMountPath, Source: loggedSource, FSType: ""})
// unmounter
unmounter, err := plug.(*rbdPlugin).newUnmounterInternal(c.spec.Name(), c.pod.UID, fdm)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Error("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
checkMounterLog(t, fakeMounter, 3, mount.FakeAction{Action: "unmount", Target: c.expectedPodMountPath, Source: "", FSType: ""})
// detacher
detacher, err := plug.(*rbdPlugin).newDetacherInternal(fdm)
if err != nil {
t.Errorf("Failed to make a new Attacher: %v", err)
}
err = detacher.UnmountDevice(deviceMountPath)
if err != nil {
t.Fatalf("Detacher.UnmountDevice failed to unmount %s", deviceMountPath)
}
checkMounterLog(t, fakeMounter, 4, mount.FakeAction{Action: "unmount", Target: c.expectedDeviceMountPath, Source: "", FSType: ""})
err = detacher.Detach(deviceMountPath, fakeNodeName)
if err != nil {
t.Fatalf("Detacher.Detach failed to detach %s from %s", deviceMountPath, fakeNodeName)
}
}
type testcase struct {
spec *volume.Spec
root string
pod *v1.Pod
expectedDevicePath string
expectedDeviceMountPath string
expectedPodMountPath string
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
tmpDir, err = filepath.EvalSymlinks(tmpDir)
if err != nil {
t.Fatal(err)
}
expectedDevicePath := "/dev/rbd0"
if runtime.GOOS == "windows" {
// Windows expects Disk Numbers.
expectedDevicePath = "0"
}
podUID := uuid.NewUUID()
var cases []*testcase
cases = append(cases, &testcase{
spec: volume.NewSpecFromVolume(&v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDPool: "pool1",
RBDImage: "image1",
FSType: "ext4",
},
},
}),
root: tmpDir,
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
Namespace: "testns",
UID: podUID,
},
},
expectedDevicePath: expectedDevicePath,
expectedDeviceMountPath: filepath.Join(tmpDir, "plugins/kubernetes.io/rbd/mounts/pool1-image-image1"),
expectedPodMountPath: filepath.Join(tmpDir, "pods", string(podUID), "volumes/kubernetes.io~rbd/vol1"),
})
cases = append(cases, &testcase{
spec: volume.NewSpecFromPersistentVolume(&v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "vol2",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{"a", "b"},
RBDPool: "pool2",
RBDImage: "image2",
FSType: "ext4",
},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
},
}, false),
root: tmpDir,
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "testpod",
Namespace: "testns",
UID: podUID,
},
},
expectedDevicePath: expectedDevicePath,
expectedDeviceMountPath: filepath.Join(tmpDir, "plugins/kubernetes.io/rbd/mounts/pool2-image-image2"),
expectedPodMountPath: filepath.Join(tmpDir, "pods", string(podUID), "volumes/kubernetes.io~rbd/vol2"),
})
for i := 0; i < len(cases); i++ {
doTestPlugin(t, cases[i])
}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{"a", "b"},
RBDImage: "bar",
FSType: "ext4",
},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(rbdPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
func TestGetSecretNameAndNamespace(t *testing.T) {
secretName := "test-secret-name"
secretNamespace := "test-secret-namespace"
volSpec := &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{"a", "b"},
RBDImage: "bar",
FSType: "ext4",
},
},
},
},
}
secretRef := new(v1.SecretReference)
secretRef.Name = secretName
secretRef.Namespace = secretNamespace
volSpec.PersistentVolume.Spec.PersistentVolumeSource.RBD.SecretRef = secretRef
foundSecretName, foundSecretNamespace, err := getSecretNameAndNamespace(volSpec, "default")
if err != nil {
t.Errorf("getSecretNameAndNamespace failed to get Secret's name and namespace: %v", err)
}
if strings.Compare(secretName, foundSecretName) != 0 || strings.Compare(secretNamespace, foundSecretNamespace) != 0 {
t.Errorf("getSecretNameAndNamespace returned incorrect values, expected %s and %s but got %s and %s", secretName, secretNamespace, foundSecretName, foundSecretNamespace)
}
}
// https://github.com/kubernetes/kubernetes/issues/57744
func TestGetDeviceMountPath(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
fakeVolumeHost := volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fdm := newFakeDiskManager()
// attacher
attacher, err := plug.(*rbdPlugin).newAttacherInternal(fdm)
if err != nil {
t.Errorf("Failed to make a new Attacher: %v", err)
}
pool, image := "pool", "image"
spec := volume.NewSpecFromVolume(&v1.Volume{
Name: "vol",
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDPool: pool,
RBDImage: image,
FSType: "ext4",
},
},
})
deprecatedDir := filepath.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/rbd/rbd/%s-image-%s", pool, image))
canonicalDir := filepath.Join(tmpDir, fmt.Sprintf("plugins/kubernetes.io/rbd/mounts/%s-image-%s", pool, image))
type testCase struct {
deprecated bool
targetPath string
}
for _, c := range []testCase{
{false, canonicalDir},
{true, deprecatedDir},
} {
if c.deprecated {
// This is a deprecated device mount path; we create it
// and expect attacher.GetDeviceMountPath to return c.targetPath.
if err := os.MkdirAll(c.targetPath, 0700); err != nil {
t.Fatalf("Create deprecated mount path failed: %v", err)
}
}
mountPath, err := attacher.GetDeviceMountPath(spec)
if err != nil {
t.Fatalf("GetDeviceMountPath failed: %v", err)
}
if mountPath != c.targetPath {
t.Errorf("Mismatch device mount path: wanted %s, got %s", c.targetPath, mountPath)
}
}
}
// https://github.com/kubernetes/kubernetes/issues/57744
func TestConstructVolumeSpec(t *testing.T) {
if runtime.GOOS == "darwin" {
t.Skipf("TestConstructVolumeSpec is not supported on GOOS=%s", runtime.GOOS)
}
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
fakeVolumeHost := volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Fatal("Can't find the plugin by name")
}
fakeMounter := fakeVolumeHost.GetMounter(plug.GetPluginName()).(*mount.FakeMounter)
pool, image, volumeName := "pool", "image", "vol"
podMountPath := filepath.Join(tmpDir, "pods/pod123/volumes/kubernetes.io~rbd", volumeName)
deprecatedDir := filepath.Join(tmpDir, "plugins/kubernetes.io/rbd/rbd", fmt.Sprintf("%s-image-%s", pool, image))
canonicalDir := filepath.Join(tmpDir, "plugins/kubernetes.io/rbd/mounts", fmt.Sprintf("%s-image-%s", pool, image))
type testCase struct {
volumeName string
targetPath string
}
for _, c := range []testCase{
{"vol", canonicalDir},
{"vol", deprecatedDir},
} {
if err := os.MkdirAll(c.targetPath, 0700); err != nil {
t.Fatalf("Create mount path %s failed: %v", c.targetPath, err)
}
if err = fakeMounter.Mount("/dev/rbd0", c.targetPath, "fake", nil); err != nil {
t.Fatalf("Mount %s to %s failed: %v", c.targetPath, podMountPath, err)
}
if err = fakeMounter.Mount(c.targetPath, podMountPath, "fake", []string{"bind"}); err != nil {
t.Fatalf("Mount %s to %s failed: %v", c.targetPath, podMountPath, err)
}
rec, err := plug.ConstructVolumeSpec(c.volumeName, podMountPath)
if err != nil {
t.Errorf("ConstructVolumeSpec failed: %v", err)
} else {
if rec.Spec.Volume.RBD.RBDPool != pool {
t.Errorf("Mismatch rbd pool: wanted %s, got %s", pool, rec.Spec.Volume.RBD.RBDPool)
}
if rec.Spec.Volume.RBD.RBDImage != image {
t.Fatalf("Mismatch rbd image: wanted %s, got %s", image, rec.Spec.Volume.RBD.RBDImage)
}
}
if err = fakeMounter.Unmount(podMountPath); err != nil {
t.Fatalf("Unmount pod path %s failed: %v", podMountPath, err)
}
if err = fakeMounter.Unmount(c.targetPath); err != nil {
t.Fatalf("Unmount device path %s failed: %v", c.targetPath, err)
}
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
modes := plug.GetAccessModes()
for _, v := range modes {
if !volumetest.ContainsAccessMode(modes, v) {
t.Errorf("Expected AccessModeTypes: %s", v)
}
}
}
func TestRequiresRemount(t *testing.T) {
tmpDir, _ := utiltesting.MkTmpdir("rbd_test")
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, _ := plugMgr.FindPluginByName("kubernetes.io/rbd")
has := plug.RequiresRemount(nil)
if has {
t.Errorf("Expected RequiresRemount to be false, got %t", has)
}
}
func TestGetRbdImageSize(t *testing.T) {
for i, c := range []struct {
Output string
TargetSize int
}{
{
Output: `{"name":"kubernetes-dynamic-pvc-18e7a4d9-050d-11e9-b905-548998f3478f","size":10737418240,"objects":2560,"order":22,"object_size":4194304,"block_name_prefix":"rbd_data.9f4ff7238e1f29","format":2}`,
TargetSize: 10240,
},
{
Output: `{"name":"kubernetes-dynamic-pvc-070635bf-e33f-11e8-aab7-548998f3478f","size":1073741824,"objects":256,"order":22,"object_size":4194304,"block_name_prefix":"rbd_data.670ac4238e1f29","format":2}`,
TargetSize: 1024,
},
} {
size, err := getRbdImageSize([]byte(c.Output))
if err != nil {
t.Errorf("Case %d: getRbdImageSize failed: %v", i, err)
continue
}
if size != c.TargetSize {
t.Errorf("Case %d: unexpected size, wanted %d, got %d", i, c.TargetSize, size)
}
}
}
func TestGetRbdImageInfo(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
for i, c := range []struct {
DeviceMountPath string
TargetRbdImageInfo *rbdImageInfo
}{
{
DeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/rbd/pool1-image-image1", tmpDir),
TargetRbdImageInfo: &rbdImageInfo{pool: "pool1", name: "image1"},
},
{
DeviceMountPath: fmt.Sprintf("%s/plugins/kubernetes.io/rbd/mounts/pool2-image-image2", tmpDir),
TargetRbdImageInfo: &rbdImageInfo{pool: "pool2", name: "image2"},
},
} {
rbdImageInfo, err := getRbdImageInfo(c.DeviceMountPath)
if err != nil {
t.Errorf("Case %d: getRbdImageInfo failed: %v", i, err)
continue
}
if !reflect.DeepEqual(rbdImageInfo, c.TargetRbdImageInfo) {
t.Errorf("Case %d: unexpected RbdImageInfo, wanted %v, got %v", i, c.TargetRbdImageInfo, rbdImageInfo)
}
}
}
func TestUnsupportedVolumeHost(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Fatal("Can't find the plugin by name")
}
_, err = plug.ConstructVolumeSpec("", "")
if err == nil {
t.Errorf("Expected failure constructing volume spec with unsupported VolumeHost")
}
}

View File

@ -1,38 +0,0 @@
//go:build !windows
// +build !windows
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
)
func (fake *fakeDiskManager) AttachDisk(b rbdMounter) (string, error) {
fake.mutex.Lock()
defer fake.mutex.Unlock()
devicePath := fmt.Sprintf("/dev/rbd%d", fake.rbdMapIndex)
fake.rbdDevices[devicePath] = true
// Increment rbdMapIndex afterwards, so we can start from rbd0.
fake.rbdMapIndex++
return devicePath, nil
}
func getLoggedSource(devicePath string) (string, error) {
return devicePath, nil
}

View File

@ -1,812 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// utility functions to setup rbd volume
// mainly implement diskManager interface
//
package rbd
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
utilexec "k8s.io/utils/exec"
utilpath "k8s.io/utils/path"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
nodeutil "k8s.io/component-helpers/node/util"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
)
const (
imageWatcherStr = "watcher="
kubeLockMagic = "kubelet_lock_magic_"
// The following three values are used for a 30-second timeout
// while waiting for RBD Watcher to expire.
rbdImageWatcherInitDelay = 1 * time.Second
rbdImageWatcherFactor = 1.4
rbdImageWatcherSteps = 10
rbdImageSizeUnitMiB = 1024 * 1024
)
// A struct contains rbd image info.
type rbdImageInfo struct {
pool string
name string
}
func getDevFromImageAndPool(pool, image string) (string, bool) {
device, found := getRbdDevFromImageAndPool(pool, image)
if found {
return device, true
}
device, found = getNbdDevFromImageAndPool(pool, image)
if found {
return device, true
}
return "", false
}
// Search /sys/bus for rbd device that matches given pool and image.
func getRbdDevFromImageAndPool(pool string, image string) (string, bool) {
// /sys/bus/rbd/devices/X/name and /sys/bus/rbd/devices/X/pool
sysPath := "/sys/bus/rbd/devices"
if dirs, err := ioutil.ReadDir(sysPath); err == nil {
for _, f := range dirs {
// Pool and name format:
// see rbd_pool_show() and rbd_name_show() at
// https://github.com/torvalds/linux/blob/master/drivers/block/rbd.c
name := f.Name()
// First match pool, then match name.
poolFile := filepath.Join(sysPath, name, "pool")
poolBytes, err := ioutil.ReadFile(poolFile)
if err != nil {
klog.V(4).Infof("error reading %s: %v", poolFile, err)
continue
}
if strings.TrimSpace(string(poolBytes)) != pool {
klog.V(4).Infof("device %s is not %q: %q", name, pool, string(poolBytes))
continue
}
imgFile := filepath.Join(sysPath, name, "name")
imgBytes, err := ioutil.ReadFile(imgFile)
if err != nil {
klog.V(4).Infof("error reading %s: %v", imgFile, err)
continue
}
if strings.TrimSpace(string(imgBytes)) != image {
klog.V(4).Infof("device %s is not %q: %q", name, image, string(imgBytes))
continue
}
// Found a match, check if device exists.
devicePath := "/dev/rbd" + name
if _, err := os.Lstat(devicePath); err == nil {
return devicePath, true
}
}
}
return "", false
}
func getMaxNbds() (int, error) {
// the max number of nbd devices may be found in maxNbdsPath
// we will check sysfs for possible nbd devices even if this is not available
maxNbdsPath := "/sys/module/nbd/parameters/nbds_max"
_, err := os.Lstat(maxNbdsPath)
if err != nil {
return 0, fmt.Errorf("rbd-nbd: failed to retrieve max_nbds from %s err: %q", maxNbdsPath, err)
}
klog.V(4).Infof("found nbds max parameters file at %s", maxNbdsPath)
maxNbdBytes, err := ioutil.ReadFile(maxNbdsPath)
if err != nil {
return 0, fmt.Errorf("rbd-nbd: failed to read max_nbds from %s err: %q", maxNbdsPath, err)
}
maxNbds, err := strconv.Atoi(strings.TrimSpace(string(maxNbdBytes)))
if err != nil {
return 0, fmt.Errorf("rbd-nbd: failed to read max_nbds err: %q", err)
}
klog.V(4).Infof("rbd-nbd: max_nbds: %d", maxNbds)
return maxNbds, nil
}
// Locate any existing rbd-nbd process mapping for a given <pool, image>.
// Recent versions of the rbd-nbd tool can provide this info via list-mapped,
// but older versions cannot.
// The implementation below peeks at the command line of nbd bound processes
// to figure out any mapped images.
func getNbdDevFromImageAndPool(pool string, image string) (string, bool) {
// nbd module exports the pid of serving process in sysfs
basePath := "/sys/block/nbd"
// Do not change imgPath format - some tools like rbd-nbd are strict about it.
imgPath := fmt.Sprintf("%s/%s", pool, image)
maxNbds, maxNbdsErr := getMaxNbds()
if maxNbdsErr != nil {
klog.V(4).Infof("error reading nbds_max %v", maxNbdsErr)
return "", false
}
for i := 0; i < maxNbds; i++ {
nbdPath := basePath + strconv.Itoa(i)
_, err := os.Lstat(nbdPath)
if err != nil {
klog.V(4).Infof("error reading nbd info directory %s: %v", nbdPath, err)
continue
}
pidBytes, err := ioutil.ReadFile(filepath.Join(nbdPath, "pid"))
if err != nil {
klog.V(5).Infof("did not find valid pid file in dir %s: %v", nbdPath, err)
continue
}
cmdlineFileName := filepath.Join("/proc", strings.TrimSpace(string(pidBytes)), "cmdline")
rawCmdline, err := ioutil.ReadFile(cmdlineFileName)
if err != nil {
klog.V(4).Infof("failed to read cmdline file %s: %v", cmdlineFileName, err)
continue
}
cmdlineArgs := strings.FieldsFunc(string(rawCmdline), func(r rune) bool {
return r == '\u0000'
})
// Check if this process is mapping a rbd device.
// Only accepted pattern of cmdline is from execRbdMap:
// rbd-nbd map pool/image ...
if len(cmdlineArgs) < 3 || cmdlineArgs[0] != "rbd-nbd" || cmdlineArgs[1] != "map" {
klog.V(4).Infof("nbd device %s is not used by rbd", nbdPath)
continue
}
if cmdlineArgs[2] != imgPath {
klog.V(4).Infof("rbd-nbd device %s did not match expected image path: %s with path found: %s",
nbdPath, imgPath, cmdlineArgs[2])
continue
}
devicePath := filepath.Join("/dev", "nbd"+strconv.Itoa(i))
if _, err := os.Lstat(devicePath); err != nil {
klog.Warningf("Stat device %s for imgpath %s failed %v", devicePath, imgPath, err)
continue
}
return devicePath, true
}
return "", false
}
// Stat a path, if it doesn't exist, retry maxRetries times.
func waitForPath(pool, image string, maxRetries int, useNbdDriver bool) (string, bool) {
for i := 0; i < maxRetries; i++ {
if i != 0 {
time.Sleep(time.Second)
}
if useNbdDriver {
if devicePath, found := getNbdDevFromImageAndPool(pool, image); found {
return devicePath, true
}
} else {
if devicePath, found := getRbdDevFromImageAndPool(pool, image); found {
return devicePath, true
}
}
}
return "", false
}
// Execute command to map a rbd device for mounter.
// rbdCmd is driver dependent and either "rbd" or "rbd-nbd".
func execRbdMap(b rbdMounter, rbdCmd string, mon string) ([]byte, error) {
// Commandline: rbdCmd map imgPath ...
// do not change this format - some tools like rbd-nbd are strict about it.
imgPath := fmt.Sprintf("%s/%s", b.Pool, b.Image)
if b.Secret != "" {
return b.exec.Command(rbdCmd,
"map", imgPath, "--id", b.ID, "-m", mon, "--key="+b.Secret).CombinedOutput()
}
return b.exec.Command(rbdCmd,
"map", imgPath, "--id", b.ID, "-m", mon, "-k", b.Keyring).CombinedOutput()
}
// Check if rbd-nbd tools are installed.
func checkRbdNbdTools(e utilexec.Interface) bool {
_, err := e.Command("modprobe", "nbd").CombinedOutput()
if err != nil {
klog.V(5).Infof("rbd-nbd: nbd modprobe failed with error %v", err)
return false
}
if _, err := e.Command("rbd-nbd", "--version").CombinedOutput(); err != nil {
klog.V(5).Infof("rbd-nbd: getting rbd-nbd version failed with error %v", err)
return false
}
klog.V(3).Infof("rbd-nbd tools were found.")
return true
}
// Make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/mounts/pool-image-image.
func makePDNameInternal(host volume.VolumeHost, pool string, image string) string {
// Backward compatibility for the deprecated format: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/pool-image-image.
deprecatedDir := filepath.Join(host.GetPluginDir(rbdPluginName), "rbd", pool+"-image-"+image)
info, err := os.Stat(deprecatedDir)
if err == nil && info.IsDir() {
// The device mount path has already been created with the deprecated format, return it.
klog.V(5).Infof("Deprecated format path %s found", deprecatedDir)
return deprecatedDir
}
// Return the canonical format path.
return filepath.Join(host.GetPluginDir(rbdPluginName), volutil.MountsInGlobalPDPath, pool+"-image-"+image)
}
// Make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/volumeDevices/pool-image-image.
func makeVDPDNameInternal(host volume.VolumeHost, pool string, image string) string {
return filepath.Join(host.GetVolumeDevicePluginDir(rbdPluginName), pool+"-image-"+image)
}
// rbdUtil implements diskManager interface.
type rbdUtil struct{}
var _ diskManager = &rbdUtil{}
// MakeGlobalPDName makes a plugin directory.
func (util *rbdUtil) MakeGlobalPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
// MakeGlobalVDPDName makes a volume device plugin directory.
func (util *rbdUtil) MakeGlobalVDPDName(rbd rbd) string {
return makeVDPDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
}
func rbdErrors(runErr, resultErr error) error {
if err, ok := runErr.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
return fmt.Errorf("rbd: rbd cmd not found")
}
}
return resultErr
}
// The 'rbd' utility builds a comma-separated list of monitor addresses from the '-m' /
// '--mon_host' parameter (comma, semi-colon, or white-space delimited monitor
// addresses) and sends it to the kernel rbd/libceph modules, which accept a
// comma-separated list of monitor addresses (e.g. ip1[:port1][,ip2[:port2]...])
// in their first version in linux (see
// https://github.com/torvalds/linux/blob/602adf400201636e95c3fed9f31fba54a3d7e844/net/ceph/ceph_common.c#L239).
// Also, libceph module chooses monitor randomly, so we can simply pass all
// addresses without randomization (see
// https://github.com/torvalds/linux/blob/602adf400201636e95c3fed9f31fba54a3d7e844/net/ceph/mon_client.c#L132).
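// For example, ["10.16.154.78:6789", "10.16.154.82:6789"] is joined into
// "10.16.154.78:6789,10.16.154.82:6789".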
func (util *rbdUtil) kernelRBDMonitorsOpt(mons []string) string {
return strings.Join(mons, ",")
}
// rbdUnlock releases the lock on the image if one is found.
func (util *rbdUtil) rbdUnlock(b rbdMounter) error {
var err error
var output, locker string
var cmd []byte
var secretOpt []string
if b.Secret != "" {
secretOpt = []string{"--key=" + b.Secret}
} else {
secretOpt = []string{"-k", b.Keyring}
}
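// Fall back to the user ID/secret when admin credentials were not provided.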
if len(b.adminID) == 0 {
b.adminID = b.ID
}
if len(b.adminSecret) == 0 {
b.adminSecret = b.Secret
}
// Construct lock id using host name and a magic prefix.
hostName, err := nodeutil.GetHostname("")
if err != nil {
return err
}
lockID := kubeLockMagic + hostName
mon := util.kernelRBDMonitorsOpt(b.Mon)
// Get the locker name, something like "client.1234".
args := []string{"lock", "list", b.Image, "--pool", b.Pool, "--id", b.ID, "-m", mon}
args = append(args, secretOpt...)
cmd, err = b.exec.Command("rbd", args...).CombinedOutput()
output = string(cmd)
klog.V(4).Infof("lock list output %q", output)
if err != nil {
return err
}
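// The "lock list" output contains lines of the form "<locker> <lock id> <address>".
// Walk backwards from our lock ID to the preceding newline to recover the
// locker name (something like "client.1234").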
ind := strings.LastIndex(output, lockID) - 1
for i := ind; i >= 0; i-- {
if output[i] == '\n' {
locker = output[(i + 1):ind]
break
}
}
// Remove a lock if found: rbd lock remove.
if len(locker) > 0 {
args := []string{"lock", "remove", b.Image, lockID, locker, "--pool", b.Pool, "--id", b.ID, "-m", mon}
args = append(args, secretOpt...)
_, err = b.exec.Command("rbd", args...).CombinedOutput()
if err == nil {
klog.V(4).Infof("rbd: successfully removed lock (lockID: %s) on image: %s/%s with id %s mon %s", lockID, b.Pool, b.Image, b.ID, mon)
} else {
klog.Warningf("rbd: failed to remove lock (lockID: %s) on image: %s/%s with id %s mon %s: %v", lockID, b.Pool, b.Image, b.ID, mon, err)
}
}
return err
}
// AttachDisk attaches the disk to the node.
func (util *rbdUtil) AttachDisk(b rbdMounter) (string, error) {
var output []byte
globalPDPath := util.MakeGlobalPDName(*b.rbd)
if pathExists, pathErr := mount.PathExists(globalPDPath); pathErr != nil {
return "", fmt.Errorf("error checking if path exists: %v", pathErr)
} else if !pathExists {
if err := os.MkdirAll(globalPDPath, 0750); err != nil {
return "", err
}
}
// Evaluate whether this device was mapped with rbd.
devicePath, mapped := waitForPath(b.Pool, b.Image, 1 /*maxRetries*/, false /*useNbdDriver*/)
// If the rbd-nbd tools are found, we will fall back to them should the default krbd driver fail.
nbdToolsFound := false
if !mapped {
nbdToolsFound = checkRbdNbdTools(b.exec)
if nbdToolsFound {
devicePath, mapped = waitForPath(b.Pool, b.Image, 1 /*maxRetries*/, true /*useNbdDriver*/)
}
}
if !mapped {
// Currently we don't acquire an advisory lock on the image, but for backward
// compatibility we need to check whether the image is being used by nodes running an old kubelet.
// osd_client_watch_timeout defaults to 30 seconds; if the watcher stays active longer than that,
// the rbd image does not get mounted and a failure message is generated.
backoff := wait.Backoff{
Duration: rbdImageWatcherInitDelay,
Factor: rbdImageWatcherFactor,
Steps: rbdImageWatcherSteps,
}
needValidUsed := true
if b.accessModes != nil {
// If accessModes contains only ReadOnlyMany, we do not need to check whether the rbd image is in use.
if len(b.accessModes) == 1 && b.accessModes[0] == v1.ReadOnlyMany {
needValidUsed = false
}
}
// If accessModes is nil, the volume is referenced as an in-line volume.
// We can assume the access modes to be {"RWO", "ROX"}, which is what the volume plugin supports.
// We do not need to consider ReadOnly here, because it only applies to VolumeMounts.
if needValidUsed {
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
used, rbdOutput, err := util.rbdStatus(&b)
if err != nil {
return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput)
}
return !used, nil
})
// Return an error if the rbd image has not become available within the specified timeout.
if err == wait.ErrWaitTimeout {
return "", fmt.Errorf("rbd image %s/%s is still being used", b.Pool, b.Image)
}
// Return an error if any other errors were encountered while waiting for the image to become available.
if err != nil {
return "", err
}
}
mon := util.kernelRBDMonitorsOpt(b.Mon)
klog.V(1).Infof("rbd: map mon %s", mon)
_, err := b.exec.Command("modprobe", "rbd").CombinedOutput()
if err != nil {
klog.Warningf("rbd: failed to load rbd kernel module:%v", err)
}
output, err = execRbdMap(b, "rbd", mon)
if err != nil {
if !nbdToolsFound {
klog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output))
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output))
}
klog.V(3).Infof("rbd: map failed with %v, %s. Retrying with rbd-nbd", err, string(output))
errList := []error{err}
outputList := output
output, err = execRbdMap(b, "rbd-nbd", mon)
if err != nil {
errList = append(errList, err)
outputList = append(outputList, output...)
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", errors.NewAggregate(errList), string(outputList))
}
devicePath, mapped = waitForPath(b.Pool, b.Image, 10 /*maxRetries*/, true /*useNbdDriver*/)
} else {
devicePath, mapped = waitForPath(b.Pool, b.Image, 10 /*maxRetries*/, false /*useNbdDriver*/)
}
if !mapped {
return "", fmt.Errorf("could not map image %s/%s: timed out after 10s", b.Pool, b.Image)
}
}
return devicePath, nil
}
// DetachDisk detaches the disk from the node.
// It unmaps the device if one is provided, and removes the lock
// if there is persisted RBD info under deviceMountPath.
func (util *rbdUtil) DetachDisk(plugin *rbdPlugin, deviceMountPath string, device string) error {
if len(device) == 0 {
return fmt.Errorf("DetachDisk failed, device is empty")
}
exec := plugin.host.GetExec(plugin.GetPluginName())
var rbdCmd string
// Unlike map, we cannot fall through for unmap:
// the tool used to unmap depends on the device type.
if strings.HasPrefix(device, "/dev/nbd") {
rbdCmd = "rbd-nbd"
} else {
rbdCmd = "rbd"
}
// rbd unmap
output, err := exec.Command(rbdCmd, "unmap", device).CombinedOutput()
if err != nil {
return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %s", device, err, string(output)))
}
klog.V(3).Infof("rbd: successfully unmapped device %s", device)
// Currently, we don't persist rbd info on the disk, but for backward
// compatibility, we need to clean it if found.
rbdFile := filepath.Join(deviceMountPath, "rbd.json")
exists, err := utilpath.Exists(utilpath.CheckFollowSymlink, rbdFile)
if err != nil {
return err
}
if exists {
klog.V(3).Infof("rbd: old rbd.json is found under %s, cleaning it", deviceMountPath)
err = util.cleanOldRBDFile(plugin, rbdFile)
if err != nil {
klog.Errorf("rbd: failed to clean %s", rbdFile)
return err
}
klog.V(3).Infof("rbd: successfully removed %s", rbdFile)
}
return nil
}
// DetachBlockDisk detaches the disk from the node.
func (util *rbdUtil) DetachBlockDisk(disk rbdDiskUnmapper, mapPath string) error {
if pathExists, pathErr := mount.PathExists(mapPath); pathErr != nil {
return fmt.Errorf("error checking if path exists: %v", pathErr)
} else if !pathExists {
klog.Warningf("Warning: Unmap skipped because path does not exist: %v", mapPath)
return nil
}
// If we arrive here, the device is no longer used; determine which tool must unmap it.
device, err := getBlockVolumeDevice(mapPath)
if err != nil {
return err
}
if len(device) == 0 {
return fmt.Errorf("DetachBlockDisk failed, device is empty")
}
exec := disk.plugin.host.GetExec(disk.plugin.GetPluginName())
var rbdCmd string
// Unlike map, we cannot fall through here.
// Any nbd device must be unmapped by rbd-nbd.
if strings.HasPrefix(device, "/dev/nbd") {
rbdCmd = "rbd-nbd"
klog.V(4).Infof("rbd: using rbd-nbd for unmap function")
} else {
rbdCmd = "rbd"
klog.V(4).Infof("rbd: using rbd for unmap function")
}
// rbd unmap
output, err := exec.Command(rbdCmd, "unmap", device).CombinedOutput()
if err != nil {
return rbdErrors(err, fmt.Errorf("rbd: failed to unmap device %s, error %v, rbd output: %s", device, err, string(output)))
}
klog.V(3).Infof("rbd: successfully unmapped device %s", device)
return nil
}
// cleanOldRBDFile reads rbd info from the rbd.json file and removes the lock if one is found.
// Finally, it removes the rbd.json file.
func (util *rbdUtil) cleanOldRBDFile(plugin *rbdPlugin, rbdFile string) error {
mounter := &rbdMounter{
// util.rbdUnlock needs it to run command.
rbd: newRBD("", "", "", "", false, plugin, util),
}
fp, err := os.Open(rbdFile)
if err != nil {
return fmt.Errorf("rbd: failed to open %s: %v", rbdFile, err)
}
defer fp.Close()
decoder := json.NewDecoder(fp)
if err = decoder.Decode(mounter); err != nil {
return fmt.Errorf("rbd: decode err: %v", err)
}
// Remove rbd lock if found.
// The disk is not attached to this node anymore, so the lock on image
// for this node can be removed safely.
err = util.rbdUnlock(*mounter)
if err == nil {
os.Remove(rbdFile)
}
return err
}
// CreateImage creates an RBD image.
func (util *rbdUtil) CreateImage(p *rbdVolumeProvisioner) (r *v1.RBDPersistentVolumeSource, size int, err error) {
var output []byte
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
// Convert to MiB, the unit rbd defaults to.
sz, err := volumehelpers.RoundUpToMiBInt(capacity)
if err != nil {
return nil, 0, err
}
volSz := fmt.Sprintf("%d", sz)
mon := util.kernelRBDMonitorsOpt(p.Mon)
if p.rbdMounter.imageFormat == rbdImageFormat2 {
klog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key <masked>", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, p.rbdMounter.imageFeatures, mon, p.rbdMounter.Pool, p.rbdMounter.adminID)
} else {
klog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key <masked>", p.rbdMounter.Image, volSz, p.rbdMounter.imageFormat, mon, p.rbdMounter.Pool, p.rbdMounter.adminID)
}
args := []string{"create", p.rbdMounter.Image, "--size", volSz, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminID, "-m", mon, "--key=" + p.rbdMounter.adminSecret, "--image-format", p.rbdMounter.imageFormat}
if p.rbdMounter.imageFormat == rbdImageFormat2 {
// If no image features are provided, the result is an empty string,
// which disables all RBD image format 2 features, as expected.
features := strings.Join(p.rbdMounter.imageFeatures, ",")
args = append(args, "--image-feature", features)
}
output, err = p.exec.Command("rbd", args...).CombinedOutput()
if err != nil {
klog.Warningf("failed to create rbd image, output %v", string(output))
return nil, 0, fmt.Errorf("failed to create rbd image: %v, command output: %s", err, string(output))
}
return &v1.RBDPersistentVolumeSource{
CephMonitors: p.rbdMounter.Mon,
RBDImage: p.rbdMounter.Image,
RBDPool: p.rbdMounter.Pool,
}, sz, nil
}
// DeleteImage deletes an RBD image.
func (util *rbdUtil) DeleteImage(p *rbdVolumeDeleter) error {
var output []byte
found, rbdOutput, err := util.rbdStatus(p.rbdMounter)
if err != nil {
return fmt.Errorf("error %v, rbd output: %v", err, rbdOutput)
}
if found {
klog.Infof("rbd %s is still being used", p.rbdMounter.Image)
return fmt.Errorf("rbd image %s/%s is still being used, rbd output: %v", p.rbdMounter.Pool, p.rbdMounter.Image, rbdOutput)
}
// rbd rm.
mon := util.kernelRBDMonitorsOpt(p.rbdMounter.Mon)
klog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key <masked>", p.rbdMounter.Image, mon, p.rbdMounter.Pool, p.rbdMounter.adminID)
output, err = p.exec.Command("rbd",
"rm", p.rbdMounter.Image, "--pool", p.rbdMounter.Pool, "--id", p.rbdMounter.adminID, "-m", mon, "--key="+p.rbdMounter.adminSecret).CombinedOutput()
if err == nil {
return nil
}
klog.Errorf("failed to delete rbd image: %v, command output: %s", err, string(output))
return fmt.Errorf("error %v, rbd output: %v", err, string(output))
}
// ExpandImage runs rbd resize command to resize the specified image.
func (util *rbdUtil) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
var output []byte
var err error
// Convert to MiB, the unit rbd defaults to.
sz, err := volumehelpers.RoundUpToMiBInt(newSize)
if err != nil {
return oldSize, err
}
newVolSz := fmt.Sprintf("%d", sz)
newSizeQuant := resource.MustParse(fmt.Sprintf("%dMi", sz))
// Check the current size of the rbd image; if it equals or exceeds the new requested size, do nothing.
curSize, infoErr := util.rbdInfo(rbdExpander.rbdMounter)
if infoErr != nil {
return oldSize, fmt.Errorf("rbd info failed, error: %v", infoErr)
}
if curSize >= sz {
return newSizeQuant, nil
}
// rbd resize.
mon := util.kernelRBDMonitorsOpt(rbdExpander.rbdMounter.Mon)
klog.V(4).Infof("rbd: resize %s using mon %s, pool %s id %s key <masked>", rbdExpander.rbdMounter.Image, mon, rbdExpander.rbdMounter.Pool, rbdExpander.rbdMounter.adminID)
output, err = rbdExpander.exec.Command("rbd",
"resize", rbdExpander.rbdMounter.Image, "--size", newVolSz, "--pool", rbdExpander.rbdMounter.Pool, "--id", rbdExpander.rbdMounter.adminID, "-m", mon, "--key="+rbdExpander.rbdMounter.adminSecret).CombinedOutput()
if err == nil {
return newSizeQuant, nil
}
klog.Errorf("failed to resize rbd image: %v, command output: %s", err, string(output))
return oldSize, err
}
// rbdInfo runs the `rbd info` command to get the current image size in MiB.
func (util *rbdUtil) rbdInfo(b *rbdMounter) (int, error) {
var err error
var output []byte
// If we don't have an admin id/secret (e.g. when attaching), fall back to the user id/secret.
id := b.adminID
secret := b.adminSecret
if id == "" {
id = b.ID
secret = b.Secret
}
mon := util.kernelRBDMonitorsOpt(b.Mon)
// cmd "rbd info" gets the image info with the following output:
//
// # image exists (exit=0)
// rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08
// size 1024 MB in 256 objects
// order 22 (4096 kB objects)
// block_name_prefix: rbd_data.1253ac238e1f29
// format: 2
// ...
//
// rbd info volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08 --format json
// {"name":"volume-4a5bcc8b-2b55-46da-ba04-0d3dc5227f08","size":1073741824,"objects":256,"order":22,"object_size":4194304,"block_name_prefix":"rbd_data.1253ac238e1f29","format":2,"features":["layering","exclusive-lock","object-map","fast-diff","deep-flatten"],"flags":[]}
//
//
// # image does not exist (exit=2)
// rbd: error opening image 1234: (2) No such file or directory
//
klog.V(4).Infof("rbd: info %s using mon %s, pool %s id %s key <masked>", b.Image, mon, b.Pool, id)
output, err = b.exec.Command("rbd",
"info", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret, "-k=/dev/null", "--format=json").Output()
if err, ok := err.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
klog.Errorf("rbd cmd not found")
// fail fast if rbd command is not found.
return 0, err
}
}
// If the command never succeeded, return its last error.
if err != nil {
return 0, err
}
if len(output) == 0 {
return 0, fmt.Errorf("cannot get image size info %s: %s", b.Image, string(output))
}
return getRbdImageSize(output)
}
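// getRbdImageSize parses the JSON output of "rbd info --format=json" and
// returns the image size in MiB; for example, a reported size of 1073741824
// bytes yields 1024.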
func getRbdImageSize(output []byte) (int, error) {
info := struct {
Size int64 `json:"size"`
}{}
if err := json.Unmarshal(output, &info); err != nil {
return 0, fmt.Errorf("parse rbd info output failed: %s, %v", string(output), err)
}
return int(info.Size / rbdImageSizeUnitMiB), nil
}
// rbdStatus runs the `rbd status` command to check whether there is a watcher on the image.
func (util *rbdUtil) rbdStatus(b *rbdMounter) (bool, string, error) {
var err error
var output string
var cmd []byte
// If we don't have an admin id/secret (e.g. when attaching), fall back to the user id/secret.
id := b.adminID
secret := b.adminSecret
if id == "" {
id = b.ID
secret = b.Secret
}
mon := util.kernelRBDMonitorsOpt(b.Mon)
// cmd "rbd status" lists the rbd client watchers with the following output:
//
// # there is a watcher (exit=0)
// Watchers:
// watcher=10.16.153.105:0/710245699 client.14163 cookie=1
//
// # there is no watcher (exit=0)
// Watchers: none
//
// Otherwise, exit is non-zero, for example:
//
// # image does not exist (exit=2)
// rbd: error opening image kubernetes-dynamic-pvc-<UUID>: (2) No such file or directory
//
klog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key <masked>", b.Image, mon, b.Pool, id)
cmd, err = b.exec.Command("rbd",
"status", b.Image, "--pool", b.Pool, "-m", mon, "--id", id, "--key="+secret).CombinedOutput()
output = string(cmd)
if err, ok := err.(*exec.Error); ok {
if err.Err == exec.ErrNotFound {
klog.Errorf("rbd cmd not found")
// fail fast if command not found
return false, output, err
}
}
// If the command never succeeded, return its last error.
if err != nil {
return false, output, err
}
if strings.Contains(output, imageWatcherStr) {
klog.V(4).Infof("rbd: watchers on %s: %s", b.Image, output)
return true, output, nil
}
klog.Warningf("rbd: no watchers on %s", b.Image)
return false, output, nil
}
// getRbdImageInfo tries to parse an rbdImageInfo from deviceMountPath.
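// For example, a device mount path ending in "rbd-image-foo" yields
// pool "rbd" and image name "foo".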
func getRbdImageInfo(deviceMountPath string) (*rbdImageInfo, error) {
deviceMountedPathSeps := strings.Split(filepath.Base(deviceMountPath), "-image-")
if len(deviceMountedPathSeps) != 2 {
return nil, fmt.Errorf("cannot parse pool and image name from %s", deviceMountPath)
}
return &rbdImageInfo{
pool: deviceMountedPathSeps[0],
name: deviceMountedPathSeps[1],
}, nil
}

View File

@ -1,50 +0,0 @@
//go:build windows
// +build windows
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"strconv"
"k8s.io/mount-utils"
)
func (fake *fakeDiskManager) AttachDisk(b rbdMounter) (string, error) {
fake.mutex.Lock()
defer fake.mutex.Unlock()
// Windows expects Disk Numbers. We start with rbdMapIndex 0, referring to the first Disk.
volIds, err := mount.ListVolumesOnDisk(strconv.Itoa(fake.rbdMapIndex))
if err != nil {
return "", err
}
fake.rbdDevices[volIds[0]] = true
devicePath := strconv.Itoa(fake.rbdMapIndex)
fake.rbdMapIndex++
return devicePath, nil
}
func getLoggedSource(devicePath string) (string, error) {
// The Windows mounter mounts based on the Disk's Unique ID.
volIds, err := mount.ListVolumesOnDisk(devicePath)
if err != nil {
return "", err
}
return volIds[0], nil
}

View File

@ -1,325 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugins
import (
"crypto/md5"
"encoding/hex"
"fmt"
"strings"
"k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
RBDVolumePluginName = "kubernetes.io/rbd"
RBDDriverName = "rbd.csi.ceph.com"
defaultAdminSecretNamespace = "default"
defaultImgFeatureVal = "layering"
defaultAdminUser = "admin"
defaultPoolVal = "rbd"
defaultIntreeImagePfx = "kubernetes-dynamic-pvc-"
defaultMigKey = "migration"
defaultMigStaticVal = "true"
CSIRBDVolHandleAnnKey = "rbd.csi.ceph.com/volume-handle"
imgFeatureKey = "imageFeatures"
imgFmtKey = "imageFormat"
imgNameKey = "imageName"
clusterIDKey = "clusterID"
journalPoolKey = "journalPool"
poolKey = "pool"
monsKey = "monitors"
adminIDKey = "adminId"
staticVolKey = "staticVolume"
monsPfx = "mons-"
imgPfx = "image-"
migVolPfx = "mig"
provSecretNameKey = "csi.storage.k8s.io/provisioner-secret-name"
nodeStageSecretNameKey = "csi.storage.k8s.io/node-stage-secret-name"
cntrlExpandSecretNameKey = "csi.storage.k8s.io/controller-expand-secret-name"
provSecretNamespaceKey = "csi.storage.k8s.io/provisioner-secret-namespace"
nodeStageSecretNamespaceKey = "csi.storage.k8s.io/node-stage-secret-namespace"
cntrlExpandSecretNamespaceKey = "csi.storage.k8s.io/controller-expand-secret-namespace"
)
var _ InTreePlugin = &rbdCSITranslator{}
type rbdCSITranslator struct{}
func NewRBDCSITranslator() InTreePlugin {
return &rbdCSITranslator{}
}
// TranslateInTreeStorageClassToCSI takes an in-tree storage class used by the in-tree plugin
// and translates it to a storage class consumable by the CSI plugin
func (p rbdCSITranslator) TranslateInTreeStorageClassToCSI(sc *storagev1.StorageClass) (*storagev1.StorageClass, error) {
if sc == nil {
return nil, fmt.Errorf("sc is nil")
}
var params = map[string]string{}
fillDefaultSCParams(params)
for k, v := range sc.Parameters {
switch strings.ToLower(k) {
case fsTypeKey:
params[csiFsTypeKey] = v
case "imagefeatures":
params[imgFeatureKey] = v
case poolKey:
params[poolKey] = v
case "imageformat":
params[imgFmtKey] = v
case "adminid":
params[adminIDKey] = v
case "adminsecretname":
params[provSecretNameKey] = v
params[nodeStageSecretNameKey] = v
params[cntrlExpandSecretNameKey] = v
case "adminsecretnamespace":
params[provSecretNamespaceKey] = v
params[nodeStageSecretNamespaceKey] = v
params[cntrlExpandSecretNamespaceKey] = v
case monsKey:
arr := strings.Split(v, ",")
if len(arr) < 1 {
return nil, fmt.Errorf("missing Ceph monitors")
}
params[monsKey] = v
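// The clusterID is derived deterministically from the monitor list, e.g.
// "10.70.53.126:6789,10.70.53.156:6789" hashes to "7982de6a23b77bce50b1ba9f2e879cce".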
params[clusterIDKey] = fmt.Sprintf("%x", md5.Sum([]byte(v)))
}
}
if params[provSecretNameKey] == "" {
return nil, fmt.Errorf("missing Ceph admin secret name")
}
if params[monsKey] == "" {
return nil, fmt.Errorf("missing Ceph monitors")
}
sc.Provisioner = RBDDriverName
sc.Parameters = params
return sc, nil
}
// TranslateInTreeInlineVolumeToCSI takes an inline volume and will translate
// the in-tree inline volume source to a CSIPersistentVolumeSource
func (p rbdCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error) {
if volume == nil || volume.RBD == nil {
return nil, fmt.Errorf("volume is nil or RBDVolume not defined on volume")
}
var am v1.PersistentVolumeAccessMode
if volume.RBD.ReadOnly {
am = v1.ReadOnlyMany
} else {
am = v1.ReadWriteOnce
}
secRef := &v1.SecretReference{}
if volume.RBD.SecretRef != nil {
secRef.Name = volume.RBD.SecretRef.Name
secRef.Namespace = podNamespace
}
volumeAttr := make(map[string]string)
volumeAttr[clusterIDKey] = fmt.Sprintf("%x", md5.Sum([]byte(strings.Join(volume.RBD.CephMonitors, ","))))
volumeAttr[poolKey] = defaultPoolVal
if volume.RBD.RBDPool != "" {
volumeAttr[poolKey] = volume.RBD.RBDPool
}
volumeAttr[staticVolKey] = defaultMigStaticVal
volumeAttr[imgFeatureKey] = defaultImgFeatureVal
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%s", RBDDriverName, volume.RBD.RBDImage),
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: RBDDriverName,
VolumeHandle: volume.RBD.RBDImage,
FSType: volume.RBD.FSType,
VolumeAttributes: volumeAttr,
NodeStageSecretRef: secRef,
ControllerExpandSecretRef: secRef,
},
},
AccessModes: []v1.PersistentVolumeAccessMode{am},
},
}
return pv, nil
}
// TranslateInTreePVToCSI takes an RBD persistent volume and translates
// the in-tree pv source to a CSI source
func (p rbdCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
if pv == nil || pv.Spec.RBD == nil {
return nil, fmt.Errorf("pv is nil or RBD Volume not defined on pv")
}
var volID string
volumeAttributes := make(map[string]string)
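// If the PV already carries the CSI volume-handle annotation (set by
// TranslateCSIPVToInTree on a previous round trip), reuse the recorded handle
// and clusterID instead of composing a new migration handle.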
if pv.Annotations[CSIRBDVolHandleAnnKey] != "" {
volID = pv.Annotations[CSIRBDVolHandleAnnKey]
volumeAttributes[clusterIDKey] = pv.Annotations[clusterIDKey]
} else {
mons := strings.Join(pv.Spec.RBD.CephMonitors, ",")
pool := pv.Spec.RBD.RBDPool
image := pv.Spec.RBD.RBDImage
volumeAttributes[staticVolKey] = defaultMigStaticVal
volumeAttributes[clusterIDKey] = fmt.Sprintf("%x", md5.Sum([]byte(mons)))
volID = composeMigVolID(mons, pool, image)
}
err := fillVolAttrsForRequest(pv, volumeAttributes)
if err != nil {
return nil, err
}
if volumeAttributes[imgFeatureKey] == "" {
volumeAttributes[imgFeatureKey] = defaultImgFeatureVal
}
var am v1.PersistentVolumeAccessMode
if pv.Spec.RBD.ReadOnly {
am = v1.ReadOnlyMany
} else {
am = v1.ReadWriteOnce
}
pv.Spec.AccessModes = []v1.PersistentVolumeAccessMode{am}
csiSource := &v1.CSIPersistentVolumeSource{
Driver: RBDDriverName,
FSType: pv.Spec.RBD.FSType,
VolumeHandle: volID,
VolumeAttributes: volumeAttributes,
NodeStageSecretRef: pv.Spec.RBD.SecretRef,
ControllerExpandSecretRef: pv.Spec.RBD.SecretRef,
}
pv.Spec.RBD = nil
pv.Spec.CSI = csiSource
return pv, nil
}
// TranslateCSIPVToInTree takes a PV with a CSI PersistentVolume Source and will translate
// it to an in-tree Persistent Volume Source for the in-tree volume
func (p rbdCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
if pv == nil || pv.Spec.CSI == nil {
return nil, fmt.Errorf("pv is nil or CSI source not defined on pv")
}
var rbdImageName string
monSlice := []string{""}
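// The original monitor list cannot be recovered from the CSI source (only the
// hashed clusterID is stored), so the in-tree source keeps a single empty entry.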
csiSource := pv.Spec.CSI
rbdImageName = csiSource.VolumeAttributes[imgNameKey]
rbdPool := csiSource.VolumeAttributes[poolKey]
radosUser := csiSource.VolumeAttributes[adminIDKey]
if radosUser == "" {
radosUser = defaultAdminUser
}
RBDSource := &v1.RBDPersistentVolumeSource{
CephMonitors: monSlice,
RBDImage: rbdImageName,
FSType: csiSource.FSType,
RBDPool: rbdPool,
RadosUser: radosUser,
ReadOnly: csiSource.ReadOnly,
}
if pv.Annotations == nil {
pv.Annotations = make(map[string]string)
}
fillAnnotationsFromCSISource(pv, csiSource)
nodeSecret := csiSource.NodeStageSecretRef
if nodeSecret != nil {
RBDSource.SecretRef = &v1.SecretReference{Name: nodeSecret.Name, Namespace: nodeSecret.Namespace}
}
pv.Spec.CSI = nil
pv.Spec.RBD = RBDSource
return pv, nil
}
// CanSupport tests whether the plugin supports a given persistent volume
// specification from the API.
func (p rbdCSITranslator) CanSupport(pv *v1.PersistentVolume) bool {
return pv != nil && pv.Spec.RBD != nil
}
// CanSupportInline tests whether the plugin supports a given inline volume
// specification from the API.
func (p rbdCSITranslator) CanSupportInline(volume *v1.Volume) bool {
return volume != nil && volume.RBD != nil
}
// GetInTreePluginName returns the in-tree plugin name this migrates
func (p rbdCSITranslator) GetInTreePluginName() string {
return RBDVolumePluginName
}
// GetCSIPluginName returns the name of the CSI plugin that supersedes the in-tree plugin
func (p rbdCSITranslator) GetCSIPluginName() string {
return RBDDriverName
}
// RepairVolumeHandle generates a correct volume handle based on node ID information.
func (p rbdCSITranslator) RepairVolumeHandle(volumeHandle, nodeID string) (string, error) {
return volumeHandle, nil
}
// fillDefaultSCParams fills some sc parameters with default values
func fillDefaultSCParams(params map[string]string) {
params[defaultMigKey] = defaultMigStaticVal
params[poolKey] = defaultPoolVal
params[provSecretNamespaceKey] = defaultAdminSecretNamespace
params[cntrlExpandSecretNamespaceKey] = defaultAdminSecretNamespace
params[nodeStageSecretNamespaceKey] = defaultAdminSecretNamespace
}
// composeMigVolID composes the migration volume handle for an RBD PV, for example:
// mig_mons-afcca55bc1bdd3f479be1e8281c13ab1_image-e0b45b52-7e09-47d3-8f1b-806995fa4412_7265706c696361706f6f6c
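// The handle joins the "mig" prefix, "mons-" plus the md5 hash of the monitor
// list, "image-" plus the image name with the "kubernetes-dynamic-pvc-" prefix
// stripped, and the hex-encoded pool name ("7265706c696361706f6f6c" decodes to
// "replicapool").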
func composeMigVolID(mons string, pool string, image string) string {
clusterIDInHandle := md5.Sum([]byte(mons))
clusterField := monsPfx + fmt.Sprintf("%x", clusterIDInHandle)
poolHashInHandle := hex.EncodeToString([]byte(pool))
imageHashInHandle := strings.TrimPrefix(image, defaultIntreeImagePfx)
imageField := imgPfx + imageHashInHandle
volHash := strings.Join([]string{migVolPfx, clusterField, imageField, poolHashInHandle}, "_")
return volHash
}
// fillVolAttrsForRequest fills the volume attributes for node operations
func fillVolAttrsForRequest(pv *v1.PersistentVolume, volumeAttributes map[string]string) error {
if pv == nil || pv.Spec.RBD == nil {
return fmt.Errorf("pv is nil or RBD Volume not defined on pv")
}
volumeAttributes[imgNameKey] = pv.Spec.RBD.RBDImage
volumeAttributes[poolKey] = pv.Spec.RBD.RBDPool
volumeAttributes[imgFeatureKey] = pv.Annotations[imgFeatureKey]
volumeAttributes[imgFmtKey] = pv.Annotations[imgFmtKey]
volumeAttributes[journalPoolKey] = pv.Annotations[journalPoolKey]
volumeAttributes[defaultMigKey] = defaultMigStaticVal
volumeAttributes["tryOtherMounters"] = defaultMigStaticVal
return nil
}
// fillAnnotationsFromCSISource captures the required information from the CSI source
func fillAnnotationsFromCSISource(pv *v1.PersistentVolume, csiSource *v1.CSIPersistentVolumeSource) {
pv.Annotations[CSIRBDVolHandleAnnKey] = csiSource.VolumeHandle
pv.Annotations[clusterIDKey] = csiSource.VolumeAttributes[clusterIDKey]
pv.Annotations[journalPoolKey] = csiSource.VolumeAttributes[journalPoolKey]
pv.Annotations[imgFeatureKey] = csiSource.VolumeAttributes[imgFeatureKey]
pv.Annotations[imgFmtKey] = csiSource.VolumeAttributes[imgFmtKey]
}

View File

@ -1,527 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugins
import (
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"reflect"
"testing"
)
func TestTranslateRBDInTreeStorageClassToCSI(t *testing.T) {
translator := NewRBDCSITranslator()
testCases := []struct {
name string
inTreeSC *storage.StorageClass
csiSC *storage.StorageClass
errorExp bool
}{
{
name: "correct",
inTreeSC: &storage.StorageClass{
Provisioner: RBDVolumePluginName,
Parameters: map[string]string{
"adminId": "kubeadmin",
"monitors": "10.70.53.126:6789,10.70.53.156:6789",
"pool": "replicapool",
"adminSecretName": "ceph-admin-secret",
"adminSecretNamespace": "default",
},
},
csiSC: &storage.StorageClass{
Provisioner: RBDDriverName,
Parameters: map[string]string{
"adminId": "kubeadmin",
"pool": "replicapool",
"migration": "true",
"clusterID": "7982de6a23b77bce50b1ba9f2e879cce",
"monitors": "10.70.53.126:6789,10.70.53.156:6789",
"csi.storage.k8s.io/controller-expand-secret-name": "ceph-admin-secret",
"csi.storage.k8s.io/controller-expand-secret-namespace": "default",
"csi.storage.k8s.io/node-stage-secret-name": "ceph-admin-secret",
"csi.storage.k8s.io/node-stage-secret-namespace": "default",
"csi.storage.k8s.io/provisioner-secret-name": "ceph-admin-secret",
"csi.storage.k8s.io/provisioner-secret-namespace": "default",
},
},
errorExp: false,
},
{
name: "missing monitor",
inTreeSC: &storage.StorageClass{
Provisioner: RBDVolumePluginName,
Parameters: map[string]string{
"adminId": "kubeadmin",
"monitors": "",
"pool": "replicapool",
"adminSecretName": "ceph-admin-secret",
"adminSecretNamespace": "default",
},
},
csiSC: nil,
errorExp: true,
},
{
name: "monitor unavailable",
inTreeSC: &storage.StorageClass{
Provisioner: RBDVolumePluginName,
Parameters: map[string]string{
"adminId": "kubeadmin",
"pool": "replicapool",
"adminSecretName": "ceph-admin-secret",
"adminSecretNamespace": "default",
},
},
csiSC: nil,
errorExp: true,
},
{
name: "admin secret unavailable",
inTreeSC: &storage.StorageClass{
Provisioner: RBDVolumePluginName,
Parameters: map[string]string{
"adminId": "kubeadmin",
"pool": "replicapool",
"monitors": "10.70.53.126:6789,10.70.53.156:6789",
"adminSecretNamespace": "default",
},
},
csiSC: nil,
errorExp: true,
},
{
name: "nil, err expected",
inTreeSC: nil,
csiSC: nil,
errorExp: true,
},
}
for _, tc := range testCases {
t.Logf("Testing %v", tc.name)
result, err := translator.TranslateInTreeStorageClassToCSI(tc.inTreeSC)
if err != nil && !tc.errorExp {
t.Errorf("Did not expect error but got: %v", err)
}
if err == nil && tc.errorExp {
t.Errorf("Expected error, but did not get one.")
}
if !reflect.DeepEqual(result, tc.csiSC) {
t.Errorf("Got parameters: %v\n, expected :%v", result, tc.csiSC)
}
}
}
func TestTranslateRBDInTreeInlineVolumeToCSI(t *testing.T) {
translator := NewRBDCSITranslator()
testCases := []struct {
name string
inLine *v1.Volume
csiVol *v1.PersistentVolume
errExpected bool
}{
{
name: "normal",
inLine: &v1.Volume{
Name: "rbdVol",
VolumeSource: v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{"10.70.53.126:6789,10.70.53.156:6789"},
RBDPool: "replicapool",
RBDImage: "kubernetes-dynamic-pvc-e4111eb6-4088-11ec-b823-0242ac110003",
RadosUser: "admin",
SecretRef: &v1.LocalObjectReference{Name: "ceph-secret"},
FSType: "ext4",
ReadOnly: false,
},
},
},
csiVol: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
// Must be unique per disk as it is used as the unique part of the
// staging path
Name: "rbd.csi.ceph.com-kubernetes-dynamic-pvc-e4111eb6-4088-11ec-b823-0242ac110003",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: RBDDriverName,
VolumeHandle: "kubernetes-dynamic-pvc-e4111eb6-4088-11ec-b823-0242ac110003",
FSType: "ext4",
VolumeAttributes: map[string]string{
"clusterID": "7982de6a23b77bce50b1ba9f2e879cce",
"imageFeatures": "layering",
"pool": "replicapool",
"staticVolume": "true",
},
NodeStageSecretRef: &v1.SecretReference{Name: "ceph-secret", Namespace: "ns"},
ControllerExpandSecretRef: &v1.SecretReference{Name: "ceph-secret", Namespace: "ns"},
},
},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
},
},
errExpected: false,
},
{
name: "nil",
inLine: nil,
csiVol: nil,
errExpected: true,
},
}
for _, tc := range testCases {
t.Logf("Testing %v", tc.name)
result, err := translator.TranslateInTreeInlineVolumeToCSI(tc.inLine, "ns")
if err != nil && !tc.errExpected {
t.Errorf("Did not expect error but got: %v", err)
}
if err == nil && tc.errExpected {
t.Errorf("Expected error, but did not get one.")
}
if !reflect.DeepEqual(result, tc.csiVol) {
t.Errorf("Got parameters: %v\n, expected :%v", result, tc.csiVol)
}
}
}
func TestTranslateRBDInTreePVToCSI(t *testing.T) {
translator := NewRBDCSITranslator()
testCases := []struct {
name string
inTree *v1.PersistentVolume
csi *v1.PersistentVolume
errExpected bool
}{
{
name: "no RBD volume",
inTree: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "rbd.csi.ceph.com",
},
Spec: v1.PersistentVolumeSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
ClaimRef: &v1.ObjectReference{
Name: "test-pvc",
Namespace: "default",
},
},
},
csi: nil,
errExpected: true,
},
{
name: "normal",
inTree: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: RBDDriverName,
},
Spec: v1.PersistentVolumeSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
ClaimRef: &v1.ObjectReference{
Name: "test-pvc",
Namespace: "default",
},
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{"10.70.53.126:6789"},
RBDPool: "replicapool",
RBDImage: "kubernetes-dynamic-pvc-e4111eb6-4088-11ec-b823-0242ac110003",
RadosUser: "admin",
FSType: "ext4",
ReadOnly: false,
SecretRef: &v1.SecretReference{
Name: "ceph-secret",
Namespace: "default",
},
},
},
},
},
csi: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: RBDDriverName,
},
Spec: v1.PersistentVolumeSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
ClaimRef: &v1.ObjectReference{
Name: "test-pvc",
Namespace: "default",
},
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: RBDDriverName,
VolumeHandle: "mig_mons-b7f67366bb43f32e07d8a261a7840da9_image-e4111eb6-4088-11ec-b823-0242ac110003_7265706c696361706f6f6c",
ReadOnly: false,
FSType: "ext4",
VolumeAttributes: map[string]string{
"clusterID": "b7f67366bb43f32e07d8a261a7840da9",
"imageFeatures": "layering",
"imageFormat": "",
"imageName": "kubernetes-dynamic-pvc-e4111eb6-4088-11ec-b823-0242ac110003",
"journalPool": "",
"migration": "true",
"pool": "replicapool",
"staticVolume": "true",
"tryOtherMounters": "true",
},
NodeStageSecretRef: &v1.SecretReference{
Name: "ceph-secret",
Namespace: "default",
},
ControllerExpandSecretRef: &v1.SecretReference{
Name: "ceph-secret",
Namespace: "default",
},
},
},
},
},
errExpected: false,
},
{
name: "normal-pvc-without-dynamic-provisioner-pfx-set",
inTree: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: RBDDriverName,
},
Spec: v1.PersistentVolumeSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
ClaimRef: &v1.ObjectReference{
Name: "test-pvc",
Namespace: "default",
},
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{"10.70.53.126:6789"},
RBDPool: "replicapool",
RBDImage: "e4111eb6-4088-11ec-b823-0242ac110003",
RadosUser: "admin",
FSType: "ext4",
ReadOnly: false,
SecretRef: &v1.SecretReference{
Name: "ceph-secret",
Namespace: "default",
},
},
},
},
},
csi: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: RBDDriverName,
},
Spec: v1.PersistentVolumeSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
ClaimRef: &v1.ObjectReference{
Name: "test-pvc",
Namespace: "default",
},
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: RBDDriverName,
VolumeHandle: "mig_mons-b7f67366bb43f32e07d8a261a7840da9_image-e4111eb6-4088-11ec-b823-0242ac110003_7265706c696361706f6f6c",
ReadOnly: false,
FSType: "ext4",
VolumeAttributes: map[string]string{
"clusterID": "b7f67366bb43f32e07d8a261a7840da9",
"imageFeatures": "layering",
"imageFormat": "",
"imageName": "e4111eb6-4088-11ec-b823-0242ac110003",
"journalPool": "",
"migration": "true",
"pool": "replicapool",
"staticVolume": "true",
"tryOtherMounters": "true",
},
NodeStageSecretRef: &v1.SecretReference{
Name: "ceph-secret",
Namespace: "default",
},
ControllerExpandSecretRef: &v1.SecretReference{
Name: "ceph-secret",
Namespace: "default",
},
},
},
},
},
errExpected: false,
},
{
name: "nil PV",
inTree: nil,
csi: nil,
errExpected: true,
},
}
for _, tc := range testCases {
t.Logf("Testing %v", tc.name)
result, err := translator.TranslateInTreePVToCSI(tc.inTree)
if err != nil && !tc.errExpected {
t.Errorf("Did not expect error but got: %v", err)
}
if err == nil && tc.errExpected {
t.Errorf("Expected error, but did not get one.")
}
if !reflect.DeepEqual(result, tc.csi) {
t.Errorf("Got parameters: %v\n, expected :%v", result, tc.csi)
}
}
}
func TestTranslateCSIPvToInTree(t *testing.T) {
translator := NewRBDCSITranslator()
testCases := []struct {
name string
csi *v1.PersistentVolume
inTree *v1.PersistentVolume
errExpected bool
}{
{
name: "no CSI section",
csi: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: RBDDriverName,
},
Spec: v1.PersistentVolumeSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
ClaimRef: &v1.ObjectReference{
Name: "test-pvc",
Namespace: "default",
},
},
},
inTree: nil,
errExpected: true,
},
{
name: "normal",
csi: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: RBDDriverName,
},
Spec: v1.PersistentVolumeSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
ClaimRef: &v1.ObjectReference{
Name: "test-pvc",
Namespace: "default",
},
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: RBDDriverName,
VolumeHandle: "dummy",
ReadOnly: false,
FSType: "ext4",
VolumeAttributes: map[string]string{
"clusterID": "b7f67366bb43f32e07d8a261a7840da9",
"imageFeatures": "layering",
"imageFormat": "1",
"imageName": "kubernetes-dynamic-pvc-e4111eb6-4088-11ec-b823-0242ac110003",
"journalPool": "some",
"migration": "true",
"pool": "replicapool",
"staticVolume": "true",
"tryOtherMounters": "true",
},
NodeStageSecretRef: &v1.SecretReference{
Name: "ceph-secret",
Namespace: "default",
},
},
},
},
},
inTree: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: RBDDriverName,
Annotations: map[string]string{
"clusterID": "b7f67366bb43f32e07d8a261a7840da9",
"imageFeatures": "layering",
"imageFormat": "1",
"journalPool": "some",
"rbd.csi.ceph.com/volume-handle": "dummy",
},
},
Spec: v1.PersistentVolumeSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
ClaimRef: &v1.ObjectReference{
Name: "test-pvc",
Namespace: "default",
},
PersistentVolumeSource: v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{""},
RBDPool: "replicapool",
RBDImage: "kubernetes-dynamic-pvc-e4111eb6-4088-11ec-b823-0242ac110003",
RadosUser: "admin",
FSType: "ext4",
ReadOnly: false,
SecretRef: &v1.SecretReference{
Name: "ceph-secret",
Namespace: "default",
},
},
},
},
},
errExpected: false,
},
{
name: "nil PV",
inTree: nil,
csi: nil,
errExpected: true,
},
}
for _, tc := range testCases {
t.Logf("Testing %v", tc.name)
result, err := translator.TranslateCSIPVToInTree(tc.csi)
if err != nil && !tc.errExpected {
t.Errorf("Did not expect error but got: %v", err)
}
if err == nil && tc.errExpected {
t.Errorf("Expected error, but did not get one.")
}
if !reflect.DeepEqual(result, tc.inTree) {
t.Errorf("Got parameters: %v\n, expected :%v", result, tc.inTree)
}
}
}

View File

@ -34,7 +34,6 @@ var (
plugins.AzureFileDriverName: plugins.NewAzureFileCSITranslator(),
plugins.VSphereDriverName: plugins.NewvSphereCSITranslator(),
plugins.PortworxDriverName: plugins.NewPortworxCSITranslator(),
plugins.RBDDriverName: plugins.NewRBDCSITranslator(),
}
)

View File

@ -445,12 +445,6 @@ func generateUniqueVolumeSource(driverName string) (v1.VolumeSource, error) {
VolumeID: string(uuid.NewUUID()),
},
}, nil
case plugins.RBDDriverName:
return v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
RBDImage: string(uuid.NewUUID()),
},
}, nil
default:
return v1.VolumeSource{}, fmt.Errorf("couldn't find logic for driver: %v", driverName)
}
@ -472,11 +466,6 @@ func TestPluginNameMappings(t *testing.T) {
inTreePluginName: "kubernetes.io/aws-ebs",
csiPluginName: "ebs.csi.aws.com",
},
{
name: "RBD plugin name",
inTreePluginName: "kubernetes.io/rbd",
csiPluginName: "rbd.csi.ceph.com",
},
}
for _, test := range testCases {
t.Logf("Testing %v", test.name)

View File

@ -400,136 +400,6 @@ func (v *iSCSIVolume) DeleteVolume(ctx context.Context) {
cleanUpVolumeServer(ctx, v.f, v.serverPod)
}
// Ceph RBD
type rbdDriver struct {
driverInfo storageframework.DriverInfo
}
type rbdVolume struct {
serverPod *v1.Pod
serverIP string
secret *v1.Secret
f *framework.Framework
}
var _ storageframework.TestDriver = &rbdDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &rbdDriver{}
var _ storageframework.InlineVolumeTestDriver = &rbdDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &rbdDriver{}
// InitRbdDriver returns an rbdDriver that implements the TestDriver interface
func InitRbdDriver() storageframework.TestDriver {
return &rbdDriver{
driverInfo: storageframework.DriverInfo{
Name: "rbd",
InTreePluginName: "kubernetes.io/rbd",
TestTags: []interface{}{feature.Volumes, framework.WithSerial()},
MaxFileSize: storageframework.FileSizeMedium,
SupportedSizeRange: e2evolume.SizeRange{
Min: "1Gi",
},
SupportedFsType: sets.NewString(
"", // Default fsType
"ext4",
),
Capabilities: map[storageframework.Capability]bool{
storageframework.CapPersistence: true,
storageframework.CapFsGroup: true,
storageframework.CapBlock: true,
storageframework.CapExec: true,
storageframework.CapMultiPODs: true,
storageframework.CapMultiplePVsSameID: true,
},
},
}
}
func (r *rbdDriver) GetDriverInfo() *storageframework.DriverInfo {
return &r.driverInfo
}
func (r *rbdDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
}
func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
rv, ok := e2evolume.(*rbdVolume)
if !ok {
framework.Failf("failed to cast test volume of type %T to the RBD test volume", e2evolume)
}
volSource := v1.VolumeSource{
RBD: &v1.RBDVolumeSource{
CephMonitors: []string{rv.serverIP},
RBDPool: "rbd",
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.LocalObjectReference{
Name: rv.secret.Name,
},
ReadOnly: readOnly,
},
}
if fsType != "" {
volSource.RBD.FSType = fsType
}
return &volSource
}
func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
rv, ok := e2evolume.(*rbdVolume)
if !ok {
framework.Failf("failed to cast test volume of type %T to the RBD test volume", e2evolume)
}
f := rv.f
ns := f.Namespace
pvSource := v1.PersistentVolumeSource{
RBD: &v1.RBDPersistentVolumeSource{
CephMonitors: []string{rv.serverIP},
RBDPool: "rbd",
RBDImage: "foo",
RadosUser: "admin",
SecretRef: &v1.SecretReference{
Name: rv.secret.Name,
Namespace: ns.Name,
},
ReadOnly: readOnly,
},
}
if fsType != "" {
pvSource.RBD.FSType = fsType
}
return &pvSource, nil
}
func (r *rbdDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
return &storageframework.PerTestConfig{
Driver: r,
Prefix: "rbd",
Framework: f,
}
}
func (r *rbdDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
f := config.Framework
cs := f.ClientSet
ns := f.Namespace
c, serverPod, secret, serverIP := newRBDServer(ctx, cs, ns.Name)
config.ServerConfig = &c
return &rbdVolume{
serverPod: serverPod,
serverIP: serverIP,
secret: secret,
f: f,
}
}
func (v *rbdVolume) DeleteVolume(ctx context.Context) {
cleanUpVolumeServerWithSecret(ctx, v.f, v.serverPod, v.secret)
}
// Ceph
type cephFSDriver struct {
driverInfo storageframework.DriverInfo

View File

@ -30,7 +30,6 @@ import (
var testDrivers = []func() storageframework.TestDriver{
drivers.InitNFSDriver,
drivers.InitISCSIDriver,
drivers.InitRbdDriver,
drivers.InitCephFSDriver,
drivers.InitHostPathDriver,
drivers.InitHostPathSymlinkDriver,