Merge pull request #122090 from carlory/remove-intree-vsphere

remove the deprecated in-tree vSphere volume plugin code
commit d39f401767
Kubernetes Prow Robot 2024-01-05 16:52:22 +01:00 committed by GitHub
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 0 additions and 2205 deletions


@@ -1,19 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- saad-ali
- thockin
- divyenpatel
emeritus_approvers:
- matchstick
- abrarshivani
- BaluDontu
- SandeepPissay
- pmorie
reviewers:
- saad-ali
- justinsb
- jsafrane
- jingxu97
- msau42
- divyenpatel


@@ -1,327 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"fmt"
"os"
"path/filepath"
"runtime"
"time"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
"k8s.io/utils/keymutex"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/legacy-cloud-providers/vsphere"
)
type vsphereVMDKAttacher struct {
host volume.VolumeHost
vsphereVolumes vsphere.Volumes
}
var _ volume.Attacher = &vsphereVMDKAttacher{}
var _ volume.DeviceMounter = &vsphereVMDKAttacher{}
var _ volume.AttachableVolumePlugin = &vsphereVolumePlugin{}
var _ volume.DeviceMountableVolumePlugin = &vsphereVolumePlugin{}
// Singleton key mutex for keeping attach operations for the same host atomic
var attachdetachMutex = keymutex.NewHashed(0)
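// For illustration, a minimal sketch of the per-key locking idiom used here;
// keymutex.NewHashed(0) sizes the internal lock table from runtime.NumCPU():
//
//	km := keymutex.NewHashed(0)
//	km.LockKey("node-1")         // operations for "node-1" serialize
//	defer km.UnlockKey("node-1") // operations for other keys proceed concurrently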
func (plugin *vsphereVolumePlugin) NewAttacher() (volume.Attacher, error) {
vsphereCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
return &vsphereVMDKAttacher{
host: plugin.host,
vsphereVolumes: vsphereCloud,
}, nil
}
func (plugin *vsphereVolumePlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
return plugin.NewAttacher()
}
// Attaches the volume specified by the given spec to the given host.
// On success, returns the device path where the device was attached on the
// node.
// Callers are responsible for retrying on failure.
// Callers are responsible for thread safety between concurrent attach and
// detach operations.
func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
klog.V(4).Infof("vSphere: Attach disk called for node %s", nodeName)
// Keeps concurrent attach operations to same host atomic
attachdetachMutex.LockKey(string(nodeName))
defer attachdetachMutex.UnlockKey(string(nodeName))
// vsphereCloud.AttachDisk checks if disk is already attached to host and
// succeeds in that case, so no need to do that separately.
diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, volumeSource.StoragePolicyName, nodeName)
if err != nil {
klog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.VolumePath, nodeName, err)
return "", err
}
return filepath.Join(diskByIDPath, diskSCSIPrefix+diskUUID), nil
}
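// With diskByIDPath = "/dev/disk/by-id/" and diskSCSIPrefix = "wwn-0x", the
// device path returned above looks like (UUID value illustrative):
//
//	/dev/disk/by-id/wwn-0x6000c298f3e5c1234567890abcdef012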
func (attacher *vsphereVMDKAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
klog.Warningf("Attacher.VolumesAreAttached called for node %q - Please use BulkVerifyVolumes for vSphere", nodeName)
volumeNodeMap := map[types.NodeName][]*volume.Spec{
nodeName: specs,
}
nodeVolumesResult := make(map[*volume.Spec]bool)
nodesVerificationMap, err := attacher.BulkVerifyVolumes(volumeNodeMap)
if err != nil {
klog.Errorf("Attacher.VolumesAreAttached - error checking volumes for node %q with %v", nodeName, err)
return nodeVolumesResult, err
}
if result, ok := nodesVerificationMap[nodeName]; ok {
return result, nil
}
return nodeVolumesResult, nil
}
func (attacher *vsphereVMDKAttacher) BulkVerifyVolumes(volumesByNode map[types.NodeName][]*volume.Spec) (map[types.NodeName]map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[types.NodeName]map[*volume.Spec]bool)
volumePathsByNode := make(map[types.NodeName][]string)
volumeSpecMap := make(map[string]*volume.Spec)
for nodeName, volumeSpecs := range volumesByNode {
for _, volumeSpec := range volumeSpecs {
volumeSource, _, err := getVolumeSource(volumeSpec)
if err != nil {
klog.Errorf("Error getting volume (%q) source : %v", volumeSpec.Name(), err)
continue
}
volPath := volumeSource.VolumePath
volumePathsByNode[nodeName] = append(volumePathsByNode[nodeName], volPath)
nodeVolume, nodeVolumeExists := volumesAttachedCheck[nodeName]
if !nodeVolumeExists {
nodeVolume = make(map[*volume.Spec]bool)
}
nodeVolume[volumeSpec] = true
volumeSpecMap[volPath] = volumeSpec
volumesAttachedCheck[nodeName] = nodeVolume
}
}
attachedResult, err := attacher.vsphereVolumes.DisksAreAttached(volumePathsByNode)
if err != nil {
klog.Errorf("Error checking if volumes are attached to nodes: %+v. err: %v", volumePathsByNode, err)
return volumesAttachedCheck, err
}
for nodeName, nodeVolumes := range attachedResult {
for volumePath, attached := range nodeVolumes {
if !attached {
spec := volumeSpecMap[volumePath]
setNodeVolume(volumesAttachedCheck, spec, nodeName, false)
}
}
}
return volumesAttachedCheck, nil
}
func (attacher *vsphereVMDKAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
if devicePath == "" {
return "", fmt.Errorf("WaitForAttach failed for VMDK %q: devicePath is empty", volumeSource.VolumePath)
}
ticker := time.NewTicker(checkSleepDuration)
defer ticker.Stop()
timer := time.NewTimer(timeout)
defer timer.Stop()
for {
select {
case <-ticker.C:
klog.V(5).Infof("Checking VMDK %q is attached", volumeSource.VolumePath)
path, err := verifyDevicePath(devicePath)
if err != nil {
// Log error, if any, and continue checking periodically. See issue #11321
klog.Warningf("Error verifying VMDK (%q) is attached: %v", volumeSource.VolumePath, err)
} else if path != "" {
// A device path has successfully been created for the VMDK
klog.Infof("Successfully found attached VMDK %q.", volumeSource.VolumePath)
return path, nil
}
case <-timer.C:
return "", fmt.Errorf("could not find attached VMDK %q. Timeout waiting for mount paths to be created", volumeSource.VolumePath)
}
}
}
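// The ticker/timer select above is the standard poll-until-deadline idiom; as a
// minimal standalone sketch (interval, deadline, and done() are illustrative):
//
//	ticker := time.NewTicker(time.Second)
//	defer ticker.Stop()
//	timer := time.NewTimer(10 * time.Second)
//	defer timer.Stop()
//	for {
//		select {
//		case <-ticker.C:
//			if done() { // hypothetical completion check
//				return
//			}
//		case <-timer.C:
//			return // timed out
//		}
//	}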
// GetDeviceMountPath returns the path where the device should be mounted,
// which individual volumes are then bind-mounted from.
func (attacher *vsphereVMDKAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return makeGlobalPDPath(attacher.host, volumeSource.VolumePath), nil
}
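// util.MountsInGlobalPDPath is "mounts", so the device mount path returned
// above has the form
//
//	<kubelet plugin dir>/kubernetes.io/vsphere-volume/mounts/<VolumePath>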
// GetDeviceMountRefs finds all other references to the device referenced
// by deviceMountPath; returns a list of paths.
func (plugin *vsphereVolumePlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mounter.GetMountRefs(deviceMountPath)
}
// MountDevice mounts device to global mount point.
func (attacher *vsphereVMDKAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
klog.Infof("vsphere MountDevice mount %s to %s", devicePath, deviceMountPath)
mounter := attacher.host.GetMounter(vsphereVolumePluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
dir := deviceMountPath
if runtime.GOOS == "windows" {
dir = filepath.Dir(deviceMountPath)
}
if err := os.MkdirAll(dir, 0750); err != nil {
klog.Errorf("Failed to create directory at %#v. err: %s", dir, err)
return err
}
notMnt = true
} else {
return err
}
}
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return err
}
options := []string{}
if notMnt {
diskMounter := volumeutil.NewSafeFormatAndMountFromHost(vsphereVolumePluginName, attacher.host)
mountOptions := volumeutil.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return err
}
klog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options)
}
return nil
}
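// Note that FormatAndMount above probes the device first and only formats it
// when no existing filesystem is detected, so calling MountDevice on an
// already-formatted disk is safe.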
type vsphereVMDKDetacher struct {
mounter mount.Interface
vsphereVolumes vsphere.Volumes
}
var _ volume.Detacher = &vsphereVMDKDetacher{}
var _ volume.DeviceUnmounter = &vsphereVMDKDetacher{}
func (plugin *vsphereVolumePlugin) NewDetacher() (volume.Detacher, error) {
vsphereCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
return &vsphereVMDKDetacher{
mounter: plugin.host.GetMounter(plugin.GetPluginName()),
vsphereVolumes: vsphereCloud,
}, nil
}
func (plugin *vsphereVolumePlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
return plugin.NewDetacher()
}
// Detach the given device from the given node.
func (detacher *vsphereVMDKDetacher) Detach(volumeName string, nodeName types.NodeName) error {
volPath := getVolPathfromVolumeName(volumeName)
attached, newVolumePath, err := detacher.vsphereVolumes.DiskIsAttached(volPath, nodeName)
if err != nil {
// Log error and continue with detach
klog.Errorf(
"Error checking if volume (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
volPath, nodeName, err)
}
if err == nil && !attached {
// Volume is already detached from node.
klog.Infof("detach operation was successful. volume %q is already detached from node %q.", volPath, nodeName)
return nil
}
attachdetachMutex.LockKey(string(nodeName))
defer attachdetachMutex.UnlockKey(string(nodeName))
if err := detacher.vsphereVolumes.DetachDisk(newVolumePath, nodeName); err != nil {
klog.Errorf("Error detaching volume %q: %v", volPath, err)
return err
}
return nil
}
func (detacher *vsphereVMDKDetacher) UnmountDevice(deviceMountPath string) error {
return mount.CleanupMountPoint(deviceMountPath, detacher.mounter, false)
}
func (plugin *vsphereVolumePlugin) CanAttach(spec *volume.Spec) (bool, error) {
return true, nil
}
func (plugin *vsphereVolumePlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
return true, nil
}
func setNodeVolume(
nodeVolumeMap map[types.NodeName]map[*volume.Spec]bool,
volumeSpec *volume.Spec,
nodeName types.NodeName,
check bool) {
volumeMap := nodeVolumeMap[nodeName]
if volumeMap == nil {
volumeMap = make(map[*volume.Spec]bool)
nodeVolumeMap[nodeName] = volumeMap
}
volumeMap[volumeSpec] = check
}


@@ -1,335 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"errors"
"fmt"
"os"
"path/filepath"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/legacy-cloud-providers/vsphere/vclib"
"k8s.io/klog/v2"
)
var (
// diskNameErr is the error when disk name is wrong.
diskNameErr = errors.New("wrong diskName")
// nodeNameErr is the error when node name is wrong.
nodeNameErr = errors.New("wrong nodeName")
)
func TestGetDeviceName_Volume(t *testing.T) {
plugin := newPlugin(t)
volPath := "[local] volumes/test"
spec := createVolSpec(volPath)
deviceName, err := plugin.GetVolumeName(spec)
if err != nil {
t.Errorf("GetDeviceName error: %v", err)
}
if deviceName != volPath {
t.Errorf("GetDeviceName error: expected %s, got %s", volPath, deviceName)
}
}
func TestGetDeviceName_PersistentVolume(t *testing.T) {
plugin := newPlugin(t)
volPath := "[local] volumes/test"
spec := createPVSpec(volPath)
deviceName, err := plugin.GetVolumeName(spec)
if err != nil {
t.Errorf("GetDeviceName error: %v", err)
}
if deviceName != volPath {
t.Errorf("GetDeviceName error: expected %s, got %s", volPath, deviceName)
}
}
// One testcase for TestAttachDetach table test below
type testcase struct {
name string
// For fake vSphere:
attach attachCall
detach detachCall
diskIsAttached diskIsAttachedCall
t *testing.T
// Actual test to run
test func(test *testcase) (string, error)
// Expected return of the test
expectedDevice string
expectedError error
}
func TestAttachDetach(t *testing.T) {
uuid := "00000000000000"
diskName := "[local] volumes/test"
nodeName := types.NodeName("host")
spec := createVolSpec(diskName)
expectedDevice := filepath.FromSlash("/dev/disk/by-id/wwn-0x" + uuid)
attachError := errors.New("fake attach error")
detachError := errors.New("fake detach error")
diskCheckError := errors.New("fake DiskIsAttached error")
tests := []testcase{
// Successful Attach call
{
name: "Attach_Positive",
attach: attachCall{diskName, nodeName, uuid, nil},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
},
expectedDevice: expectedDevice,
},
// Attach call fails
{
name: "Attach_Negative",
attach: attachCall{diskName, nodeName, "", attachError},
test: func(testcase *testcase) (string, error) {
attacher := newAttacher(testcase)
return attacher.Attach(spec, nodeName)
},
expectedError: attachError,
},
// Detach succeeds
{
name: "Detach_Positive",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
detach: detachCall{diskName, nodeName, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, nodeName)
},
},
// Disk is already detached
{
name: "Detach_Positive_AlreadyDetached",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, nodeName)
},
},
// Detach succeeds when DiskIsAttached fails
{
name: "Detach_Positive_CheckFails",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
detach: detachCall{diskName, nodeName, nil},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, nodeName)
},
},
// Detach fails
{
name: "Detach_Negative",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
detach: detachCall{diskName, nodeName, detachError},
test: func(testcase *testcase) (string, error) {
detacher := newDetacher(testcase)
return "", detacher.Detach(diskName, nodeName)
},
expectedError: detachError,
},
}
for _, testcase := range tests {
testcase.t = t
device, err := testcase.test(&testcase)
if err != testcase.expectedError {
t.Errorf("%s failed: expected err=%q, got %q", testcase.name, testcase.expectedError.Error(), err.Error())
}
if device != testcase.expectedDevice {
t.Errorf("%s failed: expected device=%q, got %q", testcase.name, testcase.expectedDevice, device)
}
t.Logf("Test %q succeeded", testcase.name)
}
}
// newPlugin creates a new vsphereVolumePlugin with a fake volume host and no
// cloud provider, so NewAttacher and NewDetacher won't work.
func newPlugin(t *testing.T) *vsphereVolumePlugin {
host := volumetest.NewFakeVolumeHost(t, os.TempDir(), nil, nil)
plugins := ProbeVolumePlugins()
plugin := plugins[0]
plugin.Init(host)
return plugin.(*vsphereVolumePlugin)
}
func newAttacher(testcase *testcase) *vsphereVMDKAttacher {
return &vsphereVMDKAttacher{
host: nil,
vsphereVolumes: testcase,
}
}
func newDetacher(testcase *testcase) *vsphereVMDKDetacher {
return &vsphereVMDKDetacher{
vsphereVolumes: testcase,
}
}
func createVolSpec(name string) *volume.Spec {
return &volume.Spec{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: name,
},
},
},
}
}
func createPVSpec(name string) *volume.Spec {
return &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: name,
},
},
},
},
}
}
// Fake vSphere implementation
type attachCall struct {
diskName string
nodeName types.NodeName
retDeviceUUID string
ret error
}
type detachCall struct {
diskName string
nodeName types.NodeName
ret error
}
type diskIsAttachedCall struct {
diskName string
nodeName types.NodeName
isAttached bool
ret error
}
func (testcase *testcase) AttachDisk(diskName string, storagePolicyName string, nodeName types.NodeName) (string, error) {
expected := &testcase.attach
if expected.diskName == "" && expected.nodeName == "" {
// testcase.attach looks uninitialized, test did not expect to call
// AttachDisk
testcase.t.Errorf("Unexpected AttachDisk call!")
return "", errors.New("unexpected AttachDisk call")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected AttachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
return "", fmt.Errorf(`unexpected AttachDisk call: %w`, diskNameErr)
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return "", fmt.Errorf(`unexpected AttachDisk call: %w`, nodeNameErr)
}
klog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceUUID, expected.ret)
return expected.retDeviceUUID, expected.ret
}
func (testcase *testcase) DetachDisk(diskName string, nodeName types.NodeName) error {
expected := &testcase.detach
if expected.diskName == "" && expected.nodeName == "" {
// testcase.detach looks uninitialized, test did not expect to call
// DetachDisk
testcase.t.Errorf("Unexpected DetachDisk call!")
return errors.New("unexpected DetachDisk call")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected DetachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
return fmt.Errorf(`unexpected DetachDisk call: %w`, diskNameErr)
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return fmt.Errorf(`unexpected DetachDisk call: %w`, nodeNameErr)
}
klog.V(4).Infof("DetachDisk call: %s, %s, returning %v", diskName, nodeName, expected.ret)
return expected.ret
}
func (testcase *testcase) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, string, error) {
expected := &testcase.diskIsAttached
if expected.diskName == "" && expected.nodeName == "" {
// testcase.diskIsAttached looks uninitialized, test did not expect to
// call DiskIsAttached
testcase.t.Errorf("Unexpected DiskIsAttached call!")
return false, diskName, errors.New("unexpected DiskIsAttached call")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected diskName %s, got %s", expected.diskName, diskName)
return false, diskName, fmt.Errorf(`unexpected DiskIsAttached call: %w`, diskNameErr)
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return false, diskName, fmt.Errorf(`unexpected DiskIsAttached call: %w`, nodeNameErr)
}
klog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret)
return expected.isAttached, diskName, expected.ret
}
func (testcase *testcase) DisksAreAttached(nodeVolumes map[types.NodeName][]string) (map[types.NodeName]map[string]bool, error) {
return nil, errors.New("not implemented")
}
func (testcase *testcase) CreateVolume(volumeOptions *vclib.VolumeOptions) (volumePath string, err error) {
return "", errors.New("not implemented")
}
func (testcase *testcase) DeleteVolume(vmDiskPath string) error {
return errors.New("not implemented")
}


@@ -1,17 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume


@@ -1,472 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
utilstrings "k8s.io/utils/strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&vsphereVolumePlugin{}}
}
type vsphereVolumePlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &vsphereVolumePlugin{}
var _ volume.PersistentVolumePlugin = &vsphereVolumePlugin{}
var _ volume.DeletableVolumePlugin = &vsphereVolumePlugin{}
var _ volume.ProvisionableVolumePlugin = &vsphereVolumePlugin{}
const (
vsphereVolumePluginName = "kubernetes.io/vsphere-volume"
)
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, utilstrings.EscapeQualifiedName(vsphereVolumePluginName), volName)
}
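// utilstrings.EscapeQualifiedName replaces the "/" in the plugin name with "~",
// so the returned pod-local volume path has the form
//
//	<pods dir>/<pod uid>/volumes/kubernetes.io~vsphere-volume/<volName>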
// vSphere Volume Plugin
func (plugin *vsphereVolumePlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *vsphereVolumePlugin) GetPluginName() string {
return vsphereVolumePluginName
}
func (plugin *vsphereVolumePlugin) IsMigratedToCSI() bool {
return true
}
func (plugin *vsphereVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return volumeSource.VolumePath, nil
}
func (plugin *vsphereVolumePlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.VsphereVolume != nil) ||
(spec.Volume != nil && spec.Volume.VsphereVolume != nil)
}
func (plugin *vsphereVolumePlugin) RequiresRemount(spec *volume.Spec) bool {
return false
}
func (plugin *vsphereVolumePlugin) SupportsMountOption() bool {
return true
}
func (plugin *vsphereVolumePlugin) SupportsBulkVolumeVerification() bool {
return true
}
func (plugin *vsphereVolumePlugin) SupportsSELinuxContextMount(spec *volume.Spec) (bool, error) {
return false, nil
}
func (plugin *vsphereVolumePlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, &VsphereDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *vsphereVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, &VsphereDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *vsphereVolumePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.Mounter, error) {
vvol, _, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
volPath := vvol.VolumePath
fsType := vvol.FSType
return &vsphereVolumeMounter{
vsphereVolume: &vsphereVolume{
podUID: podUID,
volName: spec.Name(),
volPath: volPath,
manager: manager,
mounter: mounter,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
},
fsType: fsType,
diskMounter: util.NewSafeFormatAndMountFromHost(plugin.GetPluginName(), plugin.host),
mountOptions: util.MountOptionFromSpec(spec),
}, nil
}
func (plugin *vsphereVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.Unmounter, error) {
return &vsphereVolumeUnmounter{
&vsphereVolume{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
}}, nil
}
func (plugin *vsphereVolumePlugin) ConstructVolumeSpec(volumeName, mountPath string) (volume.ReconstructedVolume, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
kvh, ok := plugin.host.(volume.KubeletVolumeHost)
if !ok {
return volume.ReconstructedVolume{}, fmt.Errorf("plugin volume host does not implement KubeletVolumeHost interface")
}
hu := kvh.GetHostUtil()
pluginMntDir := util.GetPluginMountDir(plugin.host, plugin.GetPluginName())
volumePath, err := hu.GetDeviceNameFromMount(mounter, mountPath, pluginMntDir)
if err != nil {
return volume.ReconstructedVolume{}, err
}
volumePath = strings.Replace(volumePath, "\\040", " ", -1)
klog.V(5).Infof("vSphere volume path is %q", volumePath)
vsphereVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
},
},
}
return volume.ReconstructedVolume{
Spec: volume.NewSpecFromVolume(vsphereVolume),
}, nil
}
// Abstract interface to disk operations.
type vdManager interface {
// Creates a volume
CreateVolume(provisioner *vsphereVolumeProvisioner, selectedNode *v1.Node, selectedZone []string) (volSpec *VolumeSpec, err error)
// Deletes a volume
DeleteVolume(deleter *vsphereVolumeDeleter) error
}
// vsphereVolume volumes are disk resources attached to the kubelet's host machine and exposed to the pod.
type vsphereVolume struct {
volName string
podUID types.UID
// Unique identifier of the volume, used to find the disk resource in the provider.
volPath string
// Utility interface that provides API calls to the provider to attach/detach disks.
manager vdManager
// Mounter interface that provides system calls to mount the global path to the pod local path.
mounter mount.Interface
plugin *vsphereVolumePlugin
volume.MetricsProvider
}
var _ volume.Mounter = &vsphereVolumeMounter{}
type vsphereVolumeMounter struct {
*vsphereVolume
fsType string
diskMounter *mount.SafeFormatAndMount
mountOptions []string
}
func (b *vsphereVolumeMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
SELinuxRelabel: true,
Managed: true,
}
}
// SetUp attaches the disk and bind mounts to the volume path.
func (b *vsphereVolumeMounter) SetUp(mounterArgs volume.MounterArgs) error {
return b.SetUpAt(b.GetPath(), mounterArgs)
}
// SetUpAt attaches the disk and bind mounts it to the volume path at dir.
func (b *vsphereVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
klog.V(5).Infof("vSphere volume setup %s to %s", b.volPath, dir)
// TODO: handle failed mounts here.
notmnt, err := b.mounter.IsLikelyNotMountPoint(dir)
if err != nil && !os.IsNotExist(err) {
klog.V(4).Infof("IsLikelyNotMountPoint failed: %v", err)
return err
}
if !notmnt {
klog.V(4).Infof("Something is already mounted to target %s", dir)
return nil
}
if runtime.GOOS != "windows" {
// On Windows, Mount will create the parent of dir and mklink (create a symbolic link) at dir later, so don't create a
// directory at dir now. Otherwise mklink will error: "Cannot create a file when that file already exists".
if err := os.MkdirAll(dir, 0750); err != nil {
klog.Errorf("Could not create directory %s: %v", dir, err)
return err
}
}
options := []string{"bind"}
// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
globalPDPath := makeGlobalPDPath(b.plugin.host, b.volPath)
mountOptions := util.JoinMountOptions(options, b.mountOptions)
err = b.mounter.MountSensitiveWithoutSystemd(globalPDPath, dir, "", mountOptions, nil)
if err != nil {
notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notmnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
klog.Errorf("Failed to unmount: %v", mntErr)
return err
}
notmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
klog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notmnt {
klog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", b.GetPath())
return err
}
}
os.Remove(dir)
return err
}
volume.SetVolumeOwnership(b, dir, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(b.plugin, nil))
klog.V(3).Infof("vSphere volume %s mounted to %s", b.volPath, dir)
return nil
}
var _ volume.Unmounter = &vsphereVolumeUnmounter{}
type vsphereVolumeUnmounter struct {
*vsphereVolume
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (v *vsphereVolumeUnmounter) TearDown() error {
return v.TearDownAt(v.GetPath())
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (v *vsphereVolumeUnmounter) TearDownAt(dir string) error {
return mount.CleanupMountPoint(dir, v.mounter, false)
}
func makeGlobalPDPath(host volume.VolumeHost, devName string) string {
return filepath.Join(host.GetPluginDir(vsphereVolumePluginName), util.MountsInGlobalPDPath, devName)
}
func (vv *vsphereVolume) GetPath() string {
name := vsphereVolumePluginName
return vv.plugin.host.GetPodVolumeDir(vv.podUID, utilstrings.EscapeQualifiedName(name), vv.volName)
}
// vSphere Persistent Volume Plugin
func (plugin *vsphereVolumePlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
}
}
// vSphere Deletable Volume Plugin
type vsphereVolumeDeleter struct {
*vsphereVolume
}
var _ volume.Deleter = &vsphereVolumeDeleter{}
func (plugin *vsphereVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterInternal(spec, &VsphereDiskUtil{})
}
func (plugin *vsphereVolumePlugin) newDeleterInternal(spec *volume.Spec, manager vdManager) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.VsphereVolume == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.VsphereVolume is nil")
}
return &vsphereVolumeDeleter{
&vsphereVolume{
volName: spec.Name(),
volPath: spec.PersistentVolume.Spec.VsphereVolume.VolumePath,
manager: manager,
plugin: plugin,
}}, nil
}
func (r *vsphereVolumeDeleter) Delete() error {
return r.manager.DeleteVolume(r)
}
// vSphere Provisionable Volume Plugin
type vsphereVolumeProvisioner struct {
*vsphereVolume
options volume.VolumeOptions
}
var _ volume.Provisioner = &vsphereVolumeProvisioner{}
func (plugin *vsphereVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options, &VsphereDiskUtil{})
}
func (plugin *vsphereVolumePlugin) newProvisionerInternal(options volume.VolumeOptions, manager vdManager) (volume.Provisioner, error) {
return &vsphereVolumeProvisioner{
vsphereVolume: &vsphereVolume{
manager: manager,
plugin: plugin,
},
options: options,
}, nil
}
func (v *vsphereVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
if !util.ContainsAllAccessModes(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes())
}
klog.V(1).Infof("Provision with selectedNode: %s and allowedTopologies : %s", getNodeName(selectedNode), allowedTopologies)
selectedZones, err := volumehelpers.ZonesFromAllowedTopologies(allowedTopologies)
if err != nil {
return nil, err
}
klog.V(4).Infof("Selected zones for volume : %s", selectedZones)
volSpec, err := v.manager.CreateVolume(v, selectedNode, selectedZones.List())
if err != nil {
return nil, err
}
if volSpec.Fstype == "" {
volSpec.Fstype = "ext4"
}
volumeMode := v.options.PVC.Spec.VolumeMode
if volumeMode != nil && *volumeMode == v1.PersistentVolumeBlock {
klog.V(5).Infof("vSphere block volume should not have any FSType")
volSpec.Fstype = ""
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: v.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
util.VolumeDynamicallyCreatedByKey: "vsphere-volume-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: v.options.PersistentVolumeReclaimPolicy,
AccessModes: v.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dKi", volSpec.Size)),
},
VolumeMode: volumeMode,
PersistentVolumeSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volSpec.Path,
FSType: volSpec.Fstype,
StoragePolicyName: volSpec.StoragePolicyName,
StoragePolicyID: volSpec.StoragePolicyID,
},
},
MountOptions: v.options.MountOptions,
},
}
if len(v.options.PVC.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = v.plugin.GetAccessModes()
}
labels := volSpec.Labels
requirements := make([]v1.NodeSelectorRequirement, 0)
if len(labels) != 0 {
if pv.Labels == nil {
pv.Labels = make(map[string]string)
}
for k, v := range labels {
pv.Labels[k] = v
var values []string
if k == v1.LabelTopologyZone || k == v1.LabelFailureDomainBetaZone {
values, err = volumehelpers.LabelZonesToList(v)
if err != nil {
return nil, fmt.Errorf("failed to convert label string for Zone: %s to a List: %v", v, err)
}
} else {
values = []string{v}
}
requirements = append(requirements, v1.NodeSelectorRequirement{Key: k, Operator: v1.NodeSelectorOpIn, Values: values})
}
}
if len(requirements) > 0 {
pv.Spec.NodeAffinity = new(v1.VolumeNodeAffinity)
pv.Spec.NodeAffinity.Required = new(v1.NodeSelector)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms = make([]v1.NodeSelectorTerm, 1)
pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions = requirements
}
return pv, nil
}
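// For the zone requirements built above, volumehelpers.LabelZonesToList splits a
// multi-zone label value on "__"; e.g. a hypothetical label value "zone-a__zone-b"
// yields the node-affinity values ["zone-a", "zone-b"].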
func getVolumeSource(
spec *volume.Spec) (*v1.VsphereVirtualDiskVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.VsphereVolume != nil {
return spec.Volume.VsphereVolume, spec.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.VsphereVolume != nil {
return spec.PersistentVolume.Spec.VsphereVolume, spec.ReadOnly, nil
}
return nil, false, fmt.Errorf("spec does not reference a VSphere volume type")
}
func getNodeName(node *v1.Node) string {
if node == nil {
return ""
}
return node.Name
}


@@ -1,169 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"fmt"
"path/filepath"
"strings"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
utilstrings "k8s.io/utils/strings"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
var _ volume.BlockVolumePlugin = &vsphereVolumePlugin{}
func (plugin *vsphereVolumePlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
blkUtil := volumepathhandler.NewBlockVolumePathHandler()
globalMapPathUUID, err := blkUtil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
if err != nil {
klog.Errorf("Failed to find GlobalMapPathUUID from Pod: %s with error: %+v", podUID, err)
return nil, err
}
klog.V(5).Infof("globalMapPathUUID: %v", globalMapPathUUID)
globalMapPath := filepath.Dir(globalMapPathUUID)
if len(globalMapPath) <= 1 {
return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
}
return getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath)
}
func getVolumeSpecFromGlobalMapPath(volumeName, globalMapPath string) (*volume.Spec, error) {
// Construct volume spec from globalMapPath
// globalMapPath example:
// plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID}
// plugins/kubernetes.io/vsphere-volume/volumeDevices/[datastore1]\\040volumes/myDisk
volPath := filepath.Base(globalMapPath)
volPath = strings.Replace(volPath, "\\040", " ", -1)
if len(volPath) <= 1 {
return nil, fmt.Errorf("failed to get volume path from global path=%s", globalMapPath)
}
block := v1.PersistentVolumeBlock
vsphereVolume := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: volumeName,
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volPath,
},
},
VolumeMode: &block,
},
}
return volume.NewSpecFromPersistentVolume(vsphereVolume, true), nil
}
func (plugin *vsphereVolumePlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
// If this is called via GenerateUnmapDeviceFunc(), pod is nil.
// Pass empty string as dummy uid since uid isn't used in the case.
var uid types.UID
if pod != nil {
uid = pod.UID
}
return plugin.newBlockVolumeMapperInternal(spec, uid, &VsphereDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *vsphereVolumePlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.BlockVolumeMapper, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
klog.Errorf("Failed to get Volume source from volume Spec: %+v with error: %+v", *spec, err)
return nil, err
}
volPath := volumeSource.VolumePath
mapper := &vsphereBlockVolumeMapper{
vsphereVolume: &vsphereVolume{
volName: spec.Name(),
podUID: podUID,
volPath: volPath,
manager: manager,
mounter: mounter,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
},
}
blockPath, err := mapper.GetGlobalMapPath(spec)
if err != nil {
return nil, fmt.Errorf("failed to get device path: %v", err)
}
mapper.MetricsProvider = volume.NewMetricsBlock(filepath.Join(blockPath, string(podUID)))
return mapper, nil
}
func (plugin *vsphereVolumePlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
return plugin.newUnmapperInternal(volName, podUID, &VsphereDiskUtil{})
}
func (plugin *vsphereVolumePlugin) newUnmapperInternal(volName string, podUID types.UID, manager vdManager) (volume.BlockVolumeUnmapper, error) {
return &vsphereBlockVolumeUnmapper{
vsphereVolume: &vsphereVolume{
volName: volName,
podUID: podUID,
volPath: volName,
manager: manager,
plugin: plugin,
},
}, nil
}
var _ volume.BlockVolumeMapper = &vsphereBlockVolumeMapper{}
type vsphereBlockVolumeMapper struct {
*vsphereVolume
}
var _ volume.BlockVolumeUnmapper = &vsphereBlockVolumeUnmapper{}
type vsphereBlockVolumeUnmapper struct {
*vsphereVolume
volume.MetricsNil
}
// GetGlobalMapPath returns global map path and error
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumePath
func (v *vsphereVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return filepath.Join(v.plugin.host.GetVolumeDevicePluginDir(vsphereVolumePluginName), string(volumeSource.VolumePath)), nil
}
func (v *vsphereVolume) GetPodDeviceMapPath() (string, string) {
return v.plugin.host.GetPodVolumeDeviceDir(v.podUID, utilstrings.EscapeQualifiedName(vsphereVolumePluginName)), v.volName
}
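// The two return values above combine into the pod-local block device path:
//
//	<pods dir>/<pod uid>/volumeDevices/kubernetes.io~vsphere-volume/<volName>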
// SupportsMetrics returns true for vsphereBlockVolumeMapper as it initializes the
// MetricsProvider.
func (vbvm *vsphereBlockVolumeMapper) SupportsMetrics() bool {
return true
}


@@ -1,153 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"os"
"path/filepath"
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
var (
testVolumePath = "volPath1"
testGlobalPath = "plugins/kubernetes.io/vsphere-volume/volumeDevices/volPath1"
testPodPath = "pods/poduid/volumeDevices/kubernetes.io~vsphere-volume"
)
func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
// Build a test path for a fake GlobalMapPath; /tmp stands in for the plugin dir:
// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/vsphere-volume/volumeDevices/
tmpVDir, err := utiltesting.MkTmpdir("vsphereBlockVolume")
if err != nil {
t.Fatalf("can't make a temp dir: %s", err)
}
// deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)
// Bad Path
badspec, err := getVolumeSpecFromGlobalMapPath("", "")
if badspec != nil || err == nil {
t.Errorf("Expected not to get spec from GlobalMapPath but did")
}
// Good Path
spec, err := getVolumeSpecFromGlobalMapPath("myVolume", expectedGlobalPath)
if spec == nil || err != nil {
t.Fatalf("Failed to get spec from GlobalMapPath: %s", err)
}
if spec.PersistentVolume.Name != "myVolume" {
t.Errorf("Invalid PV name from GlobalMapPath spec: %s", spec.PersistentVolume.Name)
}
if spec.PersistentVolume.Spec.VsphereVolume.VolumePath != testVolumePath {
t.Fatalf("Invalid volumePath from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.VsphereVolume.VolumePath)
}
block := v1.PersistentVolumeBlock
specMode := spec.PersistentVolume.Spec.VolumeMode
if specMode == nil {
t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", specMode, block)
}
if *specMode != block {
t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", *specMode, block)
}
}
func TestGetPodAndPluginMapPaths(t *testing.T) {
tmpVDir, err := utiltesting.MkTmpdir("vsphereBlockVolume")
if err != nil {
t.Fatalf("can't make a temp dir: %s", err)
}
// deferred clean up
defer os.RemoveAll(tmpVDir)
expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)
expectedPodPath := filepath.Join(tmpVDir, testPodPath)
spec := getTestVolume(true) // block volume
pluginMgr := volume.VolumePluginMgr{}
pluginMgr.InitPlugins(ProbeVolumePlugins(), nil, volumetest.NewFakeVolumeHost(t, tmpVDir, nil, nil))
plugin, err := pluginMgr.FindMapperPluginByName(vsphereVolumePluginName)
if err != nil {
os.RemoveAll(tmpVDir)
t.Fatalf("Can't find the plugin by name: %q", vsphereVolumePluginName)
}
if plugin.GetPluginName() != vsphereVolumePluginName {
t.Fatalf("Wrong name: %s", plugin.GetPluginName())
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID("poduid"),
},
}
mapper, err := plugin.NewBlockVolumeMapper(spec, pod, volume.VolumeOptions{})
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
if mapper == nil {
t.Fatalf("Got a nil Mounter")
}
// GetGlobalMapPath
globalMapPath, err := mapper.GetGlobalMapPath(spec)
if err != nil || len(globalMapPath) == 0 {
t.Fatalf("Invalid GlobalMapPath from spec: %s", spec.PersistentVolume.Spec.VsphereVolume.VolumePath)
}
if globalMapPath != expectedGlobalPath {
t.Errorf("Failed to get GlobalMapPath: %s %s", globalMapPath, expectedGlobalPath)
}
// GetPodDeviceMapPath
devicePath, volumeName := mapper.GetPodDeviceMapPath()
if devicePath != expectedPodPath {
t.Errorf("Got unexpected pod path: %s, expected %s", devicePath, expectedPodPath)
}
if volumeName != testVolumePath {
t.Errorf("Got unexpected volNamne: %s, expected %s", volumeName, testVolumePath)
}
}
func getTestVolume(isBlock bool) *volume.Spec {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: testVolumePath,
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: testVolumePath,
},
},
},
}
if isBlock {
blockMode := v1.PersistentVolumeBlock
pv.Spec.VolumeMode = &blockMode
}
return volume.NewSpecFromPersistentVolume(pv, true)
}


@@ -1,259 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"fmt"
"os"
"path/filepath"
"testing"
"k8s.io/mount-utils"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/cloud-provider/fake"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/legacy-cloud-providers/vsphere"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("vsphereVolumeTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume")
if err != nil {
t.Fatal("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/vsphere-volume" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
type fakePDManager struct {
}
func (fake *fakePDManager) CreateVolume(v *vsphereVolumeProvisioner, selectedNode *v1.Node, selectedZone []string) (volSpec *VolumeSpec, err error) {
volSpec = &VolumeSpec{
Path: "[local] test-volume-name.vmdk",
Size: 100,
Fstype: "ext4",
StoragePolicyName: "gold",
StoragePolicyID: "1234",
}
return volSpec, nil
}
func (fake *fakePDManager) DeleteVolume(vd *vsphereVolumeDeleter) error {
if vd.volPath != "[local] test-volume-name.vmdk" {
return fmt.Errorf("Deleter got unexpected volume path: %s", vd.volPath)
}
return nil
}
func TestPlugin(t *testing.T) {
// Initial setup to test volume plugin
tmpDir, err := utiltesting.MkTmpdir("vsphereVolumeTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: "[local] test-volume-name.vmdk",
FSType: "ext4",
},
},
}
// Test Mounter
fakeManager := &fakePDManager{}
fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*vsphereVolumePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
mntPath := filepath.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~vsphere-volume/vol1")
path := mounter.GetPath()
if path != mntPath {
t.Errorf("Got unexpected path: %s", path)
}
if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
t.Errorf("Expected success, got: %v", err)
}
// Test Unmounter
fakeManager = &fakePDManager{}
unmounter, err := plug.(*vsphereVolumePlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*vsphereVolumePlugin).newProvisionerInternal(options, &fakePDManager{})
if err != nil {
t.Errorf("newProvisionerInternal() failed: %v", err)
}
persistentSpec, err := provisioner.Provision(nil, nil)
if err != nil {
t.Errorf("Provision() failed: %v", err)
}
if persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath != "[local] test-volume-name.vmdk" {
t.Errorf("Provision() returned unexpected path %s", persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath)
}
if persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.StoragePolicyName != "gold" {
t.Errorf("Provision() returned unexpected storagepolicy name %s", persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.StoragePolicyName)
}
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 100*1024 {
t.Errorf("Provision() returned unexpected volume size: %v", size)
}
// Test Deleter
volSpec := &volume.Spec{
PersistentVolume: persistentSpec,
}
deleter, err := plug.(*vsphereVolumePlugin).newDeleterInternal(volSpec, &fakePDManager{})
if err != nil {
t.Errorf("newDeleterInternal() failed: %v", err)
}
err = deleter.Delete()
if err != nil {
t.Errorf("Deleter() failed: %v", err)
}
}
func TestUnsupportedCloudProvider(t *testing.T) {
// Initial setup to test volume plugin
tmpDir, err := utiltesting.MkTmpdir("vsphereVolumeTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
testcases := []struct {
name string
cloudProvider cloudprovider.Interface
success bool
}{
{name: "nil cloudprovider", cloudProvider: nil},
{name: "vSphere", cloudProvider: &vsphere.VSphere{}, success: true},
{name: "fake cloudprovider", cloudProvider: &fake.Cloud{}},
}
for _, tc := range testcases {
t.Logf("test case: %v", tc.name)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil, /* prober */
volumetest.NewFakeKubeletVolumeHostWithCloudProvider(t, tmpDir, nil, nil, tc.cloudProvider))
plug, err := plugMgr.FindAttachablePluginByName("kubernetes.io/vsphere-volume")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
_, err = plug.NewAttacher()
if !tc.success && err == nil {
t.Errorf("expected error when creating attacher due to incorrect cloud provider, but got none")
} else if tc.success && err != nil {
t.Errorf("expected no error when creating attacher, but got error: %v", err)
}
_, err = plug.NewDetacher()
if !tc.success && err == nil {
t.Errorf("expected error when creating detacher due to incorrect cloud provider, but got none")
} else if tc.success && err != nil {
t.Errorf("expected no error when creating detacher, but got error: %v", err)
}
}
}
func TestUnsupportedVolumeHost(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("vsphereVolumeTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume")
if err != nil {
t.Fatal("Can't find the plugin by name")
}
_, err = plug.ConstructVolumeSpec("", "")
if err == nil {
t.Errorf("Expected failure constructing volume spec with unsupported VolumeHost")
}
}


@@ -1,281 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"errors"
"fmt"
"strconv"
"strings"
"time"
v1 "k8s.io/api/core/v1"
cloudprovider "k8s.io/cloud-provider"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/legacy-cloud-providers/vsphere"
"k8s.io/legacy-cloud-providers/vsphere/vclib"
)
const (
checkSleepDuration = time.Second
diskByIDPath = "/dev/disk/by-id/"
diskSCSIPrefix = "wwn-0x"
// diskformat parameter is deprecated as of Kubernetes v1.21.0
diskformat = "diskformat"
datastore = "datastore"
StoragePolicyName = "storagepolicyname"
// hostfailurestotolerate parameter is deprecated as of Kubernetes v1.19.0
HostFailuresToTolerateCapability = "hostfailurestotolerate"
// forceprovisioning parameter is deprecated as of Kubernetes v1.19.0
ForceProvisioningCapability = "forceprovisioning"
// cachereservation parameter is deprecated as of Kubernetes v1.19.0
CacheReservationCapability = "cachereservation"
// diskstripes parameter is deprecated as of Kubernetes v1.19.0
DiskStripesCapability = "diskstripes"
// objectspacereservation parameter is deprecated as of Kubernetes v1.19.0
ObjectSpaceReservationCapability = "objectspacereservation"
// iopslimit parameter is deprecated as of Kubernetes v1.19.0
IopsLimitCapability = "iopslimit"
HostFailuresToTolerateCapabilityMin = 0
HostFailuresToTolerateCapabilityMax = 3
ForceProvisioningCapabilityMin = 0
ForceProvisioningCapabilityMax = 1
CacheReservationCapabilityMin = 0
CacheReservationCapabilityMax = 100
DiskStripesCapabilityMin = 1
DiskStripesCapabilityMax = 12
ObjectSpaceReservationCapabilityMin = 0
ObjectSpaceReservationCapabilityMax = 100
IopsLimitCapabilityMin = 0
// maxVolumeLength caps the number of characters in a vSphere volume name. The length is kept
// well below 255 because the volume name typically becomes part of the mount path -
// /var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/<name> - and systemd limits
// a unit name to 256 chars (https://github.com/systemd/systemd/pull/14294), so subtracting
// the kubelet path prefix from 256 leaves 191 characters.
// Since the datastore name is typically part of the volume name, we choose a shorter limit of 63,
// leaving room for certain characters to be escaped, etc.
// Given that a volume name is typically of the form pvc-0f13e3ad-97f8-41ab-9392-84562ef40d17.vmdk (45 chars),
// this still leaves plenty of room for including the clusterName.
maxVolumeLength = 63
)
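// Illustrative sketch (editor's addition, not part of the original file): the
// maxVolumeLength arithmetic above in action. volumeutil.GenerateVolumeName
// joins the cluster name and PV name and truncates the result to the limit,
// so, assuming a cluster named "kube" and the PV name from the comment above:
//
//	name := volumeutil.GenerateVolumeName("kube", "pvc-0f13e3ad-97f8-41ab-9392-84562ef40d17", maxVolumeLength)
//	// name == "kube-dynamic-pvc-0f13e3ad-97f8-41ab-9392-84562ef40d17" (53 chars <= 63)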
var ErrProbeVolume = errors.New("error scanning attached volumes")
type VsphereDiskUtil struct{}
type VolumeSpec struct {
Path string
Size int
Fstype string
StoragePolicyID string
StoragePolicyName string
Labels map[string]string
}
// CreateVolume creates a vSphere volume.
func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner, selectedNode *v1.Node, selectedZone []string) (volSpec *VolumeSpec, err error) {
var fstype string
cloud, err := getCloudProvider(v.plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
capacity := v.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
// vSphere works with KiB, but its minimum allocation unit is 1 MiB
volSizeMiB, err := volumehelpers.RoundUpToMiBInt(capacity)
if err != nil {
return nil, err
}
volSizeKiB := volSizeMiB * 1024
name := volumeutil.GenerateVolumeName(v.options.ClusterName, v.options.PVName, maxVolumeLength)
volumeOptions := &vclib.VolumeOptions{
CapacityKB: volSizeKiB,
Tags: *v.options.CloudTags,
Name: name,
}
volumeOptions.Zone = selectedZone
volumeOptions.SelectedNode = selectedNode
// Apply Parameters (case-insensitive). We leave validation of
// the values to the cloud provider.
for parameter, value := range v.options.Parameters {
switch strings.ToLower(parameter) {
case diskformat:
volumeOptions.DiskFormat = value
case datastore:
volumeOptions.Datastore = value
case volume.VolumeParameterFSType:
fstype = value
klog.V(4).Infof("Setting fstype as %q", fstype)
case StoragePolicyName:
volumeOptions.StoragePolicyName = value
klog.V(4).Infof("Setting StoragePolicyName as %q", volumeOptions.StoragePolicyName)
case HostFailuresToTolerateCapability, ForceProvisioningCapability,
CacheReservationCapability, DiskStripesCapability,
ObjectSpaceReservationCapability, IopsLimitCapability:
capabilityData, err := validateVSANCapability(strings.ToLower(parameter), value)
if err != nil {
return nil, err
}
volumeOptions.VSANStorageProfileData += capabilityData
default:
return nil, fmt.Errorf("invalid option %q for volume plugin %s", parameter, v.plugin.GetPluginName())
}
}
if volumeOptions.VSANStorageProfileData != "" {
if volumeOptions.StoragePolicyName != "" {
return nil, fmt.Errorf("cannot specify storage policy capabilities along with storage policy name. Please specify only one")
}
volumeOptions.VSANStorageProfileData = "(" + volumeOptions.VSANStorageProfileData + ")"
}
klog.V(4).Infof("VSANStorageProfileData in vsphere volume %q", volumeOptions.VSANStorageProfileData)
// TODO: implement PVC.Selector parsing
if v.options.PVC.Spec.Selector != nil {
return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on vSphere")
}
vmDiskPath, err := cloud.CreateVolume(volumeOptions)
if err != nil {
return nil, err
}
labels, err := cloud.GetVolumeLabels(vmDiskPath)
if err != nil {
return nil, err
}
volSpec = &VolumeSpec{
Path: vmDiskPath,
Size: volSizeKiB,
Fstype: fstype,
StoragePolicyName: volumeOptions.StoragePolicyName,
StoragePolicyID: volumeOptions.StoragePolicyID,
Labels: labels,
}
klog.V(2).Infof("Successfully created vsphere volume %s", name)
return volSpec, nil
}
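// Illustrative sketch (editor's addition): a StorageClass whose parameters
// exercise the switch in CreateVolume above. Keys are matched case-insensitively,
// and the vSAN capability keys are folded into VSANStorageProfileData (the
// values below are examples only):
//
//	kind: StorageClass
//	apiVersion: storage.k8s.io/v1
//	metadata:
//	  name: fast
//	provisioner: kubernetes.io/vsphere-volume
//	parameters:
//	  diskformat: zeroedthick
//	  datastore: VSANDatastore
//	  hostfailurestotolerate: "2"
//	  diskstripes: "1"
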
// DeleteVolume deletes a vSphere volume.
func (util *VsphereDiskUtil) DeleteVolume(vd *vsphereVolumeDeleter) error {
cloud, err := getCloudProvider(vd.plugin.host.GetCloudProvider())
if err != nil {
return err
}
if err = cloud.DeleteVolume(vd.volPath); err != nil {
klog.V(2).Infof("Error deleting vsphere volume %s: %v", vd.volPath, err)
return err
}
klog.V(2).Infof("Successfully deleted vsphere volume %s", vd.volPath)
return nil
}
func getVolPathfromVolumeName(deviceMountPath string) string {
// Assumption: no file or folder name in the datastore starts with '['.
volPath := deviceMountPath[strings.LastIndex(deviceMountPath, "["):]
// The space between the datastore and the vmdk name in a volume path is encoded as '\040'
// when returned by GetMountRefs(), whereas the path provided to attach/mount has the form
// "[local] xxx.vmdk". Replace '\040' with a space to recover the actual volume path.
return strings.Replace(volPath, "\\040", " ", -1)
}
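// Illustrative sketch (editor's addition): given a mount ref such as
//
//	/var/lib/kubelet/plugins/kubernetes.io/vsphere-volume/mounts/[local]\040xxx.vmdk
//
// getVolPathfromVolumeName returns "[local] xxx.vmdk", the form expected by
// attach/mount.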
func getCloudProvider(cloud cloudprovider.Interface) (*vsphere.VSphere, error) {
if cloud == nil {
klog.Errorf("Cloud provider not initialized properly")
return nil, errors.New("cloud provider not initialized properly")
}
vs, ok := cloud.(*vsphere.VSphere)
if !ok || vs == nil {
return nil, errors.New("invalid cloud provider: expected vSphere")
}
return vs, nil
}
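// Illustrative sketch (editor's addition): getCloudProvider is the common guard
// used by the attacher, detacher, provisioner, and deleter; on a cluster whose
// configured provider is not vSphere it fails fast, e.g.:
//
//	vs, err := getCloudProvider(plugin.host.GetCloudProvider())
//	// err != nil unless the configured provider is a *vsphere.VSphere
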
// validateVSANCapability validates the user-specified value for a vSAN policy attribute
// and returns the corresponding storage profile fragment.
func validateVSANCapability(capabilityName string, capabilityValue string) (string, error) {
var capabilityData string
capabilityIntVal, ok := verifyCapabilityValueIsInteger(capabilityValue)
if !ok {
return "", fmt.Errorf("invalid value for %s. The capabilityValue: %s must be a valid integer value", capabilityName, capabilityValue)
}
switch strings.ToLower(capabilityName) {
case HostFailuresToTolerateCapability:
if capabilityIntVal >= HostFailuresToTolerateCapabilityMin && capabilityIntVal <= HostFailuresToTolerateCapabilityMax {
capabilityData = " (\"hostFailuresToTolerate\" i" + capabilityValue + ")"
} else {
return "", fmt.Errorf(`invalid value for hostFailuresToTolerate.
The default value is %d, minimum value is %d and maximum value is %d`,
1, HostFailuresToTolerateCapabilityMin, HostFailuresToTolerateCapabilityMax)
}
case ForceProvisioningCapability:
if capabilityIntVal >= ForceProvisioningCapabilityMin && capabilityIntVal <= ForceProvisioningCapabilityMax {
capabilityData = " (\"forceProvisioning\" i" + capabilityValue + ")"
} else {
return "", fmt.Errorf(`invalid value for forceProvisioning.
The value can be either %d or %d`,
ForceProvisioningCapabilityMin, ForceProvisioningCapabilityMax)
}
case CacheReservationCapability:
if capabilityIntVal >= CacheReservationCapabilityMin && capabilityIntVal <= CacheReservationCapabilityMax {
capabilityData = " (\"cacheReservation\" i" + strconv.Itoa(capabilityIntVal*10000) + ")"
} else {
return "", fmt.Errorf(`invalid value for cacheReservation.
The minimum percentage is %d and maximum percentage is %d`,
CacheReservationCapabilityMin, CacheReservationCapabilityMax)
}
case DiskStripesCapability:
if capabilityIntVal >= DiskStripesCapabilityMin && capabilityIntVal <= DiskStripesCapabilityMax {
capabilityData = " (\"stripeWidth\" i" + capabilityValue + ")"
} else {
return "", fmt.Errorf(`invalid value for diskStripes.
The minimum value is %d and maximum value is %d`,
DiskStripesCapabilityMin, DiskStripesCapabilityMax)
}
case ObjectSpaceReservationCapability:
if capabilityIntVal >= ObjectSpaceReservationCapabilityMin && capabilityIntVal <= ObjectSpaceReservationCapabilityMax {
capabilityData = " (\"proportionalCapacity\" i" + capabilityValue + ")"
} else {
return "", fmt.Errorf(`invalid value for ObjectSpaceReservation.
The minimum percentage is %d and maximum percentage is %d`,
ObjectSpaceReservationCapabilityMin, ObjectSpaceReservationCapabilityMax)
}
case IopsLimitCapability:
if capabilityIntVal >= IopsLimitCapabilityMin {
capabilityData = " (\"iopsLimit\" i" + capabilityValue + ")"
} else {
return "", fmt.Errorf(`invalid value for iopsLimit.
The value should be greater than %d`, IopsLimitCapabilityMin)
}
}
return capabilityData, nil
}
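// Illustrative sketch (editor's addition): sample fragments produced by
// validateVSANCapability, which CreateVolume concatenates and wraps in
// parentheses to form VSANStorageProfileData:
//
//	frag, _ := validateVSANCapability("hostfailurestotolerate", "2")
//	// frag == ` ("hostFailuresToTolerate" i2)`
//	frag, _ = validateVSANCapability("cachereservation", "5")
//	// frag == ` ("cacheReservation" i50000)` (the percentage is scaled by 10000)
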
// verifyCapabilityValueIsInteger parses capabilityValue, reporting whether it is a valid integer.
func verifyCapabilityValueIsInteger(capabilityValue string) (int, bool) {
i, err := strconv.Atoi(capabilityValue)
if err != nil {
return -1, false
}
return i, true
}


@@ -1,36 +0,0 @@
//go:build !providerless && linux
// +build !providerless,linux
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"fmt"
"k8s.io/mount-utils"
)
func verifyDevicePath(path string) (string, error) {
if pathExists, err := mount.PathExists(path); err != nil {
return "", fmt.Errorf("error checking if path exists: %w", err)
} else if pathExists {
return path, nil
}
return "", nil
}


@@ -1,26 +0,0 @@
//go:build !providerless && !linux && !windows
// +build !providerless,!linux,!windows
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import "errors"
func verifyDevicePath(path string) (string, error) {
return "", errors.New("unsupported")
}


@@ -1,68 +0,0 @@
//go:build !providerless && windows
// +build !providerless,windows
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"encoding/json"
"fmt"
"os/exec"
"strings"
"k8s.io/klog/v2"
)
type diskInfoResult struct {
Number json.Number
SerialNumber string
}
func verifyDevicePath(path string) (string, error) {
if !strings.Contains(path, diskByIDPath) {
// If this volume has already been mounted then
// its devicePath will have already been converted to a disk number
klog.V(4).Infof("Found vSphere disk attached with disk number %v", path)
return path, nil
}
// NOTE: if a PowerShell command that normally returns an array (e.g. Get-Disk) yields only
// one element, PowerShell returns that object directly rather than an array containing it,
// so piping it to ConvertTo-Json would not produce the array expected below.
// Wrapping the pipeline in @(...) forces the result to always be an array.
cmd := exec.Command("powershell", "/c", "ConvertTo-Json @(Get-Disk | Select Number, SerialNumber)")
output, err := cmd.Output()
if err != nil {
klog.Errorf("Get-Disk failed, error: %v, output: %q", err, string(output))
return "", err
}
var results []diskInfoResult
if err = json.Unmarshal(output, &results); err != nil {
klog.Errorf("Failed to unmarshal Get-Disk json, output: %q", string(output))
return "", err
}
serialNumber := strings.TrimPrefix(path, diskByIDPath+diskSCSIPrefix)
for _, v := range results {
if v.SerialNumber == serialNumber {
klog.V(4).Infof("Found vSphere disk attached with serial %v", serialNumber)
return v.Number.String(), nil
}
}
return "", fmt.Errorf("unable to find vSphere disk with serial %v", serialNumber)
}
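// Illustrative sketch (editor's addition): the JSON shape this function expects
// back from PowerShell (the serial numbers below are made up):
//
//	[
//	  {"Number": 1, "SerialNumber": "6000c29f5c93d3096d3b7d18d2e28ff1"},
//	  {"Number": 2, "SerialNumber": "6000c295f9cf2e1a5e0e3f4d8b9a0c2d"}
//	]
//
// A devicePath of "/dev/disk/by-id/wwn-0x6000c29f..." is stripped to the bare
// serial, matched against SerialNumber, and the matching disk's Number is
// returned as the Windows device path.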


@@ -1,43 +0,0 @@
//go:build !providerless && windows
// +build !providerless,windows
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere_volume
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestVerifyDevicePath(t *testing.T) {
// If this volume has already been mounted, its devicePath will already have been
// converted to a disk number, in which case the original path is returned as-is.
devPath, err := verifyDevicePath("foo")
require.NoError(t, err)
assert.Equal(t, "foo", devPath)
// Won't match any serial number, meaning that an error will be returned.
devPath, err = verifyDevicePath(diskByIDPath + diskSCSIPrefix + "fake-serial")
expectedErrMsg := `unable to find vSphere disk with serial fake-serial`
if err == nil || err.Error() != expectedErrMsg {
t.Errorf("expected error message `%s` but got `%v`", expectedErrMsg, err)
}
}