Mirror of https://github.com/k3s-io/kubernetes.git
Add dangling volume check for vsphere
This commit is contained in:
parent 71fea80155
commit 6c4c5ab691
@@ -275,7 +275,6 @@ func (plugin *vsphereVolumePlugin) NewDeviceUnmounter() (volume.DeviceUnmounter,
// Detach the given device from the given node.
func (detacher *vsphereVMDKDetacher) Detach(volumeName string, nodeName types.NodeName) error {
    volPath := getVolPathfromVolumeName(volumeName)
    attached, newVolumePath, err := detacher.vsphereVolumes.DiskIsAttached(volPath, nodeName)
    if err != nil {
@@ -17,6 +17,7 @@ go_library(
        "vsphere_util_linux.go",
        "vsphere_util_unsupported.go",
        "vsphere_util_windows.go",
+       "vsphere_volume_map.go",
    ],
    importmap = "k8s.io/kubernetes/vendor/k8s.io/legacy-cloud-providers/vsphere",
    importpath = "k8s.io/legacy-cloud-providers/vsphere",
@@ -31,6 +32,7 @@ go_library(
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/cloud-provider:go_default_library",
        "//staging/src/k8s.io/cloud-provider/node/helpers:go_default_library",
+       "//staging/src/k8s.io/cloud-provider/volume/errors:go_default_library",
        "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
        "//staging/src/k8s.io/legacy-cloud-providers/vsphere/vclib:go_default_library",
        "//staging/src/k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers:go_default_library",
@@ -54,6 +56,7 @@ go_test(
        "credentialmanager_test.go",
        "vsphere_test.go",
        "vsphere_util_test.go",
+       "vsphere_volume_map_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
@@ -70,6 +73,7 @@ go_test(
        "//vendor/github.com/vmware/govmomi:go_default_library",
        "//vendor/github.com/vmware/govmomi/find:go_default_library",
        "//vendor/github.com/vmware/govmomi/lookup/simulator:go_default_library",
        "//vendor/github.com/vmware/govmomi/object:go_default_library",
        "//vendor/github.com/vmware/govmomi/property:go_default_library",
        "//vendor/github.com/vmware/govmomi/simulator:go_default_library",
        "//vendor/github.com/vmware/govmomi/simulator/vpx:go_default_library",
@@ -24,7 +24,6 @@ import (
    "github.com/vmware/govmomi/find"
    "github.com/vmware/govmomi/object"
    "github.com/vmware/govmomi/vim25/mo"
    "github.com/vmware/govmomi/vim25/soap"
    "github.com/vmware/govmomi/vim25/types"
    "k8s.io/klog/v2"
@@ -181,14 +180,6 @@ func IsInvalidCredentialsError(err error) bool {
    return isInvalidCredentialsError
}

// VerifyVolumePathsForVM verifies if the volume paths (volPaths) are attached to the VM.
func VerifyVolumePathsForVM(vmMo mo.VirtualMachine, volPaths []string, nodeName string, nodeVolumeMap map[string]map[string]bool) {
    // Verify if the volume paths are present on the VM's backing virtual disk devices.
    vmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device)
    VerifyVolumePathsForVMDevices(vmDevices, volPaths, nodeName, nodeVolumeMap)
}

// VerifyVolumePathsForVMDevices verifies if the volume paths (volPaths) are attached to the VM's devices.
func VerifyVolumePathsForVMDevices(vmDevices object.VirtualDeviceList, volPaths []string, nodeName string, nodeVolumeMap map[string]map[string]bool) {
    volPathsMap := make(map[string]bool)
@@ -23,6 +23,7 @@ import (
    "errors"
    "fmt"
    "io"
    "net"
    "net/url"
    "os"
@@ -48,6 +49,7 @@ import (
    "k8s.io/client-go/tools/cache"
    cloudprovider "k8s.io/cloud-provider"
    nodehelpers "k8s.io/cloud-provider/node/helpers"
+   volerr "k8s.io/cloud-provider/volume/errors"
    volumehelpers "k8s.io/cloud-provider/volume/helpers"
    "k8s.io/klog/v2"
@@ -95,6 +97,7 @@ type VSphere struct {
    hostName string
    // Maps the VSphere IP address to VSphereInstance
    vsphereInstanceMap map[string]*VSphereInstance
+   vsphereVolumeMap   *VsphereVolumeMap
    // Responsible for managing discovery of k8s nodes, their location etc.
    nodeManager *NodeManager
    vmUUID      string
@@ -542,6 +545,7 @@ func buildVSphereFromConfig(cfg VSphereConfig) (*VSphere, error) {
            nodeInfoMap:     make(map[string]*NodeInfo),
            registeredNodes: make(map[string]*v1.Node),
        },
+       vsphereVolumeMap:     NewVsphereVolumeMap(),
        isSecretInfoProvided: isSecretInfoProvided,
        isSecretManaged:      !cfg.Global.SecretNotManaged,
        cfg:                  &cfg,
@@ -951,6 +955,20 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN
        }
    }
    klog.V(4).Infof("AttachDisk executed for node %s and volume %s with diskUUID %s. Err: %s", convertToString(nodeName), vmDiskPath, diskUUID, err)
+   if err != nil {
+       // If attach failed, check whether the disk is attached somewhere else. This can happen
+       // for several reasons, and returning a dangling volume error here lets the attach-detach
+       // controller detach the disk from a node where it is not needed.
+       existingNode, ok := vs.vsphereVolumeMap.CheckForVolume(vmDiskPath)
+       if ok {
+           attached, newVolumePath, diskAttachedError := vs.DiskIsAttached(vmDiskPath, existingNode)
+           // If the disk is attached to a different node, raise a dangling error.
+           if diskAttachedError == nil && attached && (nodeName != existingNode) {
+               klog.V(3).Infof("found dangling volume %s to node %s", vmDiskPath, existingNode)
+               return "", volerr.NewDanglingError(err.Error(), existingNode, newVolumePath)
+           }
+       }
+   }
    vclib.RecordvSphereMetric(vclib.OperationAttachVolume, requestTime, err)
    return diskUUID, err
}
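For context, a hedged sketch of how a caller could consume the dangling error raised above. The DanglingAttachError type and its exported CurrentNode and DevicePath fields come from k8s.io/cloud-provider/volume/errors (the volerr import added by this commit); the detachFromNode callback is a hypothetical stand-in for whatever detach path the caller uses, not part of this commit:

    // Sketch only, assuming the imports already present in this file
    // (errors, volerr, klog, k8stypes). detachFromNode is hypothetical.
    func handleAttachError(err error, detachFromNode func(devicePath string, node k8stypes.NodeName) error) error {
        var danglingErr *volerr.DanglingAttachError
        if errors.As(err, &danglingErr) {
            // The disk is still attached to a stale node; detach it there
            // first so the attach can be retried against the intended node.
            klog.V(3).Infof("volume %s dangling on node %s", danglingErr.DevicePath, danglingErr.CurrentNode)
            return detachFromNode(danglingErr.DevicePath, danglingErr.CurrentNode)
        }
        return err
    }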
@@ -1084,6 +1102,7 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b
// 5b. If VMs are removed from vSphere inventory they are ignored.
func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) {
    disksAreAttachedInternal := func(nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName]map[string]bool, error) {
+       vs.vsphereVolumeMap.StartDiskVerification()

        // disksAreAttach checks whether disks are attached to the nodes.
        // Returns nodes that need to be retried if retry is true

@@ -1195,7 +1214,10 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
        for nodeName, volPaths := range attached {
            disksAttached[convertToK8sType(nodeName)] = volPaths
        }
    }
+   // Any volume which we could not verify is removed from the map.
+   vs.vsphereVolumeMap.RemoveUnverified()
    klog.V(4).Infof("DisksAreAttached successfully executed. result: %+v", attached)
    return disksAttached, nil
}
@@ -569,6 +569,7 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN
            return nodesToRetry, err
        }
        klog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm)
+       vs.vsphereVolumeMap.Add(nodeName, devices)
        vclib.VerifyVolumePathsForVMDevices(devices, nodeVolumes[nodeName], convertToString(nodeName), attached)
    }
}

@@ -599,7 +600,10 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN
    }
    nodeUUID = strings.ToLower(nodeUUID)
    klog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %v", nodeName, nodeUUID, vmMoMap)
-   vclib.VerifyVolumePathsForVM(vmMoMap[nodeUUID], nodeVolumes[nodeName], convertToString(nodeName), attached)
+   vmMo := vmMoMap[nodeUUID]
+   vmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device)
+   vs.vsphereVolumeMap.Add(nodeName, vmDevices)
+   vclib.VerifyVolumePathsForVMDevices(vmDevices, nodeVolumes[nodeName], convertToString(nodeName), attached)
}
return nodesToRetry, nil
}
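To make the verification flow concrete, a hedged sketch of how the two calls above cooperate (assumptions: a single node named node1.lan, the getVirtualDeviceList helper from the new test file below, and that VerifyVolumePathsForVMDevices creates missing inner map entries). Add records every VirtualDisk backing file so AttachDisk can later look it up, while VerifyVolumePathsForVMDevices marks requested paths that match a backing file as attached:

    // Sketch only: how checkDiskAttached feeds the shared maps.
    func demoVerify(vs *VSphere) {
        vmDevices := getVirtualDeviceList("[foobar] kubevols/foo.vmdk")
        attached := map[string]map[string]bool{} // nodeName -> volPath -> attached?

        vs.vsphereVolumeMap.Add("node1.lan", vmDevices) // remembered for CheckForVolume
        vclib.VerifyVolumePathsForVMDevices(vmDevices,
            []string{"[foobar] kubevols/foo.vmdk"}, // the paths we were asked to verify
            "node1.lan", attached)
        // On a match, attached["node1.lan"]["[foobar] kubevols/foo.vmdk"] should now be true.
    }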
@@ -0,0 +1,96 @@
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
    "sync"

    "github.com/vmware/govmomi/object"
    "github.com/vmware/govmomi/vim25/types"
    k8stypes "k8s.io/apimachinery/pkg/types"
)

type volumePath string

type nodeVolumeStatus struct {
    nodeName k8stypes.NodeName
    verified bool
}

// VsphereVolumeMap stores the last known state of the node-to-volume mapping.
type VsphereVolumeMap struct {
    volumeNodeMap map[volumePath]nodeVolumeStatus
    lock          sync.RWMutex
}

func NewVsphereVolumeMap() *VsphereVolumeMap {
    return &VsphereVolumeMap{
        volumeNodeMap: map[volumePath]nodeVolumeStatus{},
    }
}

// StartDiskVerification marks all known volumes as unverified, so that
// disks which are not re-verified can be removed at the end of the
// verification process.
func (vsphereVolume *VsphereVolumeMap) StartDiskVerification() {
    vsphereVolume.lock.Lock()
    defer vsphereVolume.lock.Unlock()
    for k, v := range vsphereVolume.volumeNodeMap {
        v.verified = false
        vsphereVolume.volumeNodeMap[k] = v
    }
}

// CheckForVolume verifies if a disk is attached to some node in the cluster.
// This check is not definitive and should be followed up by separate verification.
func (vsphereVolume *VsphereVolumeMap) CheckForVolume(path string) (k8stypes.NodeName, bool) {
    vsphereVolume.lock.RLock()
    defer vsphereVolume.lock.RUnlock()
    vPath := volumePath(path)
    ns, ok := vsphereVolume.volumeNodeMap[vPath]
    if ok {
        return ns.nodeName, true
    }
    return "", false
}

// Add adds all devices found on a node to the device map.
func (vsphereVolume *VsphereVolumeMap) Add(node k8stypes.NodeName, vmDevices object.VirtualDeviceList) {
    vsphereVolume.lock.Lock()
    defer vsphereVolume.lock.Unlock()
    for _, device := range vmDevices {
        if vmDevices.TypeName(device) == "VirtualDisk" {
            virtualDevice := device.GetVirtualDevice()
            if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
                filename := volumePath(backing.FileName)
                vsphereVolume.volumeNodeMap[filename] = nodeVolumeStatus{node, true}
            }
        }
    }
}

// RemoveUnverified removes any device which could not be verified as attached to a node.
func (vsphereVolume *VsphereVolumeMap) RemoveUnverified() {
    vsphereVolume.lock.Lock()
    defer vsphereVolume.lock.Unlock()
    for k, v := range vsphereVolume.volumeNodeMap {
        if !v.verified {
            delete(vsphereVolume.volumeNodeMap, k)
        }
    }
}
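A minimal sketch of this map's intended lifecycle, as driven by DisksAreAttached and consumed by AttachDisk above; the device list is assumed to come from the VM inventory, as in checkDiskAttached:

    // Sketch only: one verification cycle of VsphereVolumeMap.
    func demoVolumeMapCycle(devicesOnNode1 object.VirtualDeviceList) {
        vMap := NewVsphereVolumeMap()

        // A reconciliation pass begins: every known entry becomes unverified.
        vMap.StartDiskVerification()

        // Devices actually found on each VM are re-registered and re-verified.
        vMap.Add("node1.lan", devicesOnNode1)

        // Entries that no node re-reported are stale and are dropped.
        vMap.RemoveUnverified()

        // AttachDisk can later ask where a disk was last seen; a hit is only a
        // hint and is re-checked with DiskIsAttached before raising an error.
        if node, ok := vMap.CheckForVolume("[foobar] kubevols/foo.vmdk"); ok {
            _ = node // e.g. feed into vs.DiskIsAttached(path, node)
        }
    }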
@@ -0,0 +1,92 @@
// +build !providerless

/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
    "testing"

    "github.com/vmware/govmomi/object"
    "github.com/vmware/govmomi/vim25/types"
    k8stypes "k8s.io/apimachinery/pkg/types"
)

func TestVsphereVolumeMap(t *testing.T) {
    tests := []struct {
        name            string
        deviceToAdd     object.VirtualDeviceList
        nodeToAdd       k8stypes.NodeName
        volumeToCheck   string
        runVerification bool
        expectInMap     bool
    }{
        {
            name:          "adding new volume",
            deviceToAdd:   getVirtualDeviceList("[foobar] kubevols/foo.vmdk"),
            nodeToAdd:     convertToK8sType("node1.lan"),
            volumeToCheck: "[foobar] kubevols/foo.vmdk",
            expectInMap:   true,
        },
        {
            name:          "mismatching volume",
            deviceToAdd:   getVirtualDeviceList("[foobar] kubevols/foo.vmdk"),
            nodeToAdd:     convertToK8sType("node1.lan"),
            volumeToCheck: "[foobar] kubevols/bar.vmdk",
            expectInMap:   false,
        },
        {
            name:            "should remove unverified devices",
            deviceToAdd:     getVirtualDeviceList("[foobar] kubevols/foo.vmdk"),
            nodeToAdd:       convertToK8sType("node1.lan"),
            volumeToCheck:   "[foobar] kubevols/foo.vmdk",
            runVerification: true,
            expectInMap:     false,
        },
    }

    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            vMap := NewVsphereVolumeMap()
            vMap.Add(tc.nodeToAdd, tc.deviceToAdd)

            if tc.runVerification {
                vMap.StartDiskVerification()
                vMap.RemoveUnverified()
            }
            _, ok := vMap.CheckForVolume(tc.volumeToCheck)
            if ok != tc.expectInMap {
                t.Errorf("error checking volume %s, expected %v got %v", tc.volumeToCheck, tc.expectInMap, ok)
            }
        })
    }
}

func getVirtualDeviceList(vPath string) object.VirtualDeviceList {
    return object.VirtualDeviceList{
        &types.VirtualDisk{
            VirtualDevice: types.VirtualDevice{
                Key: 1000,
                Backing: &types.VirtualDiskFlatVer2BackingInfo{
                    VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{
                        FileName: vPath,
                    },
                },
            },
        },
    }
}