Add docs about process of discovering disks from new nodes
parent 46a57b0f5c
commit b6ae233c40
@@ -611,15 +611,15 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN
 
 // BuildMissingVolumeNodeMap builds a map of volumes and nodes which are not known to attach detach controller.
 // There could be nodes in cluster which do not have any pods with vsphere volumes running on them
-// such nodes won't be part of disk verificatio check because attach-detach controller does not keep track
+// such nodes won't be part of disk verification check because attach-detach controller does not keep track
 // such nodes. But such nodes may still have dangling volumes on them and hence we need to scan all the
 // remaining nodes which weren't scanned by code previously.
 func (vs *VSphere) BuildMissingVolumeNodeMap(ctx context.Context) {
-	nodeDetails := vs.nodeManager.GetNodeNames()
+	nodeNames := vs.nodeManager.GetNodeNames()
 	// Segregate nodes according to VC-DC
 	dcNodes := make(map[string][]k8stypes.NodeName)
 
-	for _, nodeName := range nodeDetails {
+	for _, nodeName := range nodeNames {
 		// if given node is not in node volume map
 		if !vs.vsphereVolumeMap.CheckForNode(nodeName) {
 			nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName)
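The hunk above documents why nodes that run no pods with vSphere volumes still have to be scanned: the attach-detach controller never reports them, yet they may hold dangling disks. Below is a minimal, self-contained sketch of that segregation step, not the cloud provider's actual code; nodeInfo, the datacenter field, and the sample inputs are hypothetical stand-ins for the nodeManager plumbing.

package main

import "fmt"

// nodeInfo is a hypothetical stand-in for the per-node metadata the real
// nodeManager would return.
type nodeInfo struct {
	datacenter string
}

// buildMissingVolumeNodeMap mirrors the shape of BuildMissingVolumeNodeMap above:
// node names the volume map has not seen yet are grouped per datacenter, so each
// datacenter can later be queried once for dangling disks.
func buildMissingVolumeNodeMap(allNodes map[string]nodeInfo, known func(string) bool) map[string][]string {
	dcNodes := make(map[string][]string)
	for name, info := range allNodes {
		// Skip nodes already present in the node volume map (CheckForNode == true).
		if known(name) {
			continue
		}
		dcNodes[info.datacenter] = append(dcNodes[info.datacenter], name)
	}
	return dcNodes
}

func main() {
	nodes := map[string]nodeInfo{
		"node1.lan": {datacenter: "dc-1"},
		"node2.lan": {datacenter: "dc-1"},
		"node3.lan": {datacenter: "dc-2"},
	}
	verified := map[string]bool{"node1.lan": true}
	missing := buildMissingVolumeNodeMap(nodes, func(n string) bool { return verified[n] })
	fmt.Println(missing) // e.g. map[dc-1:[node2.lan] dc-2:[node3.lan]] (map order varies)
}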
@@ -36,14 +36,14 @@ type nodeVolumeStatus struct {
 // VsphereVolumeMap stores last known state of node and volume mapping
 type VsphereVolumeMap struct {
 	volumeNodeMap map[volumePath]nodeVolumeStatus
-	knownNodes    map[k8stypes.NodeName]bool
+	nodeMap       map[k8stypes.NodeName]bool
 	lock          sync.RWMutex
 }
 
 func NewVsphereVolumeMap() *VsphereVolumeMap {
 	return &VsphereVolumeMap{
 		volumeNodeMap: map[volumePath]nodeVolumeStatus{},
-		knownNodes:    map[k8stypes.NodeName]bool{},
+		nodeMap:       map[k8stypes.NodeName]bool{},
 	}
 }
 
@@ -56,6 +56,9 @@ func (vsphereVolume *VsphereVolumeMap) StartDiskVerification() {
 		v.verified = false
 		vsphereVolume.volumeNodeMap[k] = v
 	}
+	// reset nodeMap to empty so that any node we could not verify via usual verification process
+	// can still be verified.
+	vsphereVolume.nodeMap = map[k8stypes.NodeName]bool{}
 }
 
 // CheckForVolume verifies if disk is attached to some node in the cluster.
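The reset added above ties the whole verification cycle together: StartDiskVerification marks every volume unverified and forgets all nodes, the next scan re-adds what it actually sees, RemoveUnverified drops anything not re-observed, and any node CheckForNode no longer reports gets picked up by BuildMissingVolumeNodeMap. The following is a toy model of that lifecycle with simplified stand-in types (plain strings instead of k8stypes.NodeName and volumePath), not the real VsphereVolumeMap.

package main

import "fmt"

// volumeStatus is a simplified stand-in for nodeVolumeStatus above.
type volumeStatus struct {
	node     string
	verified bool
}

// volumeMap is a toy model of VsphereVolumeMap: volume path -> status, plus the
// set of nodes seen during the current verification round.
type volumeMap struct {
	volumeNodeMap map[string]volumeStatus
	nodeMap       map[string]bool
}

func newVolumeMap() *volumeMap {
	return &volumeMap{
		volumeNodeMap: map[string]volumeStatus{},
		nodeMap:       map[string]bool{},
	}
}

// startDiskVerification marks every known volume unverified and, as in the hunk
// above, resets nodeMap so nodes must be re-observed in the current round.
func (m *volumeMap) startDiskVerification() {
	for k, v := range m.volumeNodeMap {
		v.verified = false
		m.volumeNodeMap[k] = v
	}
	m.nodeMap = map[string]bool{}
}

// add records a volume seen on a node during the current scan.
func (m *volumeMap) add(node, volume string) {
	m.volumeNodeMap[volume] = volumeStatus{node: node, verified: true}
	m.nodeMap[node] = true
}

// removeUnverified drops volumes (and their nodes) that were not re-observed.
func (m *volumeMap) removeUnverified() {
	for k, v := range m.volumeNodeMap {
		if !v.verified {
			delete(m.volumeNodeMap, k)
			delete(m.nodeMap, v.node)
		}
	}
}

func main() {
	m := newVolumeMap()
	m.add("node1.lan", "[foobar] kubevols/foo.vmdk")

	m.startDiskVerification()           // new round: node1.lan is no longer "known"
	fmt.Println(m.nodeMap["node1.lan"]) // false -> a BuildMissingVolumeNodeMap-style pass would scan it
	m.removeUnverified()
	fmt.Println(len(m.volumeNodeMap)) // 0, the stale entry was dropped
}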
@@ -74,11 +77,8 @@ func (vsphereVolume *VsphereVolumeMap) CheckForVolume(path string) (k8stypes.Nod
 func (vsphereVolume *VsphereVolumeMap) CheckForNode(nodeName k8stypes.NodeName) bool {
 	vsphereVolume.lock.RLock()
 	defer vsphereVolume.lock.RUnlock()
-	_, ok := vsphereVolume.knownNodes[nodeName]
-	if ok {
-		return true
-	}
-	return false
+	_, ok := vsphereVolume.nodeMap[nodeName]
+	return ok
 }
 
 // Add all devices found on a node to the device map
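The rewritten CheckForNode collapses the lookup into the comma-ok idiom: the boolean returned by a map index expression already answers "is this key present", so no branch is needed. A tiny standalone illustration (the set and contains helper here are hypothetical, not part of the change):

package main

import "fmt"

// contains shows the comma-ok idiom the simplified CheckForNode uses.
func contains(set map[string]bool, key string) bool {
	_, ok := set[key]
	return ok
}

func main() {
	nodes := map[string]bool{"node1.lan": true}
	fmt.Println(contains(nodes, "node1.lan")) // true
	fmt.Println(contains(nodes, "node2.lan")) // false
}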
@@ -91,7 +91,7 @@ func (vsphereVolume *VsphereVolumeMap) Add(node k8stypes.NodeName, vmDevices obj
 		if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
 			filename := volumePath(backing.FileName)
 			vsphereVolume.volumeNodeMap[filename] = nodeVolumeStatus{node, true}
-			vsphereVolume.knownNodes[node] = true
+			vsphereVolume.nodeMap[node] = true
 		}
 	}
 }
@@ -104,7 +104,7 @@ func (vsphereVolume *VsphereVolumeMap) RemoveUnverified() {
 	for k, v := range vsphereVolume.volumeNodeMap {
 		if !v.verified {
 			delete(vsphereVolume.volumeNodeMap, k)
-			delete(vsphereVolume.knownNodes, v.nodeName)
+			delete(vsphereVolume.nodeMap, v.nodeName)
 		}
 	}
 }
@@ -28,34 +28,66 @@ import (
 
 func TestVsphereVolumeMap(t *testing.T) {
 	tests := []struct {
-		name            string
-		deviceToAdd     object.VirtualDeviceList
-		nodeToAdd       k8stypes.NodeName
-		volumeToCheck   string
-		runVerification bool
-		expectInMap     bool
+		name        string
+		deviceToAdd object.VirtualDeviceList
+		nodeToAdd   k8stypes.NodeName
+		checkRunner func(volumeMap *VsphereVolumeMap)
 	}{
 		{
-			name:          "adding new volume",
-			deviceToAdd:   getVirtualDeviceList("[foobar] kubevols/foo.vmdk"),
-			nodeToAdd:     convertToK8sType("node1.lan"),
-			volumeToCheck: "[foobar] kubevols/foo.vmdk",
-			expectInMap:   true,
+			name:        "adding new volume",
+			deviceToAdd: getVirtualDeviceList("[foobar] kubevols/foo.vmdk"),
+			nodeToAdd:   convertToK8sType("node1.lan"),
+			checkRunner: func(volumeMap *VsphereVolumeMap) {
+				volumeToCheck := "[foobar] kubevols/foo.vmdk"
+				_, ok := volumeMap.CheckForVolume(volumeToCheck)
+				if !ok {
+					t.Errorf("error checking volume %s, expected true got %v", volumeToCheck, ok)
+				}
+			},
 		},
 		{
-			name:          "mismatching volume",
-			deviceToAdd:   getVirtualDeviceList("[foobar] kubevols/foo.vmdk"),
-			nodeToAdd:     convertToK8sType("node1.lan"),
-			volumeToCheck: "[foobar] kubevols/bar.vmdk",
-			expectInMap:   false,
+			name:        "mismatching volume",
+			deviceToAdd: getVirtualDeviceList("[foobar] kubevols/foo.vmdk"),
+			nodeToAdd:   convertToK8sType("node1.lan"),
+			checkRunner: func(volumeMap *VsphereVolumeMap) {
+				volumeToCheck := "[foobar] kubevols/bar.vmdk"
+				_, ok := volumeMap.CheckForVolume(volumeToCheck)
+				if ok {
+					t.Errorf("error checking volume %s, expected false got %v", volumeToCheck, ok)
+				}
+			},
 		},
 		{
-			name:            "should remove unverified devices",
-			deviceToAdd:     getVirtualDeviceList("[foobar] kubevols/foo.vmdk"),
-			nodeToAdd:       convertToK8sType("node1.lan"),
-			volumeToCheck:   "[foobar] kubevols/foo.vmdk",
-			runVerification: true,
-			expectInMap:     false,
+			name:        "should remove unverified devices",
+			deviceToAdd: getVirtualDeviceList("[foobar] kubevols/foo.vmdk"),
+			nodeToAdd:   convertToK8sType("node1.lan"),
+			checkRunner: func(volumeMap *VsphereVolumeMap) {
+				volumeMap.StartDiskVerification()
+				volumeMap.RemoveUnverified()
+				volumeToCheck := "[foobar] kubevols/foo.vmdk"
+				_, ok := volumeMap.CheckForVolume(volumeToCheck)
+				if ok {
+					t.Errorf("error checking volume %s, expected false got %v", volumeToCheck, ok)
+				}
+				node := k8stypes.NodeName("node1.lan")
+				ok = volumeMap.CheckForNode(node)
+				if ok {
+					t.Errorf("unexpected node %s in node map", node)
+				}
+			},
 		},
+		{
+			name:        "node check should return false for previously added node",
+			deviceToAdd: getVirtualDeviceList("[foobar] kubevols/foo.vmdk"),
+			nodeToAdd:   convertToK8sType("node1.lan"),
+			checkRunner: func(volumeMap *VsphereVolumeMap) {
+				volumeMap.StartDiskVerification()
+				node := k8stypes.NodeName("node1.lan")
+				ok := volumeMap.CheckForNode(node)
+				if ok {
+					t.Errorf("unexpected node %s in node map", node)
+				}
+			},
+		},
 	}
 
@@ -63,16 +95,7 @@ func TestVsphereVolumeMap(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			vMap := NewVsphereVolumeMap()
 			vMap.Add(tc.nodeToAdd, tc.deviceToAdd)
-
-			if tc.runVerification {
-				vMap.StartDiskVerification()
-				vMap.RemoveUnverified()
-			}
-			_, ok := vMap.CheckForVolume(tc.volumeToCheck)
-			nodeOk := vMap.CheckForNode(tc.nodeToAdd)
-			if ok != tc.expectInMap && nodeOk != tc.expectInMap {
-				t.Errorf("error checking volume %s, expected %v got %v", tc.volumeToCheck, tc.expectInMap, ok)
-			}
+			tc.checkRunner(vMap)
 		})
 	}
 }
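The test refactor above swaps the declarative volumeToCheck/runVerification/expectInMap fields for a per-case checkRunner closure, so each table entry can drive the map through whatever sequence of calls it needs and make its own assertions. Below is a generic sketch of that pattern; abs and TestAbs are toy stand-ins, not part of the change, and unlike the hunk above this sketch passes the subtest's *testing.T into the closure instead of capturing the outer one.

package main

import "testing"

// abs is a toy function under test, standing in for VsphereVolumeMap here.
func abs(x int) int {
	if x < 0 {
		return -x
	}
	return x
}

// TestAbs follows the shape of the refactored test above: shared setup fields
// plus a per-case check closure that runs whatever assertions the case needs.
func TestAbs(t *testing.T) {
	tests := []struct {
		name        string
		input       int
		checkRunner func(t *testing.T, got int)
	}{
		{
			name:  "negative input",
			input: -3,
			checkRunner: func(t *testing.T, got int) {
				if got != 3 {
					t.Errorf("expected 3, got %d", got)
				}
			},
		},
		{
			name:  "already positive",
			input: 5,
			checkRunner: func(t *testing.T, got int) {
				if got != 5 {
					t.Errorf("expected 5, got %d", got)
				}
			},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			tc.checkRunner(t, abs(tc.input))
		})
	}
}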