mirror of https://github.com/k3s-io/kubernetes.git

Address review comments

commit 1a323279a2
parent b6ae233c40
@@ -311,6 +311,8 @@ func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) {
 	return nodeDetails, nil
 }
 
+// GetNodeNames returns list of nodes that are known to vsphere cloudprovider.
+// These are typically nodes that make up k8s cluster.
 func (nm *NodeManager) GetNodeNames() []k8stypes.NodeName {
 	nodes := nm.getNodes()
 	var nodeNameList []k8stypes.NodeName
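For context, GetNodeNames presumably goes on to collect the name of every node returned by nm.getNodes(). A minimal standalone sketch of that pattern, assuming the nodes are exposed as a map of *v1.Node (the real NodeManager types and fields may differ):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8stypes "k8s.io/apimachinery/pkg/types"
)

// getNodeNames illustrates the likely shape of GetNodeNames: iterate the
// known nodes and convert each node name into a k8stypes.NodeName.
func getNodeNames(nodes map[string]*v1.Node) []k8stypes.NodeName {
	var nodeNameList []k8stypes.NodeName
	for _, node := range nodes {
		nodeNameList = append(nodeNameList, k8stypes.NodeName(node.Name))
	}
	return nodeNameList
}

func main() {
	nodes := map[string]*v1.Node{
		"node-a": {ObjectMeta: metav1.ObjectMeta{Name: "node-a"}},
	}
	fmt.Println(getNodeNames(nodes)) // [node-a]
}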
@@ -1221,7 +1221,7 @@ func (vs *VSphere) DisksAreAttached(nodeVolumes map[k8stypes.NodeName][]string)
 		}
 
 	}
-	klog.V(4).Infof("DisksAreAttach successfully executed. result: %+v", attached)
+	klog.V(4).Infof("DisksAreAttached successfully executed. result: %+v", attached)
 	// There could be nodes in cluster which do not have any pods with vsphere volumes running on them
 	// such nodes won't be part of nodeVolumes map because attach-detach controller does not keep track
 	// such nodes. But such nodes may still have dangling volumes on them and hence we need to scan all the
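The comment above is the rationale for the BuildMissingVolumeNodeMap changes below: nodes that run no pods with vsphere volumes never appear in nodeVolumes, yet may still hold dangling volumes. A hypothetical helper, not part of the cloud provider, showing how such nodes can be picked out:

package main

import (
	"fmt"

	k8stypes "k8s.io/apimachinery/pkg/types"
)

// nodesMissingFromVolumeMap returns the cluster nodes that have no entry in
// nodeVolumes; per the comment above, these still need a disk scan because
// they may hold dangling volumes the attach-detach controller never tracked.
func nodesMissingFromVolumeMap(allNodes []k8stypes.NodeName, nodeVolumes map[k8stypes.NodeName][]string) []k8stypes.NodeName {
	var missing []k8stypes.NodeName
	for _, n := range allNodes {
		if _, ok := nodeVolumes[n]; !ok {
			missing = append(missing, n)
		}
	}
	return missing
}

func main() {
	all := []k8stypes.NodeName{"node-a", "node-b"}
	vols := map[k8stypes.NodeName][]string{"node-a": {"[ds] kubevols/vol1.vmdk"}}
	fmt.Println(nodesMissingFromVolumeMap(all, vols)) // [node-b]
}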
@@ -636,6 +636,7 @@ func (vs *VSphere) BuildMissingVolumeNodeMap(ctx context.Context) {
 
 	for _, nodeNames := range dcNodes {
 		// Start go routines per VC-DC to check disks are attached
+		wg.Add(1)
 		go func(nodes []k8stypes.NodeName) {
 			err := vs.checkNodeDisks(ctx, nodeNames)
 			if err != nil {
@@ -643,7 +644,6 @@ func (vs *VSphere) BuildMissingVolumeNodeMap(ctx context.Context) {
 			}
 			wg.Done()
 		}(nodeNames)
-		wg.Add(1)
 	}
 	wg.Wait()
 }
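The two hunks above move wg.Add(1) ahead of the goroutine launch. Calling Add only after the go statement races with wg.Wait(): Wait can observe a zero counter and return before the worker is registered. A minimal standalone sketch of the corrected ordering (the worker body and node names are placeholders, not the provider's checkNodeDisks logic):

package main

import (
	"fmt"
	"sync"
)

func main() {
	nodeBatches := [][]string{{"node-a"}, {"node-b", "node-c"}}

	var wg sync.WaitGroup
	for _, batch := range nodeBatches {
		// Count the goroutine before launching it, so Wait cannot
		// return before this worker has been registered.
		wg.Add(1)
		go func(nodes []string) {
			defer wg.Done()
			fmt.Printf("checking disks on %v\n", nodes)
		}(batch)
	}
	wg.Wait()
}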
@@ -74,6 +74,9 @@ func (vsphereVolume *VsphereVolumeMap) CheckForVolume(path string) (k8stypes.Nod
 	return "", false
 }
 
+// CheckForNode returns true if given node has already been processed by volume
+// verification mechanism. This is used to skip verifying attached disks on nodes
+// which were previously verified.
 func (vsphereVolume *VsphereVolumeMap) CheckForNode(nodeName k8stypes.NodeName) bool {
 	vsphereVolume.lock.RLock()
 	defer vsphereVolume.lock.RUnlock()
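A sketch of the read-locked lookup CheckForNode presumably performs; the verifiedNodes field name and its bool value type are assumptions for illustration, not the actual VsphereVolumeMap fields:

package main

import (
	"fmt"
	"sync"

	k8stypes "k8s.io/apimachinery/pkg/types"
)

// VsphereVolumeMap here is a stand-in with only the fields this sketch needs;
// the real type in the vSphere cloud provider carries more state.
type VsphereVolumeMap struct {
	lock          sync.RWMutex
	verifiedNodes map[k8stypes.NodeName]bool // assumed field name
}

// CheckForNode reports whether the node was already processed by the volume
// verification mechanism, under a read lock so concurrent verifiers can query it.
func (vsphereVolume *VsphereVolumeMap) CheckForNode(nodeName k8stypes.NodeName) bool {
	vsphereVolume.lock.RLock()
	defer vsphereVolume.lock.RUnlock()
	return vsphereVolume.verifiedNodes[nodeName]
}

func main() {
	m := &VsphereVolumeMap{verifiedNodes: map[k8stypes.NodeName]bool{"node-a": true}}
	fmt.Println(m.CheckForNode("node-a"), m.CheckForNode("node-b")) // true false
}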