Merge pull request #40118 from vmware/FixdetachVolumeOnNodeOff

Automatic merge from submit-queue (batch tested with PRs 41037, 40118, 40959, 41084, 41092)

Fix for detach volume when node is not present/powered off

Fixes #33061 
When a VM is reported as no longer present by the cloud provider and is deleted by the node controller, no attempt is made to detach its volumes. This happens, for example, when a VM is powered off or paused and its pods are migrated to other nodes. In the case of vSphere, the VM cannot be started again, because it still holds mount points to volumes that are now mounted to other VMs.

To re-join this node to the cluster, you have to manually detach these volumes from the powered-off VM before starting it, as sketched below.
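For reference, a minimal govmomi sketch of that manual workaround (not part of this PR; the vCenter URL, datacenter lookup, VM name, and the "kubevols" path filter are assumptions to adapt to your environment):

```go
package main

import (
	"context"
	"log"
	"net/url"
	"strings"

	"github.com/vmware/govmomi"
	"github.com/vmware/govmomi/find"
	"github.com/vmware/govmomi/vim25/types"
)

func main() {
	ctx := context.Background()

	// Placeholder credentials; replace with your own vCenter endpoint.
	u, err := url.Parse("https://user:pass@vcenter.example.com/sdk")
	if err != nil {
		log.Fatal(err)
	}
	c, err := govmomi.NewClient(ctx, u, true) // true: skip TLS verification
	if err != nil {
		log.Fatal(err)
	}

	finder := find.NewFinder(c.Client, true)
	dc, err := finder.DefaultDatacenter(ctx)
	if err != nil {
		log.Fatal(err)
	}
	finder.SetDatacenter(dc)

	vm, err := finder.VirtualMachine(ctx, "kubernetes-node-1") // placeholder VM name
	if err != nil {
		log.Fatal(err)
	}

	devices, err := vm.Device(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Select only the Kubernetes data disks, not the node's boot disk. The
	// vSphere provider keeps its volumes under the "kubevols" folder by
	// default; adjust the filter if your setup differs.
	var toDetach []types.BaseVirtualDevice
	for _, d := range devices.SelectByType((*types.VirtualDisk)(nil)) {
		disk := d.(*types.VirtualDisk)
		if b, ok := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok &&
			strings.Contains(b.FileName, "kubevols") {
			toDetach = append(toDetach, d)
		}
	}
	if len(toDetach) == 0 {
		log.Println("no kubevols disks attached; nothing to do")
		return
	}

	// keepFiles=true detaches the disks without deleting the backing VMDKs,
	// which are still mounted by the VMs the pods migrated to.
	if err := vm.RemoveDevice(ctx, true, toDetach...); err != nil {
		log.Fatal(err)
	}
}
```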

This fix makes sure the mount points are deleted when the VM is powered off; since all the mount points are gone, the VM can be powered on again.

This is a workaround proposal only; I still don't see Kubernetes issuing a detach request to the vSphere cloud provider, which should be the case (details in the original issue #33061). The sketch below shows why the changed return values matter to a reconciling caller.
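A hedged sketch, not the actual attach/detach controller code (`diskManager` and `verifyAndMaybeDetach` are hypothetical stand-ins), illustrating the difference between returning `(false, nil)` and returning an error when the node is missing:

```go
package main

import "fmt"

// diskManager is a hypothetical stand-in for the two provider calls this PR
// touches; the real controller goes through the volume plugin machinery.
type diskManager interface {
	DiskIsAttached(volPath, node string) (bool, error)
	DetachDisk(volPath, node string) error
}

// verifyAndMaybeDetach models one reconciliation step for a single volume.
func verifyAndMaybeDetach(m diskManager, volPath, node string) error {
	attached, err := m.DiskIsAttached(volPath, node)
	if err != nil {
		// Unknown state (e.g. the node is not present): propagate the error
		// so the caller keeps the volume marked as attached and retries.
		return fmt.Errorf("cannot verify %s on %s: %v", volPath, node, err)
	}
	if !attached {
		// (false, nil) is treated as ground truth: the volume is dropped
		// from the actual state of the world and no detach is ever issued.
		// Before this PR, a missing/powered-off node always took this path.
		return nil
	}
	return m.DetachDisk(volPath, node)
}

func main() {} // illustrative only; wire in a real diskManager to use it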

@luomiao @kerneltime @pdhamdhere @jingxu97 @saad-ali
Kubernetes Submit Queue 2017-02-09 16:44:40 -08:00 committed by GitHub
commit 052f3b9d4c

@@ -924,11 +924,12 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (bool, error) {
 	}
 	if !nodeExist {
-		glog.Warningf(
-			"Node %q does not exist. DiskIsAttached will assume vmdk %q is not attached to it.",
-			vSphereInstance,
-			volPath)
-		return false, nil
+		glog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached: node %q does not exist",
+			volPath,
+			vSphereInstance)
+		return false, fmt.Errorf("DiskIsAttached failed to determine whether disk %q is still attached: node %q does not exist",
+			volPath,
+			vSphereInstance)
 	}
 	// Get VM device list
@@ -975,11 +976,12 @@ func (vs *VSphere) DisksAreAttached(volPaths []string, nodeName k8stypes.NodeName) (map[string]bool, error) {
 	}
 	if !nodeExist {
-		glog.Warningf(
-			"Node %q does not exist. DisksAreAttached will assume vmdk %v are not attached to it.",
-			vSphereInstance,
-			volPaths)
-		return attached, nil
+		glog.Errorf("DisksAreAttached failed to determine whether disks %v are still attached: node %q does not exist",
+			volPaths,
+			vSphereInstance)
+		return attached, fmt.Errorf("DisksAreAttached failed to determine whether disks %v are still attached: node %q does not exist",
+			volPaths,
+			vSphereInstance)
 	}
 	// Get VM device list
@@ -1145,21 +1147,6 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error {
 		vSphereInstance = nodeNameToVMName(nodeName)
 	}
-	nodeExist, err := vs.NodeExists(vs.client, nodeName)
-	if err != nil {
-		glog.Errorf("Failed to check whether node exist. err: %s.", err)
-		return err
-	}
-	if !nodeExist {
-		glog.Warningf(
-			"Node %q does not exist. DetachDisk will assume vmdk %q is not attached to it.",
-			nodeName,
-			volPath)
-		return nil
-	}
 	vm, vmDevices, _, dc, err := getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance)
 	if err != nil {
 		return err
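
Net effect of the final hunk: with the NodeExists guard removed, DetachDisk falls through to getVirtualMachineDevices, so a VM that genuinely cannot be found now surfaces as an error from that lookup rather than a silent nil return, matching the new error-returning behavior of DiskIsAttached and DisksAreAttached.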