Merge pull request #56648 from vmware/fix_attach_bug

Automatic merge from submit-queue (batch tested with PRs 56480, 56675, 56624, 56648, 56658). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix session timeout in vSphere Cloud Provider

**What this PR does / why we need it**:
When a VM is migrated to a different vCenter, attaching a disk returns an error even though the disk is attached successfully. In addition, when a PVC is created to dynamically provision a volume, provisioning fails because the expired vCenter session is never renewed. This PR fixes both issues.
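The renewal half of the fix follows a simple pattern: walk all registered nodes, but refresh each vCenter server's session at most once. A minimal, self-contained sketch of that pattern (the `Connection` type, its `Connect` method, and the maps below are simplified stand-ins, not the provider's real types):

```go
package main

import (
	"context"
	"fmt"
)

// Connection is a simplified stand-in for the provider's connection type.
type Connection struct{ server string }

// Connect stands in for the real method, which re-authenticates with the
// vCenter server when the cached session has expired.
func (c *Connection) Connect(ctx context.Context) error {
	fmt.Printf("session renewed for %s\n", c.server)
	return nil
}

func main() {
	// node -> vCenter server, and server -> connection, mirroring the
	// node manager's two maps.
	nodes := map[string]string{"node-a": "vc-1", "node-b": "vc-1", "node-c": "vc-2"}
	conns := map[string]*Connection{"vc-1": {server: "vc-1"}, "vc-2": {server: "vc-2"}}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	refreshed := make(map[string]bool) // renew each server's session at most once
	for node, server := range nodes {
		if refreshed[server] {
			continue
		}
		conn, ok := conns[server]
		if !ok {
			fmt.Printf("no connection for %s (node %s)\n", server, node)
			return
		}
		if err := conn.Connect(ctx); err != nil {
			return
		}
		refreshed[server] = true
	}
}
```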

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes https://github.com/vmware/kubernetes/issues/393, fixes https://github.com/vmware/kubernetes/issues/391

**Special notes for your reviewer**:
Internal review PR: https://github.com/vmware/kubernetes/pull/396

**Release note**:

```release-note
NONE
```

Tests done:

Test for fix https://github.com/vmware/kubernetes/issues/391 (error printed on attach disk):
1. Created a StorageClass and a PVC to dynamically provision a volume.
2. Migrated the VM to a different vCenter.
3. Created a Pod using the volume provisioned in step 1.
4. Ran `kubectl describe pod`.

After fix: the error message no longer appears.

Test for fix https://github.com/vmware/kubernetes/pull/396 (session timeout):
Tests which reported this issue:
`go run hack/e2e.go --check-version-skew=false --v --test '--test_args=--ginkgo.focus=Selector-Label\sVolume\sBinding:vsphere'`

After fix: these tests did not report any errors.
Merged by Kubernetes Submit Queue on 2017-12-16 03:24:48 -08:00, committed by GitHub (commit f0bd07f879).
3 changed files with 39 additions and 9 deletions


```diff
@@ -265,14 +265,33 @@ func (nm *NodeManager) GetNodeInfo(nodeName k8stypes.NodeName) (NodeInfo, error)
 	return *nodeInfo, nil
 }
 
-func (nm *NodeManager) GetNodeDetails() []NodeDetails {
+func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) {
 	nm.nodeInfoLock.RLock()
 	defer nm.nodeInfoLock.RUnlock()
 	var nodeDetails []NodeDetails
+	vsphereSessionRefreshMap := make(map[string]bool)
+
+	// Create context
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	for nodeName, nodeInfo := range nm.nodeInfoMap {
 		nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm})
+		if vsphereSessionRefreshMap[nodeInfo.vcServer] {
+			continue
+		}
+		vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer]
+		if vsphereInstance == nil {
+			err := fmt.Errorf("vSphereInstance for vc server %q not found while looking for vm %q", nodeInfo.vcServer, nodeInfo.vm)
+			return nil, err
+		}
+		err := vsphereInstance.conn.Connect(ctx)
+		if err != nil {
+			return nil, err
+		}
+		vsphereSessionRefreshMap[nodeInfo.vcServer] = true
 	}
-	return nodeDetails
+	return nodeDetails, nil
 }
 
 func (nm *NodeManager) addNodeInfo(nodeName string, nodeInfo *NodeInfo) {
```
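Since `GetNodeDetails` now returns an error alongside the slice, callers can no longer treat a nil result as the only failure mode. A minimal, self-contained sketch of the call-site change (the types here are simplified stand-ins, not the provider's real ones):

```go
package main

import (
	"errors"
	"fmt"
)

// NodeDetails and NodeManager are simplified stand-ins for the provider's types.
type NodeDetails struct{ NodeName string }

type NodeManager struct{ sessionOK bool }

// GetNodeDetails mirrors the new two-value signature introduced by this PR.
func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) {
	if !nm.sessionOK {
		return nil, errors.New("failed to renew vCenter session")
	}
	return []NodeDetails{{"node-a"}, {"node-b"}}, nil
}

func main() {
	nm := &NodeManager{sessionOK: true}
	// Callers must now check the error instead of relying on a nil slice.
	details, err := nm.GetNodeDetails()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, d := range details {
		fmt.Println(d.NodeName)
	}
}
```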


```diff
@@ -639,7 +639,8 @@ func (vs *VSphere) InstanceID(nodeName k8stypes.NodeName) (string, error) {
 	instanceID, err := instanceIDInternal()
 	if err != nil {
-		isManagedObjectNotFoundError, err := vs.retry(nodeName, err)
+		var isManagedObjectNotFoundError bool
+		isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
 		if isManagedObjectNotFoundError {
 			if err == nil {
 				glog.V(4).Infof("InstanceID: Found node %q", convertToString(nodeName))
@@ -729,14 +730,17 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyName string, nodeN
 		requestTime := time.Now()
 		diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName)
 		if err != nil {
-			isManagedObjectNotFoundError, err := vs.retry(nodeName, err)
+			var isManagedObjectNotFoundError bool
+			isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
 			if isManagedObjectNotFoundError {
 				if err == nil {
 					glog.V(4).Infof("AttachDisk: Found node %q", convertToString(nodeName))
 					diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyName, nodeName)
+					glog.V(4).Infof("AttachDisk: Retry: node %q, diskUUID %s, err %+v", convertToString(nodeName), diskUUID, err)
 				}
 			}
 		}
+		glog.V(4).Infof("AttachDisk executed for node %s and volume %s with diskUUID %s. Err: %s", convertToString(nodeName), vmDiskPath, diskUUID, err)
 		vclib.RecordvSphereMetric(vclib.OperationAttachVolume, requestTime, err)
 		return diskUUID, err
 	}
@@ -792,7 +796,8 @@ func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error
 		requestTime := time.Now()
 		err := detachDiskInternal(volPath, nodeName)
 		if err != nil {
-			isManagedObjectNotFoundError, err := vs.retry(nodeName, err)
+			var isManagedObjectNotFoundError bool
+			isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
 			if isManagedObjectNotFoundError {
 				if err == nil {
 					err = detachDiskInternal(volPath, nodeName)
@@ -847,7 +852,8 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b
 		requestTime := time.Now()
 		isAttached, err := diskIsAttachedInternal(volPath, nodeName)
 		if err != nil {
-			isManagedObjectNotFoundError, err := vs.retry(nodeName, err)
+			var isManagedObjectNotFoundError bool
+			isManagedObjectNotFoundError, err = vs.retry(nodeName, err)
 			if isManagedObjectNotFoundError {
 				if err == vclib.ErrNoVMFound {
 					isAttached, err = false, nil
```
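The repeated `var isManagedObjectNotFoundError bool` change in this file works around a classic Go pitfall: using `:=` inside the `if` block declares a new `err` that shadows the function-scoped one, so the result of `vs.retry` never reaches the code below. A standalone sketch of the pitfall and of the PR's pattern (the `retry` helper here is a stand-in, not the provider's method):

```go
package main

import (
	"errors"
	"fmt"
)

// retry stands in for vs.retry: it reports whether the failure was a
// ManagedObjectNotFound error and returns the outcome of re-resolving the VM.
func retry() (bool, error) { return true, nil }

func main() {
	err := errors.New("initial failure")
	if err != nil {
		// BUG: ':=' declares a new, block-scoped err that shadows the outer
		// one, so the outer err still holds the initial failure afterwards.
		isNotFound, err := retry()
		fmt.Println(isNotFound, err) // true <nil>
	}
	fmt.Println(err) // still "initial failure": the retry result was lost

	// The PR's pattern: declare the bool separately and assign with '=',
	// so the outer err is actually updated by the retry.
	var isNotFound bool
	isNotFound, err = retry()
	fmt.Println(isNotFound, err) // true <nil>
}
```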


```diff
@@ -187,8 +187,13 @@ func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nod
 
 // Get all datastores accessible for the virtual machine object.
 func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) {
-	nodeVmDetails := nodeManager.GetNodeDetails()
-	if nodeVmDetails == nil || len(nodeVmDetails) == 0 {
+	nodeVmDetails, err := nodeManager.GetNodeDetails()
+	if err != nil {
+		glog.Errorf("Error while obtaining Kubernetes node nodeVmDetail details. error : %+v", err)
+		return nil, err
+	}
+
+	if len(nodeVmDetails) == 0 {
 		msg := fmt.Sprintf("Kubernetes node nodeVmDetail details is empty. nodeVmDetails : %+v", nodeVmDetails)
 		glog.Error(msg)
 		return nil, fmt.Errorf(msg)
@@ -210,7 +215,7 @@ func getSharedDatastoresInK8SCluster(ctx context.Context, dc *vclib.Datacenter,
 		}
 	}
 	glog.V(9).Infof("sharedDatastores : %+v", sharedDatastores)
-	sharedDatastores, err := getDatastoresForEndpointVC(ctx, dc, sharedDatastores)
+	sharedDatastores, err = getDatastoresForEndpointVC(ctx, dc, sharedDatastores)
 	if err != nil {
 		glog.Errorf("Failed to get shared datastores from endpoint VC. err: %+v", err)
 		return nil, err
```