From c51e843f3c800f610ed03bda484a592170133fb1 Mon Sep 17 00:00:00 2001
From: Jan Safranek
Date: Wed, 7 Mar 2018 17:39:24 +0100
Subject: [PATCH] Mark reconstructed volumes as reported InUse

When a newly started kubelet finds a directory where a volume should be,
it can be fairly confident that the volume was mounted by previous
kubelet and therefore the volume must have been in
node.status.volumesInUse.

Therefore we can mark reconstructed volumes as already reported so
subsequent reconcile() can fix the directory and put the mounted volume
into actual state of world.
---
 .../volumemanager/reconciler/reconciler.go    | 30 +++++++++++++++----
 1 file changed, 25 insertions(+), 5 deletions(-)

diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go
index 4cec404ac3e..3ad88500354 100644
--- a/pkg/kubelet/volumemanager/reconciler/reconciler.go
+++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go
@@ -357,21 +357,39 @@ func (rc *reconciler) syncStates() {
 		return
 	}
 	volumesNeedUpdate := make(map[v1.UniqueVolumeName]*reconstructedVolume)
+	volumeNeedReport := []v1.UniqueVolumeName{}
 	for _, volume := range podVolumes {
-		if rc.desiredStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
-			glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
-			continue
-		}
 		if rc.actualStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
 			glog.V(4).Infof("Volume exists in actual state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
+			// There is nothing to reconstruct
 			continue
 		}
+		volumeInDSW := rc.desiredStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName)
+
 		reconstructedVolume, err := rc.reconstructVolume(volume)
 		if err != nil {
+			if volumeInDSW {
+				// Some pod needs the volume, don't clean it up and hope that
+				// reconcile() calls SetUp and reconstructs the volume in ASW.
+				glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
+				continue
+			}
+			// No pod needs the volume.
 			glog.Warningf("Could not construct volume information, cleanup the mounts. (pod.UID %s, volume.SpecName %s): %v", volume.podName, volume.volumeSpecName, err)
 			rc.cleanupMounts(volume)
 			continue
 		}
+		if volumeInDSW {
+			// Some pod needs the volume. And it exists on disk. Some previous
+			// kubelet must have created the directory, therefore it must have
+			// reported the volume as in use. Mark the volume as in use also in
+			// this new kubelet so reconcile() calls SetUp and re-mounts the
+			// volume if it's necessary.
+			volumeNeedReport = append(volumeNeedReport, reconstructedVolume.volumeName)
+			glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), marking as InUse", volume.volumeSpecName, volume.podName)
+			continue
+		}
+		// There is no pod that uses the volume.
 		if rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, nestedpendingoperations.EmptyUniquePodName) {
 			glog.Warning("Volume is in pending operation, skip cleaning up mounts")
 		}
@@ -386,7 +404,9 @@ func (rc *reconciler) syncStates() {
 		glog.Errorf("Error occurred during reconstruct volume from disk: %v", err)
 	}
 }
-
+	if len(volumeNeedReport) > 0 {
+		rc.desiredStateOfWorld.MarkVolumesReportedInUse(volumeNeedReport)
+	}
 }

 func (rc *reconciler) cleanupMounts(volume podVolume) {