mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-08-10 20:42:26 +00:00
Mark reconstructed volumes as reported InUse
When a newly started kubelet finds a directory where a volume should be, it can be fairly confident that the volume was mounted by a previous kubelet, and therefore the volume must have been in node.status.volumesInUse. We can therefore mark reconstructed volumes as already reported, so a subsequent reconcile() can fix the directory and put the mounted volume into the actual state of the world.
This commit is contained in:
parent
cca92f67aa
commit
c51e843f3c
@@ -357,21 +357,39 @@ func (rc *reconciler) syncStates() {
|
||||
return
|
||||
}
|
||||
volumesNeedUpdate := make(map[v1.UniqueVolumeName]*reconstructedVolume)
|
||||
volumeNeedReport := []v1.UniqueVolumeName{}
|
||||
for _, volume := range podVolumes {
|
||||
if rc.desiredStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
|
||||
glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
|
||||
continue
|
||||
}
|
||||
if rc.actualStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
|
||||
glog.V(4).Infof("Volume exists in actual state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
|
||||
// There is nothing to reconstruct
|
||||
continue
|
||||
}
|
||||
volumeInDSW := rc.desiredStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName)
|
||||
|
||||
reconstructedVolume, err := rc.reconstructVolume(volume)
|
||||
if err != nil {
|
||||
if volumeInDSW {
|
||||
// Some pod needs the volume, don't clean it up and hope that
|
||||
// reconcile() calls SetUp and reconstructs the volume in ASW.
|
||||
glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), skip cleaning up mounts", volume.volumeSpecName, volume.podName)
|
||||
continue
|
||||
}
|
||||
// No pod needs the volume.
|
||||
glog.Warningf("Could not construct volume information, cleanup the mounts. (pod.UID %s, volume.SpecName %s): %v", volume.podName, volume.volumeSpecName, err)
|
||||
rc.cleanupMounts(volume)
|
||||
continue
|
||||
}
|
||||
if volumeInDSW {
|
||||
// Some pod needs the volume. And it exists on disk. Some previous
|
||||
// kubelet must have created the directory, therefore it must have
|
||||
// reported the volume as in use. Mark the volume as in use also in
|
||||
// this new kubelet so reconcile() calls SetUp and re-mounts the
|
||||
// volume if it's necessary.
|
||||
volumeNeedReport = append(volumeNeedReport, reconstructedVolume.volumeName)
|
||||
glog.V(4).Infof("Volume exists in desired state (volume.SpecName %s, pod.UID %s), marking as InUse", volume.volumeSpecName, volume.podName)
|
||||
continue
|
||||
}
|
||||
// There is no pod that uses the volume.
|
||||
if rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, nestedpendingoperations.EmptyUniquePodName) {
|
||||
glog.Warning("Volume is in pending operation, skip cleaning up mounts")
|
||||
}
|
||||
@@ -386,7 +404,9 @@ func (rc *reconciler) syncStates() {
|
||||
glog.Errorf("Error occurred during reconstruct volume from disk: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(volumeNeedReport) > 0 {
|
||||
rc.desiredStateOfWorld.MarkVolumesReportedInUse(volumeNeedReport)
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *reconciler) cleanupMounts(volume podVolume) {
|
||||
|
Loading…
Reference in New Issue
Block a user