diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go
index 0aa8456a23a..f4a465b0fd4 100644
--- a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go
+++ b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go
@@ -2326,15 +2326,31 @@ func TestSyncStates(t *testing.T) {
 				filepath.Join("pod2uid", "volumes", "fake-plugin", "volume-name"),
 			},
 			createMountPoint: true,
-			podInfos:         []podInfo{defaultPodInfo},
+			podInfos: []podInfo{
+				{
+					podName:         "pod2",
+					podUID:          "pod2uid",
+					outerVolumeName: "volume-name",
+					innerVolumeName: "volume-name",
+				},
+			},
 			verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
 				// for pod that is deleted, volume is considered as mounted
 				mountedPods := rcInstance.actualStateOfWorld.GetMountedVolumes()
 				if len(mountedPods) != 1 {
 					return fmt.Errorf("expected 1 pods to in asw got %d", len(mountedPods))
 				}
-				if types.UniquePodName("pod2uid") != mountedPods[0].PodName {
-					return fmt.Errorf("expected mounted pod to be %s got %s", "pod2uid", mountedPods[0].PodName)
+				if types.UniquePodName("pod1uid") != mountedPods[0].PodName {
+					return fmt.Errorf("expected mounted pod to be %s got %s", "pod1uid", mountedPods[0].PodName)
+				}
+
+				// for pod that is in dsw, volume is in skippedDuringReconstruction
+				skippedVolumes := rcInstance.skippedDuringReconstruction
+				if len(skippedVolumes) != 1 {
+					return fmt.Errorf("expected 1 pods to in skippedDuringReconstruction got %d", len(skippedVolumes))
+				}
+				if skippedVolumes["fake-plugin/volume-name"] == nil {
+					return fmt.Errorf("expected %s is in skippedDuringReconstruction, got %+v", "fake-plugin/volume-name", skippedVolumes)
 				}
 				return nil
 			},
diff --git a/pkg/kubelet/volumemanager/reconciler/reconstruct.go b/pkg/kubelet/volumemanager/reconciler/reconstruct.go
index 71bc69e8f0b..ee00537da90 100644
--- a/pkg/kubelet/volumemanager/reconciler/reconstruct.go
+++ b/pkg/kubelet/volumemanager/reconciler/reconstruct.go
@@ -80,10 +80,6 @@ func (rc *reconciler) syncStates(kubeletPodDir string) {
 			blockVolumeMapper: reconstructedVolume.blockVolumeMapper,
 			mounter:           reconstructedVolume.mounter,
 		}
-		if cachedInfo, ok := volumesNeedUpdate[reconstructedVolume.volumeName]; ok {
-			gvl = cachedInfo
-		}
-		gvl.addPodVolume(reconstructedVolume)
 		if volumeInDSW {
 			// Some pod needs the volume. And it exists on disk. Some previous
 			// kubelet must have created the directory, therefore it must have
@@ -91,6 +87,10 @@ func (rc *reconciler) syncStates(kubeletPodDir string) {
 			// this new kubelet so reconcile() calls SetUp and re-mounts the
 			// volume if it's necessary.
 			volumeNeedReport = append(volumeNeedReport, reconstructedVolume.volumeName)
+			if cachedInfo, ok := rc.skippedDuringReconstruction[reconstructedVolume.volumeName]; ok {
+				gvl = cachedInfo
+			}
+			gvl.addPodVolume(reconstructedVolume)
 			rc.skippedDuringReconstruction[reconstructedVolume.volumeName] = gvl
 			klog.V(4).InfoS("Volume exists in desired state, marking as InUse", "podName", volume.podName, "volumeSpecName", volume.volumeSpecName)
 			continue
@@ -100,6 +100,10 @@ func (rc *reconciler) syncStates(kubeletPodDir string) {
 			klog.InfoS("Volume is in pending operation, skip cleaning up mounts")
 		}
 		klog.V(2).InfoS("Reconciler sync states: could not find pod information in desired state, update it in actual state", "reconstructedVolume", reconstructedVolume)
+		if cachedInfo, ok := volumesNeedUpdate[reconstructedVolume.volumeName]; ok {
+			gvl = cachedInfo
+		}
+		gvl.addPodVolume(reconstructedVolume)
 		volumesNeedUpdate[reconstructedVolume.volumeName] = gvl
 	}