Merge pull request #119923 from cvvz/fix-119921

fix: Mount point may become local without calling `NodePublishVolume` after node rebooting
Kubernetes Prow Robot 2023-12-13 21:25:51 +01:00 committed by GitHub
commit 26e2cc5299
2 changed files with 27 additions and 7 deletions

@@ -2326,15 +2326,31 @@ func TestSyncStates(t *testing.T) {
 				filepath.Join("pod2uid", "volumes", "fake-plugin", "volume-name"),
 			},
 			createMountPoint: true,
-			podInfos: []podInfo{defaultPodInfo},
+			podInfos: []podInfo{
+				{
+					podName: "pod2",
+					podUID: "pod2uid",
+					outerVolumeName: "volume-name",
+					innerVolumeName: "volume-name",
+				},
+			},
 			verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
 				// for pod that is deleted, volume is considered as mounted
 				mountedPods := rcInstance.actualStateOfWorld.GetMountedVolumes()
 				if len(mountedPods) != 1 {
 					return fmt.Errorf("expected 1 pods to in asw got %d", len(mountedPods))
 				}
-				if types.UniquePodName("pod2uid") != mountedPods[0].PodName {
-					return fmt.Errorf("expected mounted pod to be %s got %s", "pod2uid", mountedPods[0].PodName)
+				if types.UniquePodName("pod1uid") != mountedPods[0].PodName {
+					return fmt.Errorf("expected mounted pod to be %s got %s", "pod1uid", mountedPods[0].PodName)
 				}
+				// for pod that is in dsw, volume is in skippedDuringReconstruction
+				skippedVolumes := rcInstance.skippedDuringReconstruction
+				if len(skippedVolumes) != 1 {
+					return fmt.Errorf("expected 1 pods to in skippedDuringReconstruction got %d", len(skippedVolumes))
+				}
+				if skippedVolumes["fake-plugin/volume-name"] == nil {
+					return fmt.Errorf("expected %s is in skippedDuringReconstruction, got %+v", "fake-plugin/volume-name", skippedVolumes)
+				}
 				return nil
 			},

@@ -80,10 +80,6 @@ func (rc *reconciler) syncStates(kubeletPodDir string) {
 			blockVolumeMapper: reconstructedVolume.blockVolumeMapper,
 			mounter: reconstructedVolume.mounter,
 		}
-		if cachedInfo, ok := volumesNeedUpdate[reconstructedVolume.volumeName]; ok {
-			gvl = cachedInfo
-		}
-		gvl.addPodVolume(reconstructedVolume)
 		if volumeInDSW {
 			// Some pod needs the volume. And it exists on disk. Some previous
 			// kubelet must have created the directory, therefore it must have
@@ -91,6 +87,10 @@ func (rc *reconciler) syncStates(kubeletPodDir string) {
 			// this new kubelet so reconcile() calls SetUp and re-mounts the
 			// volume if it's necessary.
 			volumeNeedReport = append(volumeNeedReport, reconstructedVolume.volumeName)
+			if cachedInfo, ok := rc.skippedDuringReconstruction[reconstructedVolume.volumeName]; ok {
+				gvl = cachedInfo
+			}
+			gvl.addPodVolume(reconstructedVolume)
 			rc.skippedDuringReconstruction[reconstructedVolume.volumeName] = gvl
 			klog.V(4).InfoS("Volume exists in desired state, marking as InUse", "podName", volume.podName, "volumeSpecName", volume.volumeSpecName)
 			continue
@@ -100,6 +100,10 @@ func (rc *reconciler) syncStates(kubeletPodDir string) {
 			klog.InfoS("Volume is in pending operation, skip cleaning up mounts")
 		}
 		klog.V(2).InfoS("Reconciler sync states: could not find pod information in desired state, update it in actual state", "reconstructedVolume", reconstructedVolume)
+		if cachedInfo, ok := volumesNeedUpdate[reconstructedVolume.volumeName]; ok {
+			gvl = cachedInfo
+		}
+		gvl.addPodVolume(reconstructedVolume)
 		volumesNeedUpdate[reconstructedVolume.volumeName] = gvl
 	}
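
Taken together, the two hunks change where a reconstructed pod volume is cached during kubelet startup. Previously the per-volume group (gvl) was taken from the volumesNeedUpdate cache and had the pod added to it before the desired-state-of-world check; when a volume is shared by a pod that is still in the desired state and another that has been deleted, the still-desired pod could end up in a group stored under volumesNeedUpdate and be reported as mounted without reconcile() ever calling SetUp (and, for CSI volumes, NodePublishVolume) again after the reboot. After the fix, the group is fetched from rc.skippedDuringReconstruction for volumes still in the desired state, and from volumesNeedUpdate only for volumes no pod wants, which is exactly what the updated TestSyncStates case asserts.

Below is a minimal, self-contained Go sketch of that corrected bookkeeping. The types and the mergeReconstructed helper are simplified, hypothetical stand-ins for the reconciler's unexported gvl grouping and state, so treat this as an illustration of the control flow rather than the kubelet's actual implementation:

package main

import "fmt"

// reconstructedVolume stands in for a pod volume found on disk during
// reconstruction after a kubelet restart.
type reconstructedVolume struct {
	volumeName string
	podName    string
}

// groupedVolume plays the role of the per-volume grouping (gvl in the diff):
// all pods found using the same volume.
type groupedVolume struct {
	volumeName string
	podVolumes map[string]reconstructedVolume
}

func (g *groupedVolume) addPodVolume(rv reconstructedVolume) {
	if g.podVolumes == nil {
		g.podVolumes = map[string]reconstructedVolume{}
	}
	g.podVolumes[rv.podName] = rv
}

// mergeReconstructed mirrors the fixed ordering: the cached group is fetched
// from skippedDuringReconstruction when the volume is still desired, and from
// volumesNeedUpdate only when no pod wants it anymore.
func mergeReconstructed(rv reconstructedVolume, volumeInDSW bool,
	skippedDuringReconstruction, volumesNeedUpdate map[string]*groupedVolume) {

	gvl := &groupedVolume{volumeName: rv.volumeName}
	if volumeInDSW {
		// Some pod still needs the volume: keep it aside so reconcile()
		// can mount it again (SetUp / NodePublishVolume) if necessary.
		if cached, ok := skippedDuringReconstruction[rv.volumeName]; ok {
			gvl = cached
		}
		gvl.addPodVolume(rv)
		skippedDuringReconstruction[rv.volumeName] = gvl
		return
	}
	// No pod wants the volume: record it for an actual-state update so it
	// can eventually be cleaned up.
	if cached, ok := volumesNeedUpdate[rv.volumeName]; ok {
		gvl = cached
	}
	gvl.addPodVolume(rv)
	volumesNeedUpdate[rv.volumeName] = gvl
}

func main() {
	skipped := map[string]*groupedVolume{}
	update := map[string]*groupedVolume{}

	// The updated test's scenario: pod1 was deleted while the node was down,
	// pod2 still wants the same volume after the reboot.
	mergeReconstructed(reconstructedVolume{"fake-plugin/volume-name", "pod1uid"}, false, skipped, update)
	mergeReconstructed(reconstructedVolume{"fake-plugin/volume-name", "pod2uid"}, true, skipped, update)

	fmt.Println("skippedDuringReconstruction:", len(skipped)) // 1: pod2uid only, left for reconcile() to mount
	fmt.Println("volumesNeedUpdate:", len(update))            // 1: pod1uid only, considered mounted in the actual state
}

With the pre-fix ordering (group looked up in volumesNeedUpdate and populated before the in-DSW check), pod2uid would have been added to the same group that pod1uid stored under volumesNeedUpdate, so its mount could then be reported as ready without SetUp being called; that is the regression the new assertions on skippedDuringReconstruction guard against.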