Merge pull request #59873 from jsafrane/fix-downward-flake

Automatic merge from submit-queue (batch tested with PRs 59873, 59933, 59923, 59944, 59953). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix DownwardAPI refresh race.

WaitForAttachAndMount should only mark the pod for reprocessing in the DesiredStateOfWorldPopulator (DSWP), and the DSWP should mark the pod's volumes for remount only after it has processed the new pod.

Otherwise the DSWP and the reconciler race over which of them gets the new pod first. If the reconciler wins, the pod's DownwardAPI and Projected volumes are not refreshed with the new content and are only updated by the next periodic sync (60-90 seconds later).
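
To make the ordering concrete, here is a minimal, self-contained Go sketch of the two orderings. It is not the kubelet code: the `world` type, its fields, and `populatorOld`/`populatorNew`/`reconcile` are hypothetical stand-ins for the DSWP, the reconciler, and the remount flag.

```go
package main

import "fmt"

// world is a deliberately simplified stand-in for the kubelet's volume state;
// none of these fields exist in the real code.
type world struct {
	podSpecVersion   int  // latest pod spec known to the pod manager
	populatedVersion int  // spec version last processed by the populator (DSWP)
	mountedVersion   int  // spec version whose content is currently on disk
	remountRequired  bool // "this volume's contents need to be rewritten"
}

// populatorOld models the pre-fix DSWP pass: it picks up the new pod spec but
// never requests a remount itself.
func (w *world) populatorOld() { w.populatedVersion = w.podSpecVersion }

// populatorNew models the post-fix DSWP pass: after processing the new pod it
// also requests the remount (the call this PR moves into the populator).
func (w *world) populatorNew() {
	w.populatedVersion = w.podSpecVersion
	w.remountRequired = true
}

// reconcile models the reconciler: if a remount was requested, it rewrites the
// volume from whatever spec version the populator has processed so far.
func (w *world) reconcile() {
	if w.remountRequired {
		w.mountedVersion = w.populatedVersion
		w.remountRequired = false
	}
}

func main() {
	// The pod spec changes from version 1 to 2 (e.g. a label backing a DownwardAPI file).
	old := &world{podSpecVersion: 2, populatedVersion: 1, mountedVersion: 1}
	old.remountRequired = true // old behaviour: WaitForAttachAndMount marks the remount itself
	old.reconcile()            // reconciler wins the race: rewrites stale version 1, clears the flag
	old.populatorOld()         // populator catches up, but nothing triggers another remount
	fmt.Println("old ordering mounts version", old.mountedVersion) // 1 — stale until the periodic sync

	fixed := &world{podSpecVersion: 2, populatedVersion: 1, mountedVersion: 1}
	fixed.populatorNew() // new behaviour: remount is requested only after the new pod is processed
	fixed.reconcile()    // reconciler always rewrites fresh content
	fmt.Println("new ordering mounts version", fixed.mountedVersion) // 2 — fresh content
}
```

In the racy ordering the stale content sits on disk until the next periodic remount, which matches the 60-90 second delay described above.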

Fixes #59813 

/assign @jingxu97 @saad-ali 
/sig storage
/sig node

```release-note
None
```
Committed by Kubernetes Submit Queue on 2018-02-15 20:16:32 -08:00 (via GitHub).
3 changed files with 9 additions and 1 deletion

@@ -83,6 +83,7 @@ func NewDesiredStateOfWorldPopulator(
podManager pod.Manager,
podStatusProvider status.PodStatusProvider,
desiredStateOfWorld cache.DesiredStateOfWorld,
+ actualStateOfWorld cache.ActualStateOfWorld,
kubeContainerRuntime kubecontainer.Runtime,
keepTerminatedPodVolumes bool) DesiredStateOfWorldPopulator {
return &desiredStateOfWorldPopulator{
@@ -92,6 +93,7 @@ func NewDesiredStateOfWorldPopulator(
podManager: podManager,
podStatusProvider: podStatusProvider,
desiredStateOfWorld: desiredStateOfWorld,
+ actualStateOfWorld: actualStateOfWorld,
pods: processedPods{
processedPods: make(map[volumetypes.UniquePodName]bool)},
kubeContainerRuntime: kubeContainerRuntime,
@@ -108,6 +110,7 @@ type desiredStateOfWorldPopulator struct {
podManager pod.Manager
podStatusProvider status.PodStatusProvider
desiredStateOfWorld cache.DesiredStateOfWorld
+ actualStateOfWorld cache.ActualStateOfWorld
pods processedPods
kubeContainerRuntime kubecontainer.Runtime
timeOfLastGetPodStatus time.Time
@@ -302,6 +305,9 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *v1.Pod) {
// some of the volume additions may have failed, should not mark this pod as fully processed
if allVolumesAdded {
dswp.markPodProcessed(uniquePodName)
+ // New pod has been synced. Re-mount all volumes that need it
+ // (e.g. DownwardAPI)
+ dswp.actualStateOfWorld.MarkRemountRequired(uniquePodName)
}
}

@@ -525,6 +525,7 @@ func createDswpWithVolume(t *testing.T, pv *v1.PersistentVolume, pvc *v1.Persist
podtest.NewFakeMirrorClient(), fakeSecretManager, fakeConfigMapManager)
fakesDSW := cache.NewDesiredStateOfWorld(fakeVolumePluginMgr)
+ fakeASW := cache.NewActualStateOfWorld("fake", fakeVolumePluginMgr)
fakeRuntime := &containertest.FakeRuntime{}
fakeStatusManager := status.NewManager(fakeClient, fakePodManager, &statustest.FakePodDeletionSafetyProvider{})
@@ -536,6 +537,7 @@ func createDswpWithVolume(t *testing.T, pv *v1.PersistentVolume, pvc *v1.Persist
podManager: fakePodManager,
podStatusProvider: fakeStatusManager,
desiredStateOfWorld: fakesDSW,
+ actualStateOfWorld: fakeASW,
pods: processedPods{
processedPods: make(map[types.UniquePodName]bool)},
kubeContainerRuntime: fakeRuntime,

@@ -179,6 +179,7 @@ func NewVolumeManager(
podManager,
podStatusProvider,
vm.desiredStateOfWorld,
+ vm.actualStateOfWorld,
kubeContainerRuntime,
keepTerminatedPodVolumes)
vm.reconciler = reconciler.NewReconciler(
@@ -345,7 +346,6 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error {
// Remount plugins for which this is true. (Atomically updating volumes,
// like Downward API, depend on this to update the contents of the volume).
vm.desiredStateOfWorldPopulator.ReprocessPod(uniquePodName)
- vm.actualStateOfWorld.MarkRemountRequired(uniquePodName)
err := wait.Poll(
podAttachAndMountRetryInterval,