Merge pull request #44938 from jayunit100/cleanup-orphan-logging

Automatic merge from submit-queue (batch tested with PRs 45033, 44961, 45021, 45097, 44938)

Clean up the orphan-pod logging that goes on in the sync loop.

**What this PR does / why we need it**:

Fixes #44937  

**Before this PR**, the logs looked like this:

```
E0426 00:06:33.763347   21247 kubelet_volumes.go:114] Orphaned pod "35c4a858-2a12-11e7-910c-42010af00003" found, but volume paths are still present on disk.
E0426 00:06:33.763400   21247 kubelet_volumes.go:114] Orphaned pod "e7676365-1580-11e7-8c27-42010af00003" found, but volume paths are still present on disk.
```

The problem was that every orphaned volume produced its own error line, spamming the log with no summary information.

**After this PR**, the logs look like this:

```
E0426 01:32:27.295568   22261 kubelet_volumes.go:129] Orphaned pod "408b060e-2a1d-11e7-90e8-42010af00003" found, but volume paths are still present on disk. : There were a total of 2 errors similar to this.  Turn up verbosity to see them.
E0426 01:32:29.295515   22261 kubelet_volumes.go:129] Orphaned pod "408b060e-2a1d-11e7-90e8-42010af00003" found, but volume paths are still present on disk. : There were a total of 2 errors similar to this.  Turn up verbosity to see them.
E0426 01:32:31.293180   22261 kubelet_volumes.go:129] Orphaned pod "408b060e-2a1d-11e7-90e8-42010af00003" found, but volume paths are still present on disk. : There were a total of 2 errors similar to this.  Turn up verbosity to see them.
```

And with verbosity turned up, the individual errors are also shown as info logs with full details:

```
E0426 01:34:21.933983   26010 kubelet_volumes.go:129] Orphaned pod "1c565800-2a20-11e7-bbc2-42010af00003" found, but volume paths are still present on disk. : There were a total of 3 errors similar to this.  Turn up verbosity to see them.
I0426 01:34:21.934010   26010 kubelet_volumes.go:131] Orphan pod: Orphaned pod "1c565800-2a20-11e7-bbc2-42010af00003" found, but volume paths are still present on disk.
I0426 01:34:21.934015   26010 kubelet_volumes.go:131] Orphan pod: Orphaned pod "408b060e-2a1d-11e7-90e8-42010af00003" found, but volume paths are still present on disk.
I0426 01:34:21.934019   26010 kubelet_volumes.go:131] Orphan pod: Orphaned pod "e7676365-1580-11e7-8c27-42010af00003" found, but volume paths are still present on disk.
```
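
The pattern behind this is small: collect the per-pod errors into a slice, log the first one plus a count at the error level, and only emit every individual message at a higher verbosity (the actual change, shown in the diff below, does this with a `logSpew` helper gated by `glog.V(5)`). Here is a minimal standalone sketch of the same idea; it uses the standard `log` package and a plain `-v` flag as stand-ins for the kubelet's `glog` setup, and the names `rollUp` and `verbosity` are illustrative, not from the PR:

```go
package main

import (
	"flag"
	"fmt"
	"log"
)

// verbosity stands in for glog's -v flag in this sketch.
var verbosity = flag.Int("v", 0, "verbosity; 5 or higher prints every rolled-up error")

// rollUp logs a one-line summary (first error plus a count) and, when
// verbosity is high enough, each individual error as well.
func rollUp(errs []error) {
	if len(errs) == 0 {
		return
	}
	log.Printf("ERROR: %v : There were a total of %v errors similar to this. Turn up verbosity to see them.", errs[0], len(errs))
	if *verbosity >= 5 {
		for _, err := range errs {
			log.Printf("INFO: Orphan pod: %v", err)
		}
	}
}

func main() {
	flag.Parse()
	// Simulated per-pod failures, similar to what the sync loop collects.
	orphanVolumeErrors := []error{
		fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk.", "1c565800-2a20-11e7-bbc2-42010af00003"),
		fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk.", "408b060e-2a1d-11e7-90e8-42010af00003"),
	}
	rollUp(orphanVolumeErrors)
}
```

Run it plainly to get just the summary line, or with `-v=5` to also get the per-pod lines, mirroring how `glog.V(5)` gates the detailed output in the kubelet.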

**Release note**

```release-note
Roll up volume error messages in the kubelet sync loop.
```

Commit e06fc087e0, authored by Kubernetes Submit Queue (2017-04-28 13:16:47 -07:00), committed by GitHub.

```diff
@@ -78,7 +78,7 @@ func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *v1.Pod, o
 }
 
 // cleanupOrphanedPodDirs removes the volumes of pods that should not be
-// running and that have no containers running.
+// running and that have no containers running. Note that we roll up logs here since it runs in the main loop.
 func (kl *Kubelet) cleanupOrphanedPodDirs(
 	pods []*v1.Pod, runningPods []*kubecontainer.Pod) error {
 	allPods := sets.NewString()
@@ -93,7 +93,10 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(
 	if err != nil {
 		return err
 	}
-	errlist := []error{}
+	orphanRemovalErrors := []error{}
+	orphanVolumeErrors := []error{}
 	for _, uid := range found {
 		if allPods.Has(string(uid)) {
 			continue
@@ -107,18 +110,29 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(
 		// If there are still volume directories, do not delete directory
 		volumePaths, err := kl.getPodVolumePathListFromDisk(uid)
 		if err != nil {
-			glog.Errorf("Orphaned pod %q found, but error %v occurred during reading volume dir from disk", uid, err)
+			orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but error %v occurred during reading volume dir from disk", uid, err))
 			continue
 		}
 		if len(volumePaths) > 0 {
-			glog.Errorf("Orphaned pod %q found, but volume paths are still present on disk.", uid)
+			orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk.", uid))
 			continue
 		}
 		glog.V(3).Infof("Orphaned pod %q found, removing", uid)
 		if err := removeall.RemoveAllOneFilesystem(kl.mounter, kl.getPodDir(uid)); err != nil {
 			glog.Errorf("Failed to remove orphaned pod %q dir; err: %v", uid, err)
-			errlist = append(errlist, err)
+			orphanRemovalErrors = append(orphanRemovalErrors, err)
 		}
 	}
-	return utilerrors.NewAggregate(errlist)
+	logSpew := func(errs []error) {
+		if len(errs) > 0 {
+			glog.Errorf("%v : There were a total of %v errors similar to this. Turn up verbosity to see them.", errs[0], len(errs))
+			for _, err := range errs {
+				glog.V(5).Infof("Orphan pod: %v", err)
+			}
+		}
+	}
+	logSpew(orphanVolumeErrors)
+	logSpew(orphanRemovalErrors)
+	return utilerrors.NewAggregate(orphanRemovalErrors)
 }
```
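
One detail worth noting about the return value: only the removal errors feed the aggregate that `cleanupOrphanedPodDirs` returns; the rolled-up volume errors are surfaced through the log only. The short sketch below illustrates how `utilerrors.NewAggregate` behaves in both cases (it assumes `k8s.io/apimachinery` is available on the module path, and the error strings are made up for illustration):

```go
package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	// No removal errors: the aggregate is nil, so the caller sees success even
	// if volume errors were rolled up into the log.
	fmt.Println(utilerrors.NewAggregate(nil) == nil) // true

	// With removal errors, the aggregate combines them into one error value.
	removalErrs := []error{
		fmt.Errorf("failed to remove orphaned pod %q dir", "408b060e-2a1d-11e7-90e8-42010af00003"),
		fmt.Errorf("failed to remove orphaned pod %q dir", "e7676365-1580-11e7-8c27-42010af00003"),
	}
	if agg := utilerrors.NewAggregate(removalErrs); agg != nil {
		fmt.Println(agg.Error())
	}
}
```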