Merge pull request #29641 from ivan4th/fix-configmap-race

Automatic merge from submit-queue

Fix wrapped volume race

**EDIT:** now covers configmap, secret, downwardapi & git_repo volume plugins.

Fixes #29297.

`wrappedVolumeSpec`, used by `configMapVolumeMounter` and
`configMapVolumeUnmounter`, contained a pointer to `api.Volume` that was
being patched by `NewWrapperMounter`/`NewWrapperUnmounter`, causing a race
condition during configMap volume mounts.
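
To illustrate the mechanism, here is a minimal, self-contained sketch of the failure mode, using hypothetical stand-in types rather than the real kubelet ones; run it with `go run -race` to see the race report:

```go
// sketch.go -- hypothetical types standing in for volume.Spec / api.Volume.
package main

import (
	"fmt"
	"sync"
)

type Volume struct{ Name string }
type Spec struct{ Volume *Volume }

// Shared package-level spec, analogous to the old
// `var wrappedVolumeSpec = volume.Spec{...}`: every caller gets the same
// pointer to the same inner Volume.
var wrappedVolumeSpec = Spec{Volume: &Volume{}}

// newWrapperMounter stands in for NewWrapperMounter, which patched the
// inner volume of the spec it was handed.
func newWrapperMounter(volName string, spec Spec) string {
	spec.Volume.Name = volName // both goroutines write to the same Volume
	return spec.Volume.Name    // may observe the other goroutine's name
}

func main() {
	var wg sync.WaitGroup
	for _, name := range []string{"foo-files", "bar-files"} {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			fmt.Println(newWrapperMounter(name, wrappedVolumeSpec))
		}(name)
	}
	wg.Wait()
}
```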

See https://github.com/kubernetes/kubernetes/issues/29297#issuecomment-235403806 for a complete explanation.
This subtle bug was introduced by #18445. It can also affect other volume plugins that use the `wrappedVolumeSpec` technique; if this PR is correct/accepted, I'll make more PRs for secrets etc. Although the tmpfs variety of the inner `emptyDir` volume appears to be less susceptible to this race, there's a chance it can fail too.
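
On the same stand-in types as above, the fix applied below boils down to turning the shared variable into a function, so each mounter and unmounter gets a fresh spec with its own inner volume:

```go
// Fix pattern (hypothetical sketch types, as above): wrappedVolumeSpec is now
// a function, so each call returns a Spec with a freshly allocated Volume.
package main

import "fmt"

type Volume struct{ Name string }
type Spec struct{ Volume *Volume }

func wrappedVolumeSpec() Spec {
	return Spec{Volume: &Volume{}}
}

func main() {
	a, b := wrappedVolumeSpec(), wrappedVolumeSpec()
	a.Volume.Name = "foo-files" // patching a's Volume cannot affect b's
	fmt.Println(a.Volume.Name, b.Volume.Name == "")
}
```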

The errors produced by this race look like this:
```
Jul 19 17:05:21 ubuntu1604 kubelet[17097]: I0719 17:05:21.854303   17097 reconciler.go:253] MountVolume operation started for volume "kubernetes.io/configmap/foo-files" (spec.Name: "files") to pod "11786582-4dbf-11e6-9fc9-64cca009c636" (UID: "11786582-4dbf-11e6-9fc9-64cca009c636").
Jul 19 17:05:21 ubuntu1604 kubelet[17097]: I0719 17:05:21.854842   17097 reconciler.go:253] MountVolume operation started for volume "kubernetes.io/configmap/bar-files" (spec.Name: "files") to pod "117d2c22-4dbf-11e6-9fc9-64cca009c636" (UID: "117d2c22-4dbf-11e6-9fc9-64cca009c636").
Jul 19 17:05:21 ubuntu1604 kubelet[17097]: E0719 17:05:21.860796   17097 configmap.go:171] Error creating atomic writer: stat /var/lib/kubelet/pods/117d2c22-4dbf-11e6-9fc9-64cca009c636/volumes/kubernetes.io~configmap/files: no such file or directory
Jul 19 17:05:21 ubuntu1604 kubelet[17097]: E0719 17:05:21.861070   17097 goroutinemap.go:155] Operation for "kubernetes.io/configmap/bar-files" failed. No retries permitted until 2016-07-19 17:07:21.861036886 +0200 CEST (durationBeforeRetry 2m0s). error: MountVolume.SetUp failed for volume "kubernetes.io/configmap/bar-files" (spec.Name: "files") pod "117d2c22-4dbf-11e6-9fc9-64cca009c636" (UID: "117d2c22-4dbf-11e6-9fc9-64cca009c636") with: stat /var/lib/kubelet/pods/117d2c22-4dbf-11e6-9fc9-64cca009c636/volumes/kubernetes.io~configmap/files: no such file or directory
Jul 19 17:05:21 ubuntu1604 kubelet[17097]: E0719 17:05:21.861271   17097 configmap.go:171] Error creating atomic writer: stat /var/lib/kubelet/pods/11786582-4dbf-11e6-9fc9-64cca009c636/volumes/kubernetes.io~configmap/files: no such file or directory
Jul 19 17:05:21 ubuntu1604 kubelet[17097]: E0719 17:05:21.862284   17097 goroutinemap.go:155] Operation for "kubernetes.io/configmap/foo-files" failed. No retries permitted until 2016-07-19 17:07:21.862275753 +0200 CEST (durationBeforeRetry 2m0s). error: MountVolume.SetUp failed for volume "kubernetes.io/configmap/foo-files" (spec.Name: "files") pod "11786582-4dbf-11e6-9fc9-64cca009c636" (UID: "11786582-4dbf-11e6-9fc9-64cca009c636") with: stat /var/lib/kubelet/pods/11786582-4dbf-11e6-9fc9-64cca009c636/volumes/kubernetes.io~configmap/files: no such file or directory
```

Note "Error creating atomic writer" errors.
This problem can be reproduced by making kubelet mount multiple config map volumes in parallel.
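
For completeness, here is a hypothetical regression-test sketch (not part of this PR) that simulates parallel mounts patching a shared spec, which `go test -race` reliably flags:

```go
// race_test.go -- hypothetical test, with sketch types; not the kubelet code.
package wrappedspec

import (
	"fmt"
	"sync"
	"testing"
)

type volume struct{ name string }
type spec struct{ vol *volume }

// Pre-fix shape: one shared inner volume for every simulated mount.
var sharedSpec = spec{vol: &volume{}}

func TestParallelMountsRace(t *testing.T) {
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// Each simulated mount patches the inner volume name, as
			// NewWrapperMounter did with the real wrapped spec.
			sharedSpec.vol.name = fmt.Sprintf("configmap-%d", i)
		}(i)
	}
	wg.Wait()
}
```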
Merged by k8s-merge-robot on 2016-07-27 14:24:14 -07:00 (committed by GitHub), commit e86b3f266c.
4 changed files with 28 additions and 20 deletions.

**configmap plugin:**

```diff
@@ -121,12 +121,14 @@ func (sv *configMapVolume) GetAttributes() volume.Attributes {
 	}
 }
 
-// This is the spec for the volume that this plugin wraps.
-var wrappedVolumeSpec = volume.Spec{
-	// This should be on a tmpfs instead of the local disk; the problem is
-	// charging the memory for the tmpfs to the right cgroup. We should make
-	// this a tmpfs when we can do the accounting correctly.
-	Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
-}
+func wrappedVolumeSpec() volume.Spec {
+	// This is the spec for the volume that this plugin wraps.
+	return volume.Spec{
+		// This should be on a tmpfs instead of the local disk; the problem is
+		// charging the memory for the tmpfs to the right cgroup. We should make
+		// this a tmpfs when we can do the accounting correctly.
+		Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
+	}
+}
 
 func (b *configMapVolumeMounter) SetUp(fsGroup *int64) error {
@@ -137,7 +139,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)
 
 	// Wrap EmptyDir, let it do the setup.
-	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, &b.pod, *b.opts)
+	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts)
 	if err != nil {
 		return err
 	}
@@ -236,7 +238,7 @@ func (c *configMapVolumeUnmounter) TearDownAt(dir string) error {
 	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
 
 	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID)
+	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
 	if err != nil {
 		return err
 	}
```

**downwardapi plugin:**

```diff
@@ -49,8 +49,10 @@ type downwardAPIPlugin struct {
 var _ volume.VolumePlugin = &downwardAPIPlugin{}
 
-var wrappedVolumeSpec = volume.Spec{
-	Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}},
-}
+func wrappedVolumeSpec() volume.Spec {
+	return volume.Spec{
+		Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}},
+	}
+}
 
 func (plugin *downwardAPIPlugin) Init(host volume.VolumeHost) error {
@@ -144,7 +146,7 @@ func (b *downwardAPIVolumeMounter) SetUp(fsGroup *int64) error {
 func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	glog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir)
 	// Wrap EmptyDir. Here we rely on the idempotency of the wrapped plugin to avoid repeatedly mounting
-	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, b.pod, *b.opts)
+	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), b.pod, *b.opts)
 	if err != nil {
 		glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
 		return err
@@ -233,7 +235,7 @@ func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error {
 	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
 
 	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID)
+	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
 	if err != nil {
 		return err
 	}
```

**git_repo plugin:**

```diff
@@ -41,8 +41,10 @@ type gitRepoPlugin struct {
 var _ volume.VolumePlugin = &gitRepoPlugin{}
 
-var wrappedVolumeSpec = volume.Spec{
-	Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
-}
+func wrappedVolumeSpec() volume.Spec {
+	return volume.Spec{
+		Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
+	}
+}
 
 const (
@@ -155,7 +157,7 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	}
 
 	// Wrap EmptyDir, let it do the setup.
-	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, &b.pod, b.opts)
+	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, b.opts)
 	if err != nil {
 		return err
 	}
@@ -237,7 +239,7 @@ func (c *gitRepoVolumeUnmounter) TearDown() error {
 func (c *gitRepoVolumeUnmounter) TearDownAt(dir string) error {
 	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID)
+	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
 	if err != nil {
 		return err
 	}
```

**secret plugin:**

```diff
@@ -45,8 +45,10 @@ type secretPlugin struct {
 var _ volume.VolumePlugin = &secretPlugin{}
 
-var wrappedVolumeSpec = volume.Spec{
-	Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}},
-}
+func wrappedVolumeSpec() volume.Spec {
+	return volume.Spec{
+		Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}},
+	}
+}
 
 func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
@@ -150,7 +152,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)
 
 	// Wrap EmptyDir, let it do the setup.
-	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, &b.pod, *b.opts)
+	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts)
 	if err != nil {
 		return err
 	}
@@ -249,7 +251,7 @@ func (c *secretVolumeUnmounter) TearDownAt(dir string) error {
 	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
 
 	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID)
+	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
 	if err != nil {
 		return err
 	}
```