Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-27 13:37:30 +00:00.
Merge pull request #29641 from ivan4th/fix-configmap-race
Automatic merge from submit-queue

Fix wrapped volume race

**EDIT:** now covers the configmap, secret, downwardapi & git_repo volume plugins.

Fixes #29297.

The `wrappedVolumeSpec` used by `configMapVolumeMounter` and `configMapVolumeUnmounter` contained a pointer to an `api.Volume` that was being patched by `NewWrapperMounter`/`NewWrapperUnmounter`, causing a race condition during configmap volume mounts. See https://github.com/kubernetes/kubernetes/issues/29297#issuecomment-235403806 for the complete explanation. This subtle bug was introduced by #18445, and it can also affect other volume plugins that use the `wrappedVolumeSpec` technique; if this PR is correct/accepted, I will make more PRs for secrets etc. Although the tmpfs variety of the inner `emptyDir` volume appears to be less susceptible to this race, there is a chance it can fail too.

The errors produced by this race look like this:

```
Jul 19 17:05:21 ubuntu1604 kubelet[17097]: I0719 17:05:21.854303 17097 reconciler.go:253] MountVolume operation started for volume "kubernetes.io/configmap/foo-files" (spec.Name: "files") to pod "11786582-4dbf-11e6-9fc9-64cca009c636" (UID: "11786582-4dbf-11e6-9fc9-64cca009c636").
Jul 19 17:05:21 ubuntu1604 kubelet[17097]: I0719 17:05:21.854842 17097 reconciler.go:253] MountVolume operation started for volume "kubernetes.io/configmap/bar-files" (spec.Name: "files") to pod "117d2c22-4dbf-11e6-9fc9-64cca009c636" (UID: "117d2c22-4dbf-11e6-9fc9-64cca009c636").
Jul 19 17:05:21 ubuntu1604 kubelet[17097]: E0719 17:05:21.860796 17097 configmap.go:171] Error creating atomic writer: stat /var/lib/kubelet/pods/117d2c22-4dbf-11e6-9fc9-64cca009c636/volumes/kubernetes.io~configmap/files: no such file or directory
Jul 19 17:05:21 ubuntu1604 kubelet[17097]: E0719 17:05:21.861070 17097 goroutinemap.go:155] Operation for "kubernetes.io/configmap/bar-files" failed. No retries permitted until 2016-07-19 17:07:21.861036886 +0200 CEST (durationBeforeRetry 2m0s). error: MountVolume.SetUp failed for volume "kubernetes.io/configmap/bar-files" (spec.Name: "files") pod "117d2c22-4dbf-11e6-9fc9-64cca009c636" (UID: "117d2c22-4dbf-11e6-9fc9-64cca009c636") with: stat /var/lib/kubelet/pods/117d2c22-4dbf-11e6-9fc9-64cca009c636/volumes/kubernetes.io~configmap/files: no such file or directory
Jul 19 17:05:21 ubuntu1604 kubelet[17097]: E0719 17:05:21.861271 17097 configmap.go:171] Error creating atomic writer: stat /var/lib/kubelet/pods/11786582-4dbf-11e6-9fc9-64cca009c636/volumes/kubernetes.io~configmap/files: no such file or directory
Jul 19 17:05:21 ubuntu1604 kubelet[17097]: E0719 17:05:21.862284 17097 goroutinemap.go:155] Operation for "kubernetes.io/configmap/foo-files" failed. No retries permitted until 2016-07-19 17:07:21.862275753 +0200 CEST (durationBeforeRetry 2m0s). error: MountVolume.SetUp failed for volume "kubernetes.io/configmap/foo-files" (spec.Name: "files") pod "11786582-4dbf-11e6-9fc9-64cca009c636" (UID: "11786582-4dbf-11e6-9fc9-64cca009c636") with: stat /var/lib/kubelet/pods/11786582-4dbf-11e6-9fc9-64cca009c636/volumes/kubernetes.io~configmap/files: no such file or directory
```

Note the "Error creating atomic writer" errors. This problem can be reproduced by making the kubelet mount multiple configmap volumes in parallel.
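The underlying pattern generalizes beyond Kubernetes: a package-level `var` that contains a pointer is shared by every goroutine, so any in-place patch races with concurrent users, whereas a function that returns a freshly built value gives each caller a private copy. Below is a minimal standalone Go sketch of that pattern; `Spec`, `Volume`, and `patch` are hypothetical stand-ins for `volume.Spec`, `api.Volume`, and the patching done by `NewWrapperMounter`, not the actual Kubernetes types. Running it with `go run -race` flags the shared-variable variant.

```go
package main

import (
	"fmt"
	"sync"
)

// Hypothetical stand-ins for api.Volume and volume.Spec.
type Volume struct{ Name string }
type Spec struct{ Volume *Volume }

// Racy pattern (pre-PR): one package-level Spec whose inner *Volume
// every mounter shares; patching it in place races across goroutines.
var sharedSpec = Spec{Volume: &Volume{}}

// Fixed pattern (this PR): each call builds a fresh Spec, so
// concurrent mounts never touch the same inner Volume.
func wrappedVolumeSpec() Spec {
	return Spec{Volume: &Volume{}}
}

// patch mimics NewWrapperMounter rewriting the wrapped volume's name;
// the Spec is passed by value, but the mutation goes through the pointer.
func patch(s Spec, name string) {
	s.Volume.Name = name
}

func main() {
	var wg sync.WaitGroup
	for _, name := range []string{"foo-files", "bar-files"} {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			patch(sharedSpec, name)          // data race: both goroutines write one *Volume
			patch(wrappedVolumeSpec(), name) // safe: each call gets its own *Volume
		}(name)
	}
	wg.Wait()
	fmt.Println("last writer wins on the shared spec:", sharedSpec.Volume.Name)
}
```

This is why each call site in the diffs below changes from passing the shared `wrappedVolumeSpec` variable to calling `wrappedVolumeSpec()`: rebuilding the small spec on every mount is cheap, and it removes the shared mutable state entirely.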
This commit is contained in commit e86b3f266c.
configmap plugin (`configmap.go`):

```diff
@@ -121,13 +121,15 @@ func (sv *configMapVolume) GetAttributes() volume.Attributes {
 	}
 }
 
-// This is the spec for the volume that this plugin wraps.
-var wrappedVolumeSpec = volume.Spec{
-	// This should be on a tmpfs instead of the local disk; the problem is
-	// charging the memory for the tmpfs to the right cgroup. We should make
-	// this a tmpfs when we can do the accounting correctly.
-	Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
+func wrappedVolumeSpec() volume.Spec {
+	// This is the spec for the volume that this plugin wraps.
+	return volume.Spec{
+		// This should be on a tmpfs instead of the local disk; the problem is
+		// charging the memory for the tmpfs to the right cgroup. We should make
+		// this a tmpfs when we can do the accounting correctly.
+		Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
+	}
 }
 
 func (b *configMapVolumeMounter) SetUp(fsGroup *int64) error {
 	return b.SetUpAt(b.GetPath(), fsGroup)
@@ -137,7 +139,7 @@ func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)
 
 	// Wrap EmptyDir, let it do the setup.
-	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, &b.pod, *b.opts)
+	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts)
 	if err != nil {
 		return err
 	}
@@ -236,7 +238,7 @@ func (c *configMapVolumeUnmounter) TearDownAt(dir string) error {
 	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
 
 	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID)
+	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
 	if err != nil {
 		return err
 	}
```
downwardapi plugin:

```diff
@@ -49,9 +49,11 @@ type downwardAPIPlugin struct {
 
 var _ volume.VolumePlugin = &downwardAPIPlugin{}
 
-var wrappedVolumeSpec = volume.Spec{
-	Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}},
+func wrappedVolumeSpec() volume.Spec {
+	return volume.Spec{
+		Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}},
+	}
 }
 
 func (plugin *downwardAPIPlugin) Init(host volume.VolumeHost) error {
 	plugin.host = host
@@ -144,7 +146,7 @@ func (b *downwardAPIVolumeMounter) SetUp(fsGroup *int64) error {
 func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	glog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir)
 	// Wrap EmptyDir. Here we rely on the idempotency of the wrapped plugin to avoid repeatedly mounting
-	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, b.pod, *b.opts)
+	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), b.pod, *b.opts)
 	if err != nil {
 		glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
 		return err
@@ -233,7 +235,7 @@ func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error {
 	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
 
 	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID)
+	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
 	if err != nil {
 		return err
 	}
```
git_repo plugin:

```diff
@@ -41,9 +41,11 @@ type gitRepoPlugin struct {
 
 var _ volume.VolumePlugin = &gitRepoPlugin{}
 
-var wrappedVolumeSpec = volume.Spec{
-	Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
+func wrappedVolumeSpec() volume.Spec {
+	return volume.Spec{
+		Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},
+	}
 }
 
 const (
 	gitRepoPluginName = "kubernetes.io/git-repo"
@@ -155,7 +157,7 @@ func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	}
 
 	// Wrap EmptyDir, let it do the setup.
-	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, &b.pod, b.opts)
+	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, b.opts)
 	if err != nil {
 		return err
 	}
@@ -237,7 +239,7 @@ func (c *gitRepoVolumeUnmounter) TearDown() error {
 func (c *gitRepoVolumeUnmounter) TearDownAt(dir string) error {
 
 	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID)
+	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
 	if err != nil {
 		return err
 	}
```
secret plugin:

```diff
@@ -45,9 +45,11 @@ type secretPlugin struct {
 
 var _ volume.VolumePlugin = &secretPlugin{}
 
-var wrappedVolumeSpec = volume.Spec{
-	Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}},
+func wrappedVolumeSpec() volume.Spec {
+	return volume.Spec{
+		Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}},
+	}
 }
 
 func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
 	return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(secretPluginName), volName)
@@ -150,7 +152,7 @@ func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
 	glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)
 
 	// Wrap EmptyDir, let it do the setup.
-	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, &b.pod, *b.opts)
+	wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec(), &b.pod, *b.opts)
 	if err != nil {
 		return err
 	}
@@ -249,7 +251,7 @@ func (c *secretVolumeUnmounter) TearDownAt(dir string) error {
 	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
 
 	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID)
+	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
 	if err != nil {
 		return err
 	}
```