Merge pull request #39311 from rkouj/refactor-tear-down-at

Automatic merge from submit-queue

Check if pathExists before performing Unmount

The unmount operation should not fail if the path does not exist.

Part two of: https://github.com/kubernetes/kubernetes/pull/38547
Plugins status captured here: https://github.com/kubernetes/kubernetes/issues/39251

cc: @saad-ali
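
In effect, TearDownAt becomes idempotent: it first checks whether the target directory still exists, and a missing path is treated as an unmount that has already completed rather than an error. A minimal, self-contained sketch of the pattern (tearDownAt and doUnmount are illustrative names, not the actual plugin code):

package main

import (
	"fmt"
	"log"
	"os"
)

// tearDownAt returns nil, with a warning, when dir is already gone, so a
// repeated tear-down attempt cannot fail on a missing path.
func tearDownAt(dir string) error {
	if _, err := os.Stat(dir); err != nil {
		if os.IsNotExist(err) {
			log.Printf("unmount skipped because path does not exist: %v", dir)
			return nil
		}
		return fmt.Errorf("error checking if path exists: %v", err)
	}
	// The real work runs only when the path exists.
	return doUnmount(dir)
}

// doUnmount stands in for the plugin-specific unmount/cleanup logic.
func doUnmount(dir string) error {
	return os.RemoveAll(dir)
}

func main() {
	// The second call is a no-op instead of an error.
	for i := 0; i < 2; i++ {
		if err := tearDownAt("/tmp/demo-volume"); err != nil {
			log.Fatal(err)
		}
	}
}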
commit eb8739d3c1
Kubernetes Submit Queue, 2017-01-04 18:10:30 -08:00 (committed by GitHub)
7 changed files with 34 additions and 32 deletions

pkg/volume/configmap/configmap.go

@@ -267,14 +267,7 @@ func (c *configMapVolumeUnmounter) TearDown() error {
 }
 
 func (c *configMapVolumeUnmounter) TearDownAt(dir string) error {
-	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
-
-	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
-	if err != nil {
-		return err
-	}
-	return wrapped.TearDownAt(dir)
+	return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
 }
 
 func getVolumeSource(spec *volume.Spec) (*v1.ConfigMapVolumeSource, bool) {

pkg/volume/downwardapi/downwardapi.go

@@ -272,14 +272,7 @@ func (c *downwardAPIVolumeUnmounter) TearDown() error {
 }
 
 func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error {
-	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
-
-	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
-	if err != nil {
-		return err
-	}
-	return wrapped.TearDownAt(dir)
+	return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
 }
 
 func (b *downwardAPIVolumeMounter) getMetaDir() string {

pkg/volume/empty_dir/empty_dir.go

@@ -299,6 +299,13 @@ func (ed *emptyDir) TearDown() error {
 
 // TearDownAt simply discards everything in the directory.
 func (ed *emptyDir) TearDownAt(dir string) error {
+	if pathExists, pathErr := volumeutil.PathExists(dir); pathErr != nil {
+		return fmt.Errorf("Error checking if path exists: %v", pathErr)
+	} else if !pathExists {
+		glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
+		return nil
+	}
+
 	// Figure out the medium.
 	medium, isMnt, err := ed.mountDetector.GetMountMedium(dir)
 	if err != nil {

pkg/volume/git_repo/git_repo.go

@@ -254,13 +254,7 @@ func (c *gitRepoVolumeUnmounter) TearDown() error {
 
 // TearDownAt simply deletes everything in the directory.
 func (c *gitRepoVolumeUnmounter) TearDownAt(dir string) error {
-
-	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
-	if err != nil {
-		return err
-	}
-	return wrapped.TearDownAt(dir)
+	return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
 }
 
 func getVolumeSource(spec *volume.Spec) (*v1.GitRepoVolumeSource, bool) {

pkg/volume/secret/secret.go

@@ -294,14 +294,7 @@ func (c *secretVolumeUnmounter) TearDown() error {
 }
 
 func (c *secretVolumeUnmounter) TearDownAt(dir string) error {
-	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
-
-	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
-	if err != nil {
-		return err
-	}
-	return wrapped.TearDownAt(dir)
+	return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
 }
 
 func getVolumeSource(spec *volume.Spec) (*v1.SecretVolumeSource, bool) {

pkg/volume/util.go

@@ -34,7 +34,9 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/sets"
+	volutil "k8s.io/kubernetes/pkg/volume/util"
 )
 
 type RecycleEventRecorder func(eventtype, message string)
@@ -332,3 +334,23 @@ func ChooseZoneForVolume(zones sets.String, pvcName string) string {
 	glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
 	return zone
 }
+
+// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi
+// to empty_dir
+func UnmountViaEmptyDir(dir string, host VolumeHost, volName string, volSpec Spec, podUID types.UID) error {
+	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir)
+
+	if pathExists, pathErr := volutil.PathExists(dir); pathErr != nil {
+		return fmt.Errorf("Error checking if path exists: %v", pathErr)
+	} else if !pathExists {
+		glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
+		return nil
+	}
+
+	// Wrap EmptyDir, let it do the teardown.
+	wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID)
+	if err != nil {
+		return err
+	}
+	return wrapped.TearDownAt(dir)
+}
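
UnmountViaEmptyDir leans on volutil.PathExists for the existence check. That helper is not part of this diff; the sketch below shows the standard os.Stat idiom it is assumed to follow, where a missing path yields (false, nil) rather than an error:

package util

import "os"

// PathExists (assumed semantics, not the actual pkg/volume/util source):
// (true, nil) if the path exists, (false, nil) if it does not, and
// (false, err) only for unexpected stat failures.
func PathExists(path string) (bool, error) {
	_, err := os.Stat(path)
	if err == nil {
		return true, nil
	}
	if os.IsNotExist(err) {
		return false, nil
	}
	return false, err
}

Because only unexpected stat errors surface as errors, UnmountViaEmptyDir can tell "already unmounted" apart from a genuine failure, and only the latter aborts the tear-down.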

test/test_owners.csv

@@ -194,6 +194,7 @@ Garbage collector should orphan pods created by rc if deleteOptions.OrphanDepend
 "Generated release_1_5 clientset should create pods, delete pods, watch pods",rrati,0
 "Generated release_1_5 clientset should create v2alpha1 cronJobs, delete cronJobs, watch cronJobs",soltysh,1
 HA-master survive addition/removal replicas different zones,derekwaynecarr,0
+HA-master survive addition/removal replicas multizone workers,rkouj,0
 HA-master survive addition/removal replicas same zone,derekwaynecarr,0
 Hazelcast should create and scale hazelcast,mikedanese,1
 Horizontal pod autoscaling (scale resource: CPU) Deployment Should scale from 1 pod to 3 pods and from 3 to 5,jszczepkowski,0
@@ -697,7 +698,6 @@ k8s.io/kubernetes/pkg/kubelet/kuberuntime,yifan-gu,1
 k8s.io/kubernetes/pkg/kubelet/lifecycle,yujuhong,1
 k8s.io/kubernetes/pkg/kubelet/network,freehan,0
 k8s.io/kubernetes/pkg/kubelet/network/cni,freehan,0
-k8s.io/kubernetes/pkg/kubelet/network/exec,freehan,0
 k8s.io/kubernetes/pkg/kubelet/network/hairpin,freehan,0
 k8s.io/kubernetes/pkg/kubelet/network/hostport,erictune,1
 k8s.io/kubernetes/pkg/kubelet/network/kubenet,freehan,0
