mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #39311 from rkouj/refactor-tear-down-at
Automatic merge from submit-queue

Check if pathExists before performing Unmount

The Unmount operation should not fail if the path does not exist.

Part two of: https://github.com/kubernetes/kubernetes/pull/38547
Plugins status captured here: https://github.com/kubernetes/kubernetes/issues/39251

cc: @saad-ali
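The change in a nutshell: tear-down becomes idempotent by checking that the volume directory still exists before unmounting. A minimal standalone sketch of that guard follows (hypothetical teardownAt helper; the real plugins delegate to volume.UnmountViaEmptyDir, as the diff below shows):

    package main

    import (
        "fmt"
        "os"
    )

    // teardownAt shows the guard this PR adds: if the volume directory is
    // already gone, unmount becomes a warning plus a no-op instead of an
    // error. Hypothetical sketch; the real plugins call
    // volume.UnmountViaEmptyDir.
    func teardownAt(dir string) error {
        if _, err := os.Stat(dir); os.IsNotExist(err) {
            fmt.Printf("Warning: Unmount skipped because path does not exist: %v\n", dir)
            return nil
        } else if err != nil {
            return fmt.Errorf("error checking if path exists: %v", err)
        }
        // Path exists: do the actual teardown work here.
        return os.RemoveAll(dir)
    }

    func main() {
        dir, _ := os.MkdirTemp("", "vol")
        fmt.Println(teardownAt(dir)) // <nil>: directory removed
        fmt.Println(teardownAt(dir)) // <nil>: already gone, unmount skipped
    }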
commit eb8739d3c1
@@ -267,14 +267,7 @@ func (c *configMapVolumeUnmounter) TearDown() error {
 }
 
 func (c *configMapVolumeUnmounter) TearDownAt(dir string) error {
-	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
-
-	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
-	if err != nil {
-		return err
-	}
-	return wrapped.TearDownAt(dir)
+	return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
 }
 
 func getVolumeSource(spec *volume.Spec) (*v1.ConfigMapVolumeSource, bool) {
@@ -272,14 +272,7 @@ func (c *downwardAPIVolumeUnmounter) TearDown() error {
 }
 
 func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error {
-	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
-
-	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
-	if err != nil {
-		return err
-	}
-	return wrapped.TearDownAt(dir)
+	return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
 }
 
 func (b *downwardAPIVolumeMounter) getMetaDir() string {
@@ -299,6 +299,13 @@ func (ed *emptyDir) TearDown() error {
 
 // TearDownAt simply discards everything in the directory.
 func (ed *emptyDir) TearDownAt(dir string) error {
+	if pathExists, pathErr := volumeutil.PathExists(dir); pathErr != nil {
+		return fmt.Errorf("Error checking if path exists: %v", pathErr)
+	} else if !pathExists {
+		glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
+		return nil
+	}
+
 	// Figure out the medium.
 	medium, isMnt, err := ed.mountDetector.GetMountMedium(dir)
 	if err != nil {
@@ -254,13 +254,7 @@ func (c *gitRepoVolumeUnmounter) TearDown() error {
 
 // TearDownAt simply deletes everything in the directory.
 func (c *gitRepoVolumeUnmounter) TearDownAt(dir string) error {
-
-	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
-	if err != nil {
-		return err
-	}
-	return wrapped.TearDownAt(dir)
+	return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
 }
 
 func getVolumeSource(spec *volume.Spec) (*v1.GitRepoVolumeSource, bool) {
@@ -294,14 +294,7 @@ func (c *secretVolumeUnmounter) TearDown() error {
 }
 
 func (c *secretVolumeUnmounter) TearDownAt(dir string) error {
-	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
-
-	// Wrap EmptyDir, let it do the teardown.
-	wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec(), c.podUID)
-	if err != nil {
-		return err
-	}
-	return wrapped.TearDownAt(dir)
+	return volume.UnmountViaEmptyDir(dir, c.plugin.host, c.volName, wrappedVolumeSpec(), c.podUID)
 }
 
 func getVolumeSource(spec *volume.Spec) (*v1.SecretVolumeSource, bool) {
@@ -34,7 +34,9 @@ import (
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/sets"
+	volutil "k8s.io/kubernetes/pkg/volume/util"
 )
 
 type RecycleEventRecorder func(eventtype, message string)
@@ -332,3 +334,23 @@ func ChooseZoneForVolume(zones sets.String, pvcName string) string {
 	glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
 	return zone
 }
+
+// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi
+// to empty_dir
+func UnmountViaEmptyDir(dir string, host VolumeHost, volName string, volSpec Spec, podUID types.UID) error {
+	glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir)
+
+	if pathExists, pathErr := volutil.PathExists(dir); pathErr != nil {
+		return fmt.Errorf("Error checking if path exists: %v", pathErr)
+	} else if !pathExists {
+		glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
+		return nil
+	}
+
+	// Wrap EmptyDir, let it do the teardown.
+	wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID)
+	if err != nil {
+		return err
+	}
+	return wrapped.TearDownAt(dir)
+}
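With the helper in place, each wrapped plugin's TearDownAt collapses to a one-line delegation. Below is a simplified sketch of the two pieces of plumbing the helper leans on; the stand-in types are illustrative, since the real volume.VolumeHost and volume.Unmounter carry many more methods and podUID is a types.UID rather than a string:

    package volume

    // Simplified stand-ins for the plumbing UnmountViaEmptyDir relies on;
    // sketch only, not the real interfaces.
    type Spec struct{} // stub for volume.Spec

    type Unmounter interface {
        TearDownAt(dir string) error
    }

    type VolumeHost interface {
        // NewWrapperUnmounter returns an emptyDir-backed unmounter for the
        // wrapped volume, so one teardown implementation serves secret,
        // configmap, git_repo and downwardapi alike.
        NewWrapperUnmounter(volName string, volSpec Spec, podUID string) (Unmounter, error)
    }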
@@ -194,6 +194,7 @@ Garbage collector should orphan pods created by rc if deleteOptions.OrphanDepend
 "Generated release_1_5 clientset should create pods, delete pods, watch pods",rrati,0
 "Generated release_1_5 clientset should create v2alpha1 cronJobs, delete cronJobs, watch cronJobs",soltysh,1
 HA-master survive addition/removal replicas different zones,derekwaynecarr,0
+HA-master survive addition/removal replicas multizone workers,rkouj,0
 HA-master survive addition/removal replicas same zone,derekwaynecarr,0
 Hazelcast should create and scale hazelcast,mikedanese,1
 Horizontal pod autoscaling (scale resource: CPU) Deployment Should scale from 1 pod to 3 pods and from 3 to 5,jszczepkowski,0
@@ -697,7 +698,6 @@ k8s.io/kubernetes/pkg/kubelet/kuberuntime,yifan-gu,1
 k8s.io/kubernetes/pkg/kubelet/lifecycle,yujuhong,1
 k8s.io/kubernetes/pkg/kubelet/network,freehan,0
 k8s.io/kubernetes/pkg/kubelet/network/cni,freehan,0
-k8s.io/kubernetes/pkg/kubelet/network/exec,freehan,0
 k8s.io/kubernetes/pkg/kubelet/network/hairpin,freehan,0
 k8s.io/kubernetes/pkg/kubelet/network/hostport,erictune,1
 k8s.io/kubernetes/pkg/kubelet/network/kubenet,freehan,0