Merge pull request #46562 from dixudx/volume_manager_function_format
Automatic merge from submit-queue (batch tested with PRs 46661, 46562, 46657, 46655, 46640)

remove redundant carriage returns for readability

**What this PR does / why we need it**: remove redundant carriage returns (line breaks inside function signatures) to make the code more readable.
Commit 50c6a38c1e
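To illustrate the kind of cleanup this PR applies, here is a minimal, self-contained Go sketch (hypothetical code, not taken from the diff below; `filterUnmounted` and its map-based signature are stand-ins loosely modeled on the `filterUnmountedVolumes` helper touched by this change): a parameter list that had been wrapped onto a continuation line is joined back onto the function's opening line.

```go
package main

import "fmt"

// Before the cleanup, a signature like this was wrapped onto a second line:
//
//	func filterUnmounted(
//		mounted map[string]bool, expected []string) []string {
//
// After removing the redundant line break, the whole signature reads on one line.
func filterUnmounted(mounted map[string]bool, expected []string) []string {
	unmounted := []string{}
	for _, v := range expected {
		if !mounted[v] {
			unmounted = append(unmounted, v)
		}
	}
	return unmounted
}

func main() {
	mounted := map[string]bool{"data": true}
	// Prints [logs]: the expected volume that is not mounted.
	fmt.Println(filterUnmounted(mounted, []string{"data", "logs"}))
}
```

The behavior is unchanged in either form; only the line wrapping of the signature differs, which is exactly the readability tweak the hunks below make.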
@@ -79,8 +79,7 @@ func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *v1.Pod, o
 
 // cleanupOrphanedPodDirs removes the volumes of pods that should not be
 // running and that have no containers running. Note that we roll up logs here since it runs in the main loop.
-func (kl *Kubelet) cleanupOrphanedPodDirs(
-	pods []*v1.Pod, runningPods []*kubecontainer.Pod) error {
+func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecontainer.Pod) error {
 	allPods := sets.NewString()
 	for _, pod := range pods {
 		allPods.Insert(string(pod.UID))

@@ -249,8 +249,7 @@ func (vm *volumeManager) Run(sourcesReady config.SourcesReady, stopCh <-chan str
 	glog.Infof("Shutting down Kubelet Volume Manager")
 }
 
-func (vm *volumeManager) GetMountedVolumesForPod(
-	podName types.UniquePodName) container.VolumeMap {
+func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap {
 	podVolumes := make(container.VolumeMap)
 	for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
 		podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{Mounter: mountedVolume.Mounter}

@@ -373,8 +372,7 @@ func (vm *volumeManager) WaitForAttachAndMount(pod *v1.Pod) error {
 
 // verifyVolumesMountedFunc returns a method that returns true when all expected
 // volumes are mounted.
-func (vm *volumeManager) verifyVolumesMountedFunc(
-	podName types.UniquePodName, expectedVolumes []string) wait.ConditionFunc {
+func (vm *volumeManager) verifyVolumesMountedFunc(podName types.UniquePodName, expectedVolumes []string) wait.ConditionFunc {
 	return func() (done bool, err error) {
 		return len(vm.getUnmountedVolumes(podName, expectedVolumes)) == 0, nil
 	}

@@ -383,8 +381,7 @@ func (vm *volumeManager) verifyVolumesMountedFunc(
 // getUnmountedVolumes fetches the current list of mounted volumes from
 // the actual state of the world, and uses it to process the list of
 // expectedVolumes. It returns a list of unmounted volumes.
-func (vm *volumeManager) getUnmountedVolumes(
-	podName types.UniquePodName, expectedVolumes []string) []string {
+func (vm *volumeManager) getUnmountedVolumes(podName types.UniquePodName, expectedVolumes []string) []string {
 	mountedVolumes := sets.NewString()
 	for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
 		mountedVolumes.Insert(mountedVolume.OuterVolumeSpecName)

@@ -394,8 +391,7 @@ func (vm *volumeManager) getUnmountedVolumes(
 
 // filterUnmountedVolumes adds each element of expectedVolumes that is not in
 // mountedVolumes to a list of unmountedVolumes and returns it.
-func filterUnmountedVolumes(
-	mountedVolumes sets.String, expectedVolumes []string) []string {
+func filterUnmountedVolumes(mountedVolumes sets.String, expectedVolumes []string) []string {
 	unmountedVolumes := []string{}
 	for _, expectedVolume := range expectedVolumes {
 		if !mountedVolumes.Has(expectedVolume) {

@@ -182,10 +182,7 @@ func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
 	}
 }
 
-func newTestVolumeManager(
-	tmpDir string,
-	podManager pod.Manager,
-	kubeClient clientset.Interface) (VolumeManager, error) {
+func newTestVolumeManager(tmpDir string, podManager pod.Manager, kubeClient clientset.Interface) (VolumeManager, error) {
 	plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
 	fakeRecorder := &record.FakeRecorder{}
 	plugMgr := &volume.VolumePluginMgr{}

@@ -275,10 +272,7 @@ func createObjects() (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVol
 	return node, pod, pv, claim
 }
 
-func simulateVolumeInUseUpdate(
-	volumeName v1.UniqueVolumeName,
-	stopCh <-chan struct{},
-	volumeManager VolumeManager) {
+func simulateVolumeInUseUpdate(volumeName v1.UniqueVolumeName, stopCh <-chan struct{}, volumeManager VolumeManager) {
 	ticker := time.NewTicker(100 * time.Millisecond)
 	defer ticker.Stop()
 	for {

@@ -298,8 +298,7 @@ func (r *hostPathDeleter) Delete() error {
 	return os.RemoveAll(r.GetPath())
 }
 
-func getVolumeSource(
-	spec *volume.Spec) (*v1.HostPathVolumeSource, bool, error) {
+func getVolumeSource(spec *volume.Spec) (*v1.HostPathVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.HostPath != nil {
 		return spec.Volume.HostPath, spec.ReadOnly, nil
 	} else if spec.PersistentVolume != nil &&