Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-30 15:05:27 +00:00)
Merge pull request #125139 from huww98/kubelet-vm-cleaup

kubelet/volumemanager: cleanup set and sort

Commit 3d6c99e1a7
@@ -20,7 +20,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"sort"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
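This hunk swaps the `sort` import for `slices`, the Go 1.21 standard-library package whose generic helpers replace the `sort` calls removed throughout the rest of the diff. A minimal standalone sketch of the equivalence (not kubelet code):

    package main

    import (
        "fmt"
        "slices"
        "sort"
    )

    func main() {
        a := []string{"c", "a", "b"}
        b := []string{"c", "a", "b"}

        sort.Strings(a) // pre-1.21 style, works on string slices only
        slices.Sort(b)  // Go 1.21+, works on any slice of an ordered type

        fmt.Println(a, b) // [a b c] [a b c]
    }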
@@ -340,7 +340,7 @@ func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
 	}

 	result := make([]int64, 0, supplementalGroups.Len())
-	for _, group := range sets.List(supplementalGroups) {
+	for _, group := range supplementalGroups.UnsortedList() {
 		iGroup, extra := getExtraSupplementalGid(group, pod)
 		if !extra {
 			continue
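`sets.List` returns the set's elements as a sorted slice, while `UnsortedList` returns them in arbitrary order without the O(n log n) sort; since this loop only filters the groups, the ordering was unnecessary work. A sketch of the difference, using `k8s.io/apimachinery/pkg/util/sets`:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/sets"
    )

    func main() {
        s := sets.New[int64](30, 10, 20)

        // sets.List sorts the elements before returning them: O(n log n).
        fmt.Println(sets.List(s)) // [10 20 30]

        // UnsortedList returns map-iteration order: O(n), order unspecified.
        fmt.Println(s.UnsortedList()) // e.g. [20 30 10]
    }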
@@ -359,29 +359,21 @@ func (vm *volumeManager) GetVolumesInUse() []v1.UniqueVolumeName {
 	desiredVolumes := vm.desiredStateOfWorld.GetVolumesToMount()
 	allAttachedVolumes := vm.actualStateOfWorld.GetAttachedVolumes()
 	volumesToReportInUse := make([]v1.UniqueVolumeName, 0, len(desiredVolumes)+len(allAttachedVolumes))
-	desiredVolumesMap := make(map[v1.UniqueVolumeName]bool, len(desiredVolumes)+len(allAttachedVolumes))

 	for _, volume := range desiredVolumes {
 		if volume.PluginIsAttachable {
-			if _, exists := desiredVolumesMap[volume.VolumeName]; !exists {
-				desiredVolumesMap[volume.VolumeName] = true
-				volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName)
-			}
+			volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName)
 		}
 	}

 	for _, volume := range allAttachedVolumes {
 		if volume.PluginIsAttachable {
-			if _, exists := desiredVolumesMap[volume.VolumeName]; !exists {
-				volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName)
-			}
+			volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName)
 		}
 	}

-	sort.Slice(volumesToReportInUse, func(i, j int) bool {
-		return string(volumesToReportInUse[i]) < string(volumesToReportInUse[j])
-	})
-	return volumesToReportInUse
+	slices.Sort(volumesToReportInUse)
+	return slices.Compact(volumesToReportInUse)
 }

 func (vm *volumeManager) ReconcilerStatesHasBeenSynced() bool {
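The rewrite drops the `desiredVolumesMap` bookkeeping entirely: duplicates are appended freely, and a final `slices.Sort` plus `slices.Compact` yields the same sorted, deduplicated result, since `Compact` removes consecutive equal elements. A standalone sketch of the pattern:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        // Append duplicates freely instead of filtering through a map.
        names := []string{"vol-b", "vol-a", "vol-b", "vol-c", "vol-a"}

        slices.Sort(names)            // sorting makes duplicates adjacent...
        names = slices.Compact(names) // ...so Compact drops them in one pass

        fmt.Println(names) // [vol-a vol-b vol-c]
    }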
@@ -471,12 +463,11 @@ func (vm *volumeManager) WaitForUnmount(ctx context.Context, pod *v1.Pod) error
 	for _, v := range vm.actualStateOfWorld.GetMountedVolumesForPod(uniquePodName) {
 		mountedVolumes = append(mountedVolumes, v.OuterVolumeSpecName)
 	}
-	sort.Strings(mountedVolumes)
-
 	if len(mountedVolumes) == 0 {
 		return nil
 	}

+	slices.Sort(mountedVolumes)
 	return fmt.Errorf(
 		"mounted volumes=%v: %w",
 		mountedVolumes,
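Besides switching to `slices.Sort`, the sort moves below the early return, so the common case (no volumes still mounted) skips it entirely; the names only need a deterministic order when they are formatted into the error. A minimal sketch of that control flow, with a hypothetical `reportLeftovers` helper standing in for the kubelet function:

    package main

    import (
        "fmt"
        "slices"
    )

    // reportLeftovers mirrors the shape above: sort only on the error path.
    func reportLeftovers(leftovers []string) error {
        if len(leftovers) == 0 {
            return nil // common case: no sorting work at all
        }
        slices.Sort(leftovers) // deterministic error message
        return fmt.Errorf("mounted volumes=%v", leftovers)
    }

    func main() {
        fmt.Println(reportLeftovers(nil))                     // <nil>
        fmt.Println(reportLeftovers([]string{"data", "cfg"})) // mounted volumes=[cfg data]
    }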
@@ -506,7 +497,7 @@ func (vm *volumeManager) WaitForAllPodsUnmount(ctx context.Context, pods []*v1.P
 }

 func (vm *volumeManager) getVolumesNotInDSW(uniquePodName types.UniquePodName, expectedVolumes []string) []string {
-	volumesNotInDSW := sets.New[string](expectedVolumes...)
+	volumesNotInDSW := sets.New(expectedVolumes...)

 	for _, volumeToMount := range vm.desiredStateOfWorld.GetVolumesToMount() {
 		if volumeToMount.PodName == uniquePodName {
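`sets.New` is generic, so the explicit `[string]` instantiation was redundant: the compiler infers the element type from the variadic arguments. A sketch:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/sets"
    )

    func main() {
        expected := []string{"config", "data"}

        a := sets.New[string](expected...) // explicit instantiation
        b := sets.New(expected...)         // T inferred as string

        fmt.Println(a.Equal(b)) // true
    }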
@@ -528,7 +519,7 @@ func (vm *volumeManager) getUnattachedVolumes(uniquePodName types.UniquePodName)
 			unattachedVolumes = append(unattachedVolumes, volumeToMount.OuterVolumeSpecName)
 		}
 	}
-	sort.Strings(unattachedVolumes)
+	slices.Sort(unattachedVolumes)

 	return unattachedVolumes
 }
@@ -582,7 +573,7 @@ func filterUnmountedVolumes(mountedVolumes sets.Set[string], expectedVolumes []s
 			unmountedVolumes = append(unmountedVolumes, expectedVolume)
 		}
 	}
-	sort.Strings(unmountedVolumes)
+	slices.Sort(unmountedVolumes)

 	return unmountedVolumes
 }