mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-29 14:37:00 +00:00
Use a generic Set instead of a string-specific Set in kubelet
Signed-off-by: bzsuni <bingzhe.sun@daocloud.io>
parent 5bf1e95541
commit a8d51f4f05
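For context on the mechanical change below: the deprecated string-specialized set type (sets.String, built with sets.NewString and sorted with the .List() method) is swapped for the generic set type in k8s.io/apimachinery/pkg/util/sets (sets.Set[string], built with sets.New[string] and sorted with the package-level sets.List). A minimal side-by-side sketch of the two APIs, illustrative only and not part of the commit, assuming an apimachinery version that ships the generic sets package:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Deprecated string-only API that this commit migrates away from.
	old := sets.NewString("b", "a")
	fmt.Println(old.List()) // [a b] -- sorted slice via a method

	// Generic API used by the new code.
	s := sets.New[string]("b", "a") // sets.Set[string]
	s.Insert("c")
	other := sets.Set[string]{} // zero-value literal, as used for struct fields in the diff
	other.Insert("b", "d")

	fmt.Println(sets.List(s))                     // [a b c] -- sorted slice via package function
	fmt.Println(sets.List(s.Intersection(other))) // [b]
	fmt.Println(s.Has("a"), s.HasAll("a", "c"))   // true true
}

Because the generic type keeps the same method set (Insert, Has, HasAll, Union, Intersection, Difference, Equal), the diff only touches constructors, type names, and List call sites; no logic changes.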
@@ -30,11 +30,11 @@ func TestKubeletConfigurationPathFields(t *testing.T) {
 if i := kubeletConfigurationPathFieldPaths.Intersection(kubeletConfigurationNonPathFieldPaths); len(i) > 0 {
 t.Fatalf("expect the intersection of kubeletConfigurationPathFieldPaths and "+
 "KubeletConfigurationNonPathFields to be empty, got:\n%s",
-strings.Join(i.List(), "\n"))
+strings.Join(sets.List(i), "\n"))
 }
 
 // ensure that kubeletConfigurationPathFields U kubeletConfigurationNonPathFields == allPrimitiveFieldPaths(KubeletConfiguration)
-expect := sets.NewString().Union(kubeletConfigurationPathFieldPaths).Union(kubeletConfigurationNonPathFieldPaths)
+expect := sets.New[string]().Union(kubeletConfigurationPathFieldPaths).Union(kubeletConfigurationNonPathFieldPaths)
 result := allPrimitiveFieldPaths(t, expect, reflect.TypeOf(&KubeletConfiguration{}), nil)
 if !expect.Equal(result) {
 // expected fields missing from result
@@ -46,38 +46,38 @@ func TestKubeletConfigurationPathFields(t *testing.T) {
 "If the field has been removed, please remove it from the kubeletConfigurationPathFieldPaths set "+
 "and the KubeletConfigurationPathRefs function, "+
 "or remove it from the kubeletConfigurationNonPathFieldPaths set, as appropriate:\n%s",
-strings.Join(missing.List(), "\n"))
+strings.Join(sets.List(missing), "\n"))
 }
 if len(unexpected) > 0 {
 t.Errorf("the following fields were in the result, but unexpected. "+
 "If the field is new, please add it to the kubeletConfigurationPathFieldPaths set "+
 "and the KubeletConfigurationPathRefs function, "+
 "or add it to the kubeletConfigurationNonPathFieldPaths set, as appropriate:\n%s",
-strings.Join(unexpected.List(), "\n"))
+strings.Join(sets.List(unexpected), "\n"))
 }
 }
 }
 
 // allPrimitiveFieldPaths returns the set of field paths in type `tp`, rooted at `path`.
 // It recursively descends into the definition of type `tp` accumulating paths to primitive leaf fields or paths in `skipRecurseList`.
-func allPrimitiveFieldPaths(t *testing.T, skipRecurseList sets.String, tp reflect.Type, path *field.Path) sets.String {
+func allPrimitiveFieldPaths(t *testing.T, skipRecurseList sets.Set[string], tp reflect.Type, path *field.Path) sets.Set[string] {
 // if the current field path is in the list of paths we should not recurse into,
 // return here rather than descending and accumulating child field paths
 if pathStr := path.String(); len(pathStr) > 0 && skipRecurseList.Has(pathStr) {
-return sets.NewString(pathStr)
+return sets.New[string](pathStr)
 }
 
-paths := sets.NewString()
+paths := sets.New[string]()
 switch tp.Kind() {
 case reflect.Pointer:
-paths.Insert(allPrimitiveFieldPaths(t, skipRecurseList, tp.Elem(), path).List()...)
+paths.Insert(sets.List(allPrimitiveFieldPaths(t, skipRecurseList, tp.Elem(), path))...)
 case reflect.Struct:
 for i := 0; i < tp.NumField(); i++ {
 field := tp.Field(i)
-paths.Insert(allPrimitiveFieldPaths(t, skipRecurseList, field.Type, path.Child(field.Name)).List()...)
+paths.Insert(sets.List(allPrimitiveFieldPaths(t, skipRecurseList, field.Type, path.Child(field.Name)))...)
 }
 case reflect.Map, reflect.Slice:
-paths.Insert(allPrimitiveFieldPaths(t, skipRecurseList, tp.Elem(), path.Key("*")).List()...)
+paths.Insert(sets.List(allPrimitiveFieldPaths(t, skipRecurseList, tp.Elem(), path.Key("*")))...)
 case reflect.Interface:
 t.Fatalf("unexpected interface{} field %s", path.String())
 default:
@@ -115,7 +115,7 @@ type bar struct {
 }
 
 func TestAllPrimitiveFieldPaths(t *testing.T) {
-expect := sets.NewString(
+expect := sets.New[string](
 "str",
 "strptr",
 "ints[*]",
@@ -140,17 +140,17 @@ func TestAllPrimitiveFieldPaths(t *testing.T) {
 unexpected := result.Difference(expect)
 
 if len(missing) > 0 {
-t.Errorf("the following fields were expected, but missing from the result:\n%s", strings.Join(missing.List(), "\n"))
+t.Errorf("the following fields were expected, but missing from the result:\n%s", strings.Join(sets.List(missing), "\n"))
 }
 if len(unexpected) > 0 {
-t.Errorf("the following fields were in the result, but unexpected:\n%s", strings.Join(unexpected.List(), "\n"))
+t.Errorf("the following fields were in the result, but unexpected:\n%s", strings.Join(sets.List(unexpected), "\n"))
 }
 }
 }
 
 var (
 // KubeletConfiguration fields that contain file paths. If you update this, also update KubeletConfigurationPathRefs!
-kubeletConfigurationPathFieldPaths = sets.NewString(
+kubeletConfigurationPathFieldPaths = sets.New[string](
 "StaticPodPath",
 "Authentication.X509.ClientCAFile",
 "TLSCertFile",
@@ -160,7 +160,7 @@ var (
 )
 
 // KubeletConfiguration fields that do not contain file paths.
-kubeletConfigurationNonPathFieldPaths = sets.NewString(
+kubeletConfigurationNonPathFieldPaths = sets.New[string](
 "Address",
 "AllowedUnsafeSysctls[*]",
 "Authentication.Anonymous.Enabled",
@@ -312,8 +312,8 @@ func setupPluginManager(t *testing.T, pluginSocketName string, m Manager) plugin
 }
 
 func runPluginManager(pluginManager pluginmanager.PluginManager) {
-// FIXME: Replace sets.String with sets.Set[string]
-sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true })
+// FIXME: Replace sets.Set[string] with sets.Set[string]
+sourcesReady := config.NewSourcesReady(func(_ sets.Set[string]) bool { return true })
 go pluginManager.Run(sourcesReady, wait.NeverStop)
 }
 
@@ -67,7 +67,7 @@ type PodConfig struct {
 
 // contains the list of all configured sources
 sourcesLock sync.Mutex
-sources sets.String
+sources sets.Set[string]
 }
 
 // NewPodConfig creates an object that can merge many configuration sources into a stream
@@ -79,7 +79,7 @@ func NewPodConfig(mode PodConfigNotificationMode, recorder record.EventRecorder,
 pods: storage,
 mux: newMux(storage),
 updates: updates,
-sources: sets.String{},
+sources: sets.Set[string]{},
 }
 return podConfig
 }
@@ -95,14 +95,14 @@ func (c *PodConfig) Channel(ctx context.Context, source string) chan<- interface
 
 // SeenAllSources returns true if seenSources contains all sources in the
 // config, and also this config has received a SET message from each source.
-func (c *PodConfig) SeenAllSources(seenSources sets.String) bool {
+func (c *PodConfig) SeenAllSources(seenSources sets.Set[string]) bool {
 if c.pods == nil {
 return false
 }
 c.sourcesLock.Lock()
 defer c.sourcesLock.Unlock()
-klog.V(5).InfoS("Looking for sources, have seen", "sources", c.sources.List(), "seenSources", seenSources)
-return seenSources.HasAll(c.sources.List()...) && c.pods.seenSources(c.sources.List()...)
+klog.V(5).InfoS("Looking for sources, have seen", "sources", sets.List(c.sources), "seenSources", seenSources)
+return seenSources.HasAll(sets.List(c.sources)...) && c.pods.seenSources(sets.List(c.sources)...)
 }
 
 // Updates returns a channel of updates to the configuration, properly denormalized.
@@ -132,7 +132,7 @@ type podStorage struct {
 
 // contains the set of all sources that have sent at least one SET
 sourcesSeenLock sync.RWMutex
-sourcesSeen sets.String
+sourcesSeen sets.Set[string]
 
 // the EventRecorder to use
 recorder record.EventRecorder
@@ -148,7 +148,7 @@ func newPodStorage(updates chan<- kubetypes.PodUpdate, mode PodConfigNotificatio
 pods: make(map[string]map[types.UID]*v1.Pod),
 mode: mode,
 updates: updates,
-sourcesSeen: sets.String{},
+sourcesSeen: sets.Set[string]{},
 recorder: recorder,
 startupSLIObserver: startupSLIObserver,
 }
@@ -331,7 +331,7 @@ func (s *podStorage) seenSources(sources ...string) bool {
 }
 
 func filterInvalidPods(pods []*v1.Pod, source string, recorder record.EventRecorder) (filtered []*v1.Pod) {
-names := sets.String{}
+names := sets.Set[string]{}
 for i, pod := range pods {
 // Pods from each source are assumed to have passed validation individually.
 // This function only checks if there is any naming conflict.
@@ -455,7 +455,7 @@ func TestPodConfigRace(t *testing.T) {
 
 eventBroadcaster := record.NewBroadcaster(record.WithContext(tCtx))
 config := NewPodConfig(PodConfigNotificationIncremental, eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "kubelet"}), &mockPodStartupSLIObserver{})
-seenSources := sets.NewString(TestSource)
+seenSources := sets.New[string](TestSource)
 var wg sync.WaitGroup
 const iterations = 100
 wg.Add(2)
@@ -24,7 +24,7 @@ import (
 )
 
 // SourcesReadyFn is function that returns true if the specified sources have been seen.
-type SourcesReadyFn func(sourcesSeen sets.String) bool
+type SourcesReadyFn func(sourcesSeen sets.Set[string]) bool
 
 // SourcesReady tracks the set of configured sources seen by the kubelet.
 type SourcesReady interface {
@@ -37,7 +37,7 @@ type SourcesReady interface {
 // NewSourcesReady returns a SourcesReady with the specified function.
 func NewSourcesReady(sourcesReadyFn SourcesReadyFn) SourcesReady {
 return &sourcesImpl{
-sourcesSeen: sets.NewString(),
+sourcesSeen: sets.New[string](),
 sourcesReadyFn: sourcesReadyFn,
 }
 }
@@ -47,7 +47,7 @@ type sourcesImpl struct {
 // lock protects access to sources seen.
 lock sync.RWMutex
 // set of sources seen.
-sourcesSeen sets.String
+sourcesSeen sets.Set[string]
 // sourcesReady is a function that evaluates if the sources are ready.
 sourcesReadyFn SourcesReadyFn
 }
@@ -98,8 +98,8 @@ func (c *configMapManager) UnregisterPod(pod *v1.Pod) {
 c.manager.UnregisterPod(pod)
 }
 
-func getConfigMapNames(pod *v1.Pod) sets.String {
-result := sets.NewString()
+func getConfigMapNames(pod *v1.Pod) sets.Set[string] {
+result := sets.New[string]()
 podutil.VisitPodConfigmapNames(pod, func(name string) bool {
 result.Insert(name)
 return true
@@ -164,7 +164,7 @@ func ExpandContainerCommandOnlyStatic(containerCommand []string, envs []v1.EnvVa
 func ExpandContainerVolumeMounts(mount v1.VolumeMount, envs []EnvVar) (string, error) {
 
 envmap := envVarsToMap(envs)
-missingKeys := sets.NewString()
+missingKeys := sets.New[string]()
 expanded := expansion.Expand(mount.SubPathExpr, func(key string) string {
 value, ok := envmap[key]
 if !ok || len(value) == 0 {
@@ -174,7 +174,7 @@ func ExpandContainerVolumeMounts(mount v1.VolumeMount, envs []EnvVar) (string, e
 })
 
 if len(missingKeys) > 0 {
-return "", fmt.Errorf("missing value for %s", strings.Join(missingKeys.List(), ", "))
+return "", fmt.Errorf("missing value for %s", strings.Join(sets.List(missingKeys), ", "))
 }
 return expanded, nil
 }
@@ -232,9 +232,9 @@ func (im *realImageGCManager) GetImageList() ([]container.Image, error) {
 return im.imageCache.get(), nil
 }
 
-func (im *realImageGCManager) detectImages(ctx context.Context, detectTime time.Time) (sets.String, error) {
+func (im *realImageGCManager) detectImages(ctx context.Context, detectTime time.Time) (sets.Set[string], error) {
 isRuntimeClassInImageCriAPIEnabled := utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClassInImageCriAPI)
-imagesInUse := sets.NewString()
+imagesInUse := sets.New[string]()
 
 images, err := im.runtime.ListImages(ctx)
 if err != nil {
@@ -261,7 +261,7 @@ func (im *realImageGCManager) detectImages(ctx context.Context, detectTime time.
 
 // Add new images and record those being used.
 now := time.Now()
-currentImages := sets.NewString()
+currentImages := sets.New[string]()
 im.imageRecordsLock.Lock()
 defer im.imageRecordsLock.Unlock()
 for _, image := range images {
@@ -554,7 +554,7 @@ func (ev byLastUsedAndDetected) Less(i, j int) bool {
 return ev[i].lastUsed.Before(ev[j].lastUsed)
 }
 
-func isImageUsed(imageID string, imagesInUse sets.String) bool {
+func isImageUsed(imageID string, imagesInUse sets.Set[string]) bool {
 // Check the image ID.
 if _, ok := imagesInUse[imageID]; ok {
 return true
@@ -2230,7 +2230,7 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus
 func (kl *Kubelet) getPodsToSync() []*v1.Pod {
 allPods := kl.podManager.GetPods()
 podUIDs := kl.workQueue.GetWork()
-podUIDSet := sets.NewString()
+podUIDSet := sets.New[string]()
 for _, podUID := range podUIDs {
 podUIDSet.Insert(string(podUID))
 }
@@ -130,7 +130,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
 // reconcileHugePageResource will update huge page capacity for each page size and remove huge page sizes no longer supported
 func (kl *Kubelet) reconcileHugePageResource(initialNode, existingNode *v1.Node) bool {
 requiresUpdate := updateDefaultResources(initialNode, existingNode)
-supportedHugePageResources := sets.String{}
+supportedHugePageResources := sets.Set[string]{}
 
 for resourceName := range initialNode.Status.Capacity {
 if !v1helper.IsHugePageResourceName(resourceName) {
@@ -631,7 +631,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod,
 return opts, cleanupAction, nil
 }
 
-var masterServices = sets.NewString("kubernetes")
+var masterServices = sets.New[string]("kubernetes")
 
 // getServiceEnvVarMap makes a map[string]string of env vars for services a
 // pod in namespace ns should see.
@@ -209,7 +209,7 @@ func newTestKubeletWithImageList(
 kubelet.runtimeState.setNetworkState(nil)
 kubelet.rootDirectory = t.TempDir()
 kubelet.podLogsDirectory = t.TempDir()
-kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return true })
+kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.Set[string]) bool { return true })
 kubelet.serviceLister = testServiceLister{}
 kubelet.serviceHasSynced = func() bool { return true }
 kubelet.nodeHasSynced = func() bool { return true }
@@ -728,7 +728,7 @@ func TestHandlePodRemovesWhenSourcesAreReady(t *testing.T) {
 {Pod: fakePod},
 }
 kubelet := testKubelet.kubelet
-kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.String) bool { return ready })
+kubelet.sourcesReady = config.NewSourcesReady(func(_ sets.Set[string]) bool { return ready })
 
 kubelet.HandlePodRemoves(pods)
 time.Sleep(2 * time.Second)
@@ -169,7 +169,7 @@ func (kl *Kubelet) removeOrphanedPodVolumeDirs(uid types.UID) []error {
 // cleanupOrphanedPodDirs removes the volumes of pods that should not be
 // running and that have no containers running. Note that we roll up logs here since it runs in the main loop.
 func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecontainer.Pod) error {
-allPods := sets.NewString()
+allPods := sets.New[string]()
 for _, pod := range pods {
 allPods.Insert(string(pod.UID))
 }
@@ -846,7 +846,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(ctx context.Con
 func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) {
 // only the last execution of each init container should be preserved, and only preserve it if it is in the
 // list of init containers to keep.
-initContainerNames := sets.NewString()
+initContainerNames := sets.New[string]()
 for _, container := range pod.Spec.InitContainers {
 initContainerNames.Insert(container.Name)
 }
@@ -880,7 +880,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(ctx context.C
 // of the container because it assumes all init containers have been stopped
 // before the call happens.
 func (m *kubeGenericRuntimeManager) purgeInitContainers(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) {
-initContainerNames := sets.NewString()
+initContainerNames := sets.New[string]()
 for _, container := range pod.Spec.InitContainers {
 initContainerNames.Insert(container.Name)
 }
@@ -288,7 +288,7 @@ func (cgc *containerGC) evictSandboxes(ctx context.Context, evictNonDeletedPods
 }
 
 // collect all the PodSandboxId of container
-sandboxIDs := sets.NewString()
+sandboxIDs := sets.New[string]()
 for _, container := range containers {
 sandboxIDs.Insert(container.PodSandboxId)
 }
@@ -88,17 +88,17 @@ func TestListImages(t *testing.T) {
 assert.NoError(t, err)
 
 images := []string{"1111", "2222", "3333"}
-expected := sets.NewString(images...)
+expected := sets.New[string](images...)
 fakeImageService.SetFakeImages(images)
 
 actualImages, err := fakeManager.ListImages(ctx)
 assert.NoError(t, err)
-actual := sets.NewString()
+actual := sets.New[string]()
 for _, i := range actualImages {
 actual.Insert(i.ID)
 }
 
-assert.Equal(t, expected.List(), actual.List())
+assert.Equal(t, sets.List(expected), sets.List(actual))
 }
 
 func TestListImagesPinnedField(t *testing.T) {
@@ -246,8 +246,8 @@ func verifyPods(a, b []*kubecontainer.Pod) bool {
 return reflect.DeepEqual(a, b)
 }
 
-func verifyFakeContainerList(fakeRuntime *apitest.FakeRuntimeService, expected sets.String) (sets.String, bool) {
-actual := sets.NewString()
+func verifyFakeContainerList(fakeRuntime *apitest.FakeRuntimeService, expected sets.Set[string]) (sets.Set[string], bool) {
+actual := sets.New[string]()
 for _, c := range fakeRuntime.Containers {
 actual.Insert(c.Id)
 }
@@ -741,7 +741,7 @@ func TestPruneInitContainers(t *testing.T) {
 assert.NoError(t, err)
 
 m.pruneInitContainersBeforeStart(ctx, pod, podStatus)
-expectedContainers := sets.NewString(fakes[0].Id, fakes[2].Id)
+expectedContainers := sets.New[string](fakes[0].Id, fakes[2].Id)
 if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
 t.Errorf("expected %v, got %v", expectedContainers, actual)
 }
@@ -107,7 +107,7 @@ func (collector *volumeStatsCollector) CollectWithStability(ch chan<- metrics.Me
 lv = append([]string{pvcRef.Namespace, pvcRef.Name}, lv...)
 ch <- metrics.NewLazyConstMetric(desc, metrics.GaugeValue, v, lv...)
 }
-allPVCs := sets.String{}
+allPVCs := sets.Set[string]{}
 for _, podStat := range podStats {
 if podStat.VolumeStats == nil {
 continue
@@ -104,7 +104,7 @@ func TestParseResolvConf(t *testing.T) {
 require.NoError(t, err)
 assert.EqualValues(t, tc.nameservers, ns, "test case [%d]: name servers", i)
 assert.EqualValues(t, tc.searches, srch, "test case [%d] searches", i)
-assert.EqualValues(t, sets.NewString(tc.options...), sets.NewString(opts...), "test case [%d] options", i)
+assert.EqualValues(t, sets.New[string](tc.options...), sets.New[string](opts...), "test case [%d] options", i)
 } else {
 require.Error(t, err, "tc.searches %v", tc.searches)
 }
@@ -334,7 +334,7 @@ func TestMergeDNSOptions(t *testing.T) {
 for _, tc := range testCases {
 options := mergeDNSOptions(tc.existingDNSConfigOptions, tc.dnsConfigOptions)
 // Options order may be changed after conversion.
-if !sets.NewString(options...).Equal(sets.NewString(tc.expectedOptions...)) {
+if !sets.New[string](options...).Equal(sets.New[string](tc.expectedOptions...)) {
 t.Errorf("%s: mergeDNSOptions(%v, %v)=%v, want %v", tc.desc, tc.existingDNSConfigOptions, tc.dnsConfigOptions, options, tc.expectedOptions)
 }
 }
@@ -697,7 +697,7 @@ func dnsConfigsAreEqual(resConfig, expectedConfig *runtimeapi.DNSConfig) bool {
 }
 }
 // Options order may be changed after conversion.
-return sets.NewString(resConfig.Options...).Equal(sets.NewString(expectedConfig.Options...))
+return sets.New[string](resConfig.Options...).Equal(sets.New[string](expectedConfig.Options...))
 }
 
 func newTestPods(count int) []*v1.Pod {
@@ -361,7 +361,7 @@ func (g *GenericPLEG) Relist() {
 }
 
 func getContainersFromPods(pods ...*kubecontainer.Pod) []*kubecontainer.Container {
-cidSet := sets.NewString()
+cidSet := sets.New[string]()
 var containers []*kubecontainer.Container
 fillCidSet := func(cs []*kubecontainer.Container) {
 for _, c := range cs {
@@ -131,7 +131,7 @@ func TestPluginRegistration(t *testing.T) {
 stopChan := make(chan struct{})
 defer close(stopChan)
 go func() {
-sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true })
+sourcesReady := config.NewSourcesReady(func(_ sets.Set[string]) bool { return true })
 pluginManager.Run(sourcesReady, stopChan)
 }()
 
@@ -19,7 +19,7 @@ package testing
 import (
 "sync"
 
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/apimachinery/pkg/util/sets"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@@ -29,14 +29,14 @@ type FakeMirrorClient struct {
 mirrorPodLock sync.RWMutex
 // Note that a real mirror manager does not store the mirror pods in
 // itself. This fake manager does this to track calls.
-mirrorPods sets.String
+mirrorPods sets.Set[string]
 createCounts map[string]int
 deleteCounts map[string]int
 }
 
 func NewFakeMirrorClient() *FakeMirrorClient {
 m := FakeMirrorClient{}
-m.mirrorPods = sets.NewString()
+m.mirrorPods = sets.New[string]()
 m.createCounts = make(map[string]int)
 m.deleteCounts = make(map[string]int)
 return &m
@@ -75,7 +75,7 @@ func (fmc *FakeMirrorClient) NumOfPods() int {
 func (fmc *FakeMirrorClient) GetPods() []string {
 fmc.mirrorPodLock.RLock()
 defer fmc.mirrorPodLock.RUnlock()
-return fmc.mirrorPods.List()
+return sets.List(fmc.mirrorPods)
 }
 
 func (fmc *FakeMirrorClient) GetCounts(podFullName string) (int, int) {
@@ -220,10 +220,10 @@ func (q *fakeQueue) Items() []FakeQueueItem {
 return append(make([]FakeQueueItem, 0, len(q.queue)), q.queue...)
 }
 
-func (q *fakeQueue) Set() sets.String {
+func (q *fakeQueue) Set() sets.Set[string] {
 q.lock.Lock()
 defer q.lock.Unlock()
-work := sets.NewString()
+work := sets.New[string]()
 for _, item := range q.queue[q.currentStart:] {
 work.Insert(string(item.UID))
 }
@@ -476,7 +476,7 @@ func drainWorkers(podWorkers *podWorkers, numPods int) {
 }
 
 func drainWorkersExcept(podWorkers *podWorkers, uids ...types.UID) {
-set := sets.NewString()
+set := sets.New[string]()
 for _, uid := range uids {
 set.Insert(string(uid))
 }
@@ -982,8 +982,8 @@ func TestUpdatePodDoesNotForgetSyncPodKill(t *testing.T) {
 }
 }
 
-func newUIDSet(uids ...types.UID) sets.String {
-set := sets.NewString()
+func newUIDSet(uids ...types.UID) sets.Set[string] {
+set := sets.New[string]()
 for _, uid := range uids {
 set.Insert(string(uid))
 }
@@ -993,7 +993,7 @@ func newUIDSet(uids ...types.UID) sets.String {
 type terminalPhaseSync struct {
 lock sync.Mutex
 fn syncPodFnType
-terminal sets.String
+terminal sets.Set[string]
 }
 
 func (s *terminalPhaseSync) SyncPod(ctx context.Context, updateType kubetypes.SyncPodType, pod *v1.Pod, mirrorPod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, error) {
@@ -1018,7 +1018,7 @@ func (s *terminalPhaseSync) SetTerminal(uid types.UID) {
 func newTerminalPhaseSync(fn syncPodFnType) *terminalPhaseSync {
 return &terminalPhaseSync{
 fn: fn,
-terminal: sets.NewString(),
+terminal: sets.New[string](),
 }
 }
 
@@ -1171,7 +1171,7 @@ func TestStaticPodExclusion(t *testing.T) {
 t.Fatalf("unexpected waiting static pods: %s", cmp.Diff(e, a))
 }
 // verify all are enqueued
-if e, a := sets.NewString("1-normal", "2-static", "4-static", "3-static"), podWorkers.workQueue.(*fakeQueue).Set(); !e.Equal(a) {
+if e, a := sets.New[string]("1-normal", "2-static", "4-static", "3-static"), podWorkers.workQueue.(*fakeQueue).Set(); !e.Equal(a) {
 t.Fatalf("unexpected queued items: %s", cmp.Diff(e, a))
 }
 
@@ -1191,7 +1191,7 @@ func TestStaticPodExclusion(t *testing.T) {
 t.Fatalf("unexpected waiting static pods: %s", cmp.Diff(e, a))
 }
 // the queue should include a single item for 3-static (indicating we need to retry later)
-if e, a := sets.NewString("3-static"), newUIDSet(podWorkers.workQueue.GetWork()...); !reflect.DeepEqual(e, a) {
+if e, a := sets.New[string]("3-static"), newUIDSet(podWorkers.workQueue.GetWork()...); !reflect.DeepEqual(e, a) {
 t.Fatalf("unexpected queued items: %s", cmp.Diff(e, a))
 }
 
@@ -1209,7 +1209,7 @@ func TestStaticPodExclusion(t *testing.T) {
 t.Fatalf("unexpected pod state: %#v", pod3)
 }
 // the queue should be empty because the worker is now done
-if e, a := sets.NewString(), newUIDSet(podWorkers.workQueue.GetWork()...); !reflect.DeepEqual(e, a) {
+if e, a := sets.New[string](), newUIDSet(podWorkers.workQueue.GetWork()...); !reflect.DeepEqual(e, a) {
 t.Fatalf("unexpected queued items: %s", cmp.Diff(e, a))
 }
 // 2-static is still running
@@ -99,8 +99,8 @@ func (s *secretManager) UnregisterPod(pod *v1.Pod) {
 s.manager.UnregisterPod(pod)
 }
 
-func getSecretNames(pod *v1.Pod) sets.String {
-result := sets.NewString()
+func getSecretNames(pod *v1.Pod) sets.Set[string] {
+result := sets.New[string]()
 podutil.VisitPodSecretNames(pod, func(name string) bool {
 result.Insert(name)
 return true
@@ -108,8 +108,8 @@ type Server struct {
 auth AuthInterface
 host HostInterface
 restfulCont containerInterface
-metricsBuckets sets.String
-metricsMethodBuckets sets.String
+metricsBuckets sets.Set[string]
+metricsMethodBuckets sets.Set[string]
 resourceAnalyzer stats.ResourceAnalyzer
 }
 
@@ -280,8 +280,8 @@ func NewServer(
 resourceAnalyzer: resourceAnalyzer,
 auth: auth,
 restfulCont: &filteringContainer{Container: restful.NewContainer()},
-metricsBuckets: sets.NewString(),
-metricsMethodBuckets: sets.NewString("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"),
+metricsBuckets: sets.New[string](),
+metricsMethodBuckets: sets.New[string]("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"),
 }
 if auth != nil {
 server.InstallAuthFilter()
@@ -472,7 +472,7 @@ func (m *UsernsManager) CleanupOrphanedPodUsernsAllocations(pods []*v1.Pod, runn
 m.lock.Lock()
 defer m.lock.Unlock()
 
-allPods := sets.NewString()
+allPods := sets.New[string]()
 for _, pod := range pods {
 allPods.Insert(string(pod.UID))
 }
@@ -480,7 +480,7 @@ func (m *UsernsManager) CleanupOrphanedPodUsernsAllocations(pods []*v1.Pod, runn
 allPods.Insert(string(pod.ID))
 }
 
-allFound := sets.NewString()
+allFound := sets.New[string]()
 found, err := m.kl.ListPodsFromDisk()
 if err != nil {
 return err
@@ -210,7 +210,7 @@ func (s *objectStore) Get(namespace, name string) (runtime.Object, error) {
 // (e.g. ttl-based implementation vs watch-based implementation).
 type cacheBasedManager struct {
 objectStore Store
-getReferencedObjects func(*v1.Pod) sets.String
+getReferencedObjects func(*v1.Pod) sets.Set[string]
 
 lock sync.Mutex
 registeredPods map[objectKey]*v1.Pod
@@ -273,7 +273,7 @@ func (c *cacheBasedManager) UnregisterPod(pod *v1.Pod) {
 // - every GetObject() call tries to fetch the value from local cache; if it is
 // not there, invalidated or too old, we fetch it from apiserver and refresh the
 // value in cache; otherwise it is just fetched from cache
-func NewCacheBasedManager(objectStore Store, getReferencedObjects func(*v1.Pod) sets.String) Manager {
+func NewCacheBasedManager(objectStore Store, getReferencedObjects func(*v1.Pod) sets.Set[string]) Manager {
 return &cacheBasedManager{
 objectStore: objectStore,
 getReferencedObjects: getReferencedObjects,
@@ -73,8 +73,8 @@ func newSecretStore(fakeClient clientset.Interface, clock clock.Clock, getTTL Ge
 }
 }
 
-func getSecretNames(pod *v1.Pod) sets.String {
-result := sets.NewString()
+func getSecretNames(pod *v1.Pod) sets.Set[string] {
+result := sets.New[string]()
 podutil.VisitPodSecretNames(pod, func(name string) bool {
 result.Insert(name)
 return true
@@ -387,7 +387,7 @@ func NewWatchBasedManager(
 isImmutable isImmutableFunc,
 groupResource schema.GroupResource,
 resyncInterval time.Duration,
-getReferencedObjects func(*v1.Pod) sets.String) Manager {
+getReferencedObjects func(*v1.Pod) sets.Set[string]) Manager {
 
 // If a configmap/secret is used as a volume, the volumeManager will visit the objectCacheItem every resyncInterval cycle,
 // We just want to stop the objectCacheItem referenced by environment variables,
@@ -37,16 +37,16 @@ func newTestBasicWorkQueue() (*basicWorkQueue, *testingclock.FakeClock) {
 }
 
 func compareResults(t *testing.T, expected, actual []types.UID) {
-expectedSet := sets.NewString()
+expectedSet := sets.New[string]()
 for _, u := range expected {
 expectedSet.Insert(string(u))
 }
-actualSet := sets.NewString()
+actualSet := sets.New[string]()
 for _, u := range actual {
 actualSet.Insert(string(u))
 }
 if !expectedSet.Equal(actualSet) {
-t.Errorf("Expected %#v, got %#v", expectedSet.List(), actualSet.List())
+t.Errorf("Expected %#v, got %#v", sets.List(expectedSet), sets.List(actualSet))
 }
 }
 
@@ -151,7 +151,7 @@ func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr, seLinuxTran
 return &desiredStateOfWorld{
 volumesToMount: make(map[v1.UniqueVolumeName]volumeToMount),
 volumePluginMgr: volumePluginMgr,
-podErrors: make(map[types.UniquePodName]sets.String),
+podErrors: make(map[types.UniquePodName]sets.Set[string]),
 seLinuxTranslator: seLinuxTranslator,
 }
 }
@@ -166,7 +166,7 @@ type desiredStateOfWorld struct {
 // plugin objects.
 volumePluginMgr *volume.VolumePluginMgr
 // podErrors are errors caught by desiredStateOfWorldPopulator about volumes for a given pod.
-podErrors map[types.UniquePodName]sets.String
+podErrors map[types.UniquePodName]sets.Set[string]
 // seLinuxTranslator translates v1.SELinuxOptions to a file SELinux label.
 seLinuxTranslator util.SELinuxLabelTranslator
 
@@ -629,7 +629,7 @@ func (dsw *desiredStateOfWorld) AddErrorToPod(podName types.UniquePodName, err s
 }
 return
 }
-dsw.podErrors[podName] = sets.NewString(err)
+dsw.podErrors[podName] = sets.New[string](err)
 }
 
 func (dsw *desiredStateOfWorld) PopPodErrors(podName types.UniquePodName) []string {
@@ -638,7 +638,7 @@ func (dsw *desiredStateOfWorld) PopPodErrors(podName types.UniquePodName) []stri
 
 if errs, found := dsw.podErrors[podName]; found {
 delete(dsw.podErrors, podName)
-return errs.List()
+return sets.List(errs)
 }
 return []string{}
 }
@@ -114,12 +114,12 @@ func TestReconstructVolumes(t *testing.T) {
 t.Errorf("Expected expectedVolumesNeedDevicePath:\n%v\n got:\n%v", expectedVolumes, rcInstance.volumesNeedUpdateFromNodeStatus)
 }
 
-volumesFailedReconstruction := sets.NewString()
+volumesFailedReconstruction := sets.New[string]()
 for _, vol := range rcInstance.volumesFailedReconstruction {
 volumesFailedReconstruction.Insert(vol.volumeSpecName)
 }
-if !reflect.DeepEqual(volumesFailedReconstruction.List(), tc.expectedVolumesFailedReconstruction) {
-t.Errorf("Expected volumesFailedReconstruction:\n%v\n got:\n%v", tc.expectedVolumesFailedReconstruction, volumesFailedReconstruction.List())
+if !reflect.DeepEqual(sets.List(volumesFailedReconstruction), tc.expectedVolumesFailedReconstruction) {
+t.Errorf("Expected volumesFailedReconstruction:\n%v\n got:\n%v", tc.expectedVolumesFailedReconstruction, sets.List(volumesFailedReconstruction))
 }
 
 if tc.verifyFunc != nil {
@@ -323,7 +323,7 @@ func (vm *volumeManager) GetPossiblyMountedVolumesForPod(podName types.UniquePod
 
 func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
 podName := util.GetUniquePodName(pod)
-supplementalGroups := sets.NewString()
+supplementalGroups := sets.New[string]()
 
 for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
 if mountedVolume.VolumeGidValue != "" {
@@ -332,7 +332,7 @@ func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
 }
 
 result := make([]int64, 0, supplementalGroups.Len())
-for _, group := range supplementalGroups.List() {
+for _, group := range sets.List(supplementalGroups) {
 iGroup, extra := getExtraSupplementalGid(group, pod)
 if !extra {
 continue
@@ -480,7 +480,7 @@ func (vm *volumeManager) WaitForUnmount(ctx context.Context, pod *v1.Pod) error
 }
 
 func (vm *volumeManager) getVolumesNotInDSW(uniquePodName types.UniquePodName, expectedVolumes []string) []string {
-volumesNotInDSW := sets.NewString(expectedVolumes...)
+volumesNotInDSW := sets.New[string](expectedVolumes...)
 
 for _, volumeToMount := range vm.desiredStateOfWorld.GetVolumesToMount() {
 if volumeToMount.PodName == uniquePodName {
@@ -488,7 +488,7 @@ func (vm *volumeManager) getVolumesNotInDSW(uniquePodName types.UniquePodName, e
 }
 }
 
-return volumesNotInDSW.List()
+return sets.List(volumesNotInDSW)
 }
 
 // getUnattachedVolumes returns a list of the volumes that are expected to be attached but
@@ -534,7 +534,7 @@ func (vm *volumeManager) verifyVolumesUnmountedFunc(podName types.UniquePodName)
 // expectedVolumes. It returns a list of unmounted volumes.
 // The list also includes volume that may be mounted in uncertain state.
 func (vm *volumeManager) getUnmountedVolumes(podName types.UniquePodName, expectedVolumes []string) []string {
-mountedVolumes := sets.NewString()
+mountedVolumes := sets.New[string]()
 for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
 mountedVolumes.Insert(mountedVolume.OuterVolumeSpecName)
 }
@@ -543,7 +543,7 @@ func (vm *volumeManager) getUnmountedVolumes(podName types.UniquePodName, expect
 
 // filterUnmountedVolumes adds each element of expectedVolumes that is not in
 // mountedVolumes to a list of unmountedVolumes and returns it.
-func filterUnmountedVolumes(mountedVolumes sets.String, expectedVolumes []string) []string {
+func filterUnmountedVolumes(mountedVolumes sets.Set[string], expectedVolumes []string) []string {
 unmountedVolumes := []string{}
 for _, expectedVolume := range expectedVolumes {
 if !mountedVolumes.Has(expectedVolume) {
@@ -542,7 +542,7 @@ func runVolumeManager(manager VolumeManager) chan struct{} {
 stopCh := make(chan struct{})
 //readyCh := make(chan bool, 1)
 //readyCh <- true
-sourcesReady := config.NewSourcesReady(func(_ sets.String) bool { return true })
+sourcesReady := config.NewSourcesReady(func(_ sets.Set[string]) bool { return true })
 go manager.Run(sourcesReady, stopCh)
 return stopCh
 }
@@ -180,7 +180,7 @@ func (n *networkCounter) mergeCollectedData(packetsReceivedPerSecondData,
 packetsReceivedErrorsData,
 packetsOutboundDiscardedData,
 packetsOutboundErrorsData map[string]uint64) {
-adapters := sets.NewString()
+adapters := sets.New[string]()
 
 // merge the collected data and list of adapters.
 adapters.Insert(n.mergePacketsReceivedPerSecondData(packetsReceivedPerSecondData)...)